Merge branches 'acpi_pad', 'acpica', 'apei-bugzilla-43282', 'battery', 'cpuidle-coupl...
author    Len Brown <len.brown@intel.com>
Thu, 26 Jul 2012 04:03:58 +0000 (00:03 -0400)
committer Len Brown <len.brown@intel.com>
Thu, 26 Jul 2012 04:03:58 +0000 (00:03 -0400)
1573 files changed:
.mailmap
Documentation/ABI/testing/sysfs-block-rssd
Documentation/ABI/testing/sysfs-bus-iio
Documentation/ABI/testing/sysfs-class-mtd
Documentation/DocBook/media/v4l/controls.xml
Documentation/DocBook/media/v4l/pixfmt.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
Documentation/DocBook/media/v4l/vidioc-dqevent.xml
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
Documentation/arm/SPEAr/overview.txt
Documentation/device-mapper/verity.txt
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/fsl-mma8450.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/devicetree/bindings/thermal/spear-thermal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/hwmon/coretemp
Documentation/kdump/kdump.txt
Documentation/kernel-parameters.txt
Documentation/networking/stmmac.txt
Documentation/prctl/no_new_privs.txt [new file with mode: 0644]
Documentation/stable_kernel_rules.txt
Documentation/thermal/sysfs-api.txt
Documentation/virtual/kvm/api.txt
Documentation/vm/frontswap.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/mmp2-brownstone.dts
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1310.dtsi
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/boot/dts/spear300-evb.dts
arch/arm/boot/dts/spear300.dtsi
arch/arm/boot/dts/spear310-evb.dts
arch/arm/boot/dts/spear310.dtsi
arch/arm/boot/dts/spear320-evb.dts
arch/arm/boot/dts/spear320.dtsi
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/spear600.dtsi
arch/arm/common/dmabounce.c
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/atomic.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/hardware/sp810.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-thumb.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/signal.c
arch/arm/kernel/signal.h
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-dove/include/mach/bridge-regs.h
arch/arm/mach-dove/include/mach/dove.h
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-highbank/Makefile
arch/arm/mach-highbank/core.h
arch/arm/mach-highbank/highbank.c
arch/arm/mach-highbank/smc.S [new file with mode: 0644]
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx1.c
arch/arm/mach-imx/clk-imx21.c
arch/arm/mach-imx/clk-imx25.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx31.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-pllv2.c
arch/arm/mach-imx/crm-regs-imx5.h
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-imx/mach-cpuimx51sd.c
arch/arm/mach-imx/mach-imx27_visstrim_m10.c
arch/arm/mach-imx/mach-mx21ads.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-imx/mm-imx5.c
arch/arm/mach-kirkwood/board-iconnect.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/include/mach/bridge-regs.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-mmp/include/mach/gpio-pxa.h [deleted file]
arch/arm/mach-mmp/irq.c
arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
arch/arm/mach-mxs/mach-apx4devkit.c
arch/arm/mach-omap2/board-flash.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.h
arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
arch/arm/mach-omap2/clockdomains44xx_data.c
arch/arm/mach-omap2/cm.h
arch/arm/mach-omap2/cminst44xx.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/dsp.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_l3_smx.c
arch/arm/mach-omap2/omap_phy_internal.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/prm2xxx_3xxx.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/usb-musb.c
arch/arm/mach-omap2/usb-tusb6010.c
arch/arm/mach-orion5x/include/mach/bridge-regs.h
arch/arm/mach-orion5x/include/mach/io.h [new file with mode: 0644]
arch/arm/mach-orion5x/include/mach/orion5x.h
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-s3c24xx/clock-s3c2440.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-kzm9d.c
arch/arm/mach-shmobile/board-kzm9g.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/intc-r8a7779.c
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-spear13xx/include/mach/debug-macro.S
arch/arm/mach-spear13xx/include/mach/dma.h
arch/arm/mach-spear13xx/include/mach/generic.h
arch/arm/mach-spear13xx/include/mach/gpio.h
arch/arm/mach-spear13xx/include/mach/irqs.h
arch/arm/mach-spear13xx/include/mach/spear.h
arch/arm/mach-spear13xx/include/mach/timex.h
arch/arm/mach-spear13xx/include/mach/uncompress.h
arch/arm/mach-spear13xx/spear1310.c
arch/arm/mach-spear13xx/spear1340.c
arch/arm/mach-spear13xx/spear13xx.c
arch/arm/mach-spear3xx/include/mach/debug-macro.S
arch/arm/mach-spear3xx/include/mach/generic.h
arch/arm/mach-spear3xx/include/mach/gpio.h
arch/arm/mach-spear3xx/include/mach/irqs.h
arch/arm/mach-spear3xx/include/mach/misc_regs.h
arch/arm/mach-spear3xx/include/mach/spear.h
arch/arm/mach-spear3xx/include/mach/timex.h
arch/arm/mach-spear3xx/include/mach/uncompress.h
arch/arm/mach-spear3xx/spear300.c
arch/arm/mach-spear3xx/spear310.c
arch/arm/mach-spear3xx/spear320.c
arch/arm/mach-spear3xx/spear3xx.c
arch/arm/mach-spear6xx/include/mach/gpio.h
arch/arm/mach-spear6xx/include/mach/misc_regs.h
arch/arm/mach-spear6xx/spear6xx.c
arch/arm/mach-tegra/reset.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/timer.c
arch/arm/mach-versatile/core.c
arch/arm/mach-versatile/include/mach/hardware.h
arch/arm/mach-versatile/include/mach/io.h [new file with mode: 0644]
arch/arm/mach-versatile/pci.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm/net/bpf_jit_32.h
arch/arm/plat-mxc/epit.c
arch/arm/plat-mxc/include/mach/common.h
arch/arm/plat-mxc/include/mach/mx2_cam.h
arch/arm/plat-mxc/time.c
arch/arm/plat-omap/clock.c
arch/arm/plat-omap/include/plat/cpu.h
arch/arm/plat-omap/include/plat/mmc.h
arch/arm/plat-orion/common.c
arch/arm/plat-pxa/ssp.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/map-s3c.h
arch/arm/plat-samsung/include/plat/watchdog-reset.h
arch/arm/plat-samsung/s5p-clock.c
arch/arm/plat-spear/include/plat/debug-macro.S
arch/arm/plat-spear/include/plat/pl080.h
arch/arm/plat-spear/include/plat/shirq.h
arch/arm/plat-spear/include/plat/timex.h
arch/arm/plat-spear/include/plat/uncompress.h
arch/arm/plat-spear/pl080.c
arch/arm/plat-spear/restart.c
arch/arm/plat-spear/shirq.c
arch/avr32/kernel/signal.c
arch/blackfin/kernel/process.c
arch/h8300/include/asm/pgtable.h
arch/h8300/include/asm/uaccess.h
arch/h8300/kernel/setup.c
arch/h8300/kernel/signal.c
arch/h8300/kernel/time.c
arch/h8300/mm/init.c
arch/m32r/boot/compressed/Makefile
arch/m32r/boot/compressed/misc.c
arch/m32r/include/asm/ptrace.h
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/Kconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/m528xsim.h
arch/m68k/include/asm/uaccess_mm.h
arch/m68k/kernel/ptrace.c
arch/m68k/kernel/time.c
arch/m68k/lib/uaccess.c
arch/m68k/platform/68328/timers.c
arch/m68k/platform/68360/config.c
arch/m68k/platform/coldfire/clk.c
arch/mips/Kconfig
arch/mips/bcm47xx/Kconfig
arch/mips/bcm63xx/dev-pcmcia.c
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/bitops.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/mipsmtregs.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc.c
arch/mips/kernel/sync-r4k.c
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/page-funcs.S [new file with mode: 0644]
arch/mips/mm/page.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-pci.c
arch/mips/mti-malta/malta-setup.c
arch/mips/netlogic/xlp/setup.c
arch/mips/oprofile/common.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/fixup-fuloong2e.c
arch/mips/pci/fixup-lemote2f.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/fixup-mpc30x.c
arch/mips/pci/fixup-sb1250.c
arch/mips/pci/ops-tx4927.c
arch/mips/pci/pci-ip27.c
arch/mips/pci/pci-lantiq.c
arch/mips/pci/pci-xlr.c
arch/mips/pmc-sierra/yosemite/smp.c
arch/mips/powertv/asic/asic-calliope.c
arch/mips/powertv/asic/asic-cronus.c
arch/mips/powertv/asic/asic-gaia.c
arch/mips/powertv/asic/asic-zeus.c
arch/mips/txx9/generic/pci.c
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timex.h
arch/mn10300/kernel/cevt-mn10300.c
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/signal.c
arch/mn10300/kernel/traps.c
arch/mn10300/mm/dma-alloc.c
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/smc91111.c
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/include/unit/timex.h
arch/parisc/Makefile
arch/parisc/include/asm/Kbuild
arch/parisc/include/asm/bug.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/mm/numa.c
arch/powerpc/net/bpf_jit_64.S
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/nvram.c
arch/powerpc/platforms/pseries/processor_idle.c
arch/powerpc/xmon/xmon.c
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/boards/mach-kfr2r09/setup.c
arch/sh/drivers/pci/pcie-sh7786.c
arch/sh/include/asm/Kbuild
arch/sh/include/asm/bitsperlong.h [deleted file]
arch/sh/include/asm/cputime.h [deleted file]
arch/sh/include/asm/current.h [deleted file]
arch/sh/include/asm/delay.h [deleted file]
arch/sh/include/asm/div64.h [deleted file]
arch/sh/include/asm/emergency-restart.h [deleted file]
arch/sh/include/asm/errno.h [deleted file]
arch/sh/include/asm/fcntl.h [deleted file]
arch/sh/include/asm/io_noioport.h
arch/sh/include/asm/ioctl.h [deleted file]
arch/sh/include/asm/ipcbuf.h [deleted file]
arch/sh/include/asm/irq_regs.h [deleted file]
arch/sh/include/asm/kvm_para.h [deleted file]
arch/sh/include/asm/local.h [deleted file]
arch/sh/include/asm/local64.h [deleted file]
arch/sh/include/asm/mman.h [deleted file]
arch/sh/include/asm/msgbuf.h [deleted file]
arch/sh/include/asm/param.h [deleted file]
arch/sh/include/asm/parport.h [deleted file]
arch/sh/include/asm/percpu.h [deleted file]
arch/sh/include/asm/poll.h [deleted file]
arch/sh/include/asm/resource.h [deleted file]
arch/sh/include/asm/scatterlist.h [deleted file]
arch/sh/include/asm/sembuf.h [deleted file]
arch/sh/include/asm/serial.h [deleted file]
arch/sh/include/asm/shmbuf.h [deleted file]
arch/sh/include/asm/siginfo.h [deleted file]
arch/sh/include/asm/sizes.h [deleted file]
arch/sh/include/asm/socket.h [deleted file]
arch/sh/include/asm/statfs.h [deleted file]
arch/sh/include/asm/termbits.h [deleted file]
arch/sh/include/asm/termios.h [deleted file]
arch/sh/include/asm/uaccess.h
arch/sh/include/asm/uaccess_32.h
arch/sh/include/asm/uaccess_64.h
arch/sh/include/asm/ucontext.h [deleted file]
arch/sh/include/asm/word-at-a-time.h [new file with mode: 0644]
arch/sh/include/asm/xor.h [deleted file]
arch/sh/include/cpu-sh2a/cpu/ubc.h [deleted file]
arch/sh/kernel/cpu/sh3/serial-sh7720.c
arch/sh/kernel/cpu/sh4a/clock-sh7343.c
arch/sh/kernel/cpu/sh4a/clock-sh7366.c
arch/sh/kernel/cpu/sh4a/clock-sh7722.c
arch/sh/kernel/cpu/sh4a/clock-sh7723.c
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
arch/sh/kernel/cpu/sh4a/clock-sh7734.c
arch/sh/kernel/cpu/sh4a/clock-sh7757.c
arch/sh/kernel/cpu/sh4a/clock-sh7785.c
arch/sh/kernel/cpu/sh4a/clock-sh7786.c
arch/sh/kernel/cpu/sh4a/clock-shx3.c
arch/sh/kernel/cpu/sh5/entry.S
arch/sh/kernel/process.c
arch/sh/kernel/process_64.c
arch/sh/kernel/sh_ksyms_64.c
arch/sparc/include/asm/cmt.h [deleted file]
arch/sparc/include/asm/mpmbox.h [deleted file]
arch/sparc/kernel/vio.c
arch/tile/include/asm/thread_info.h
arch/tile/include/asm/uaccess.h
arch/tile/kernel/backtrace.c
arch/tile/kernel/entry.S
arch/tile/kernel/setup.c
arch/um/drivers/mconsole_kern.c
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/crypto/aesni-intel_asm.S
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/nmi.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mkcapflags.pl
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/nmi_selftest.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kvm/mmu.c
arch/x86/lib/csum-wrappers_64.c
arch/x86/lib/usercopy.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/init.c
arch/x86/mm/ioremap.c
arch/x86/mm/pageattr.c
arch/x86/mm/srat.c
arch/x86/platform/mrst/early_printk_mrst.c
arch/x86/platform/mrst/mrst.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/tools/gen-insn-attr-x86.awk
arch/x86/um/sys_call_table_32.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/xtensa/Makefile
arch/xtensa/include/asm/syscall.h
arch/xtensa/kernel/process.c
arch/xtensa/kernel/signal.c
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-core.c
block/blk-timeout.c
block/cfq-iosched.c
block/scsi_ioctl.c
drivers/acpi/Kconfig
drivers/acpi/ac.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlcode.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsmthdat.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswscope.c
drivers/acpi/acpica/dswstate.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evglock.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/exresop.c
drivers/acpi/acpica/exstore.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exsystem.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwacpi.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsalloc.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsdumpdv.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nsrepair.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/nsxfobj.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psopcode.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psscope.c
drivers/acpi/acpica/pstree.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/rsaddr.c
drivers/acpi/acpica/rscalc.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/rsdump.c
drivers/acpi/acpica/rslist.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/acpica/rsxface.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfload.c [new file with mode: 0644]
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utaddress.c
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utexcep.c [new file with mode: 0644]
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utlock.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utobject.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/acpica/utstate.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/acpica/utxfmutex.c
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/ghes.c
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/acpi/osl.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/thermal.c
drivers/acpi/video.c
drivers/ata/pata_arasan_cf.c
drivers/base/dd.c
drivers/base/power/main.c
drivers/base/regmap/regmap.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_pci.c
drivers/bcma/sprom.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_req.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/rbd.c
drivers/block/umem.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/hw_random/atmel-rng.c
drivers/clk/clk.c
drivers/clk/mxs/clk-imx23.c
drivers/clk/mxs/clk-imx28.c
drivers/clk/spear/clk-aux-synth.c
drivers/clk/spear/clk-frac-synth.c
drivers/clk/spear/clk-gpt-synth.c
drivers/clk/spear/clk-vco-pll.c
drivers/clk/spear/clk.c
drivers/clk/spear/clk.h
drivers/clk/spear/spear1310_clock.c
drivers/clk/spear/spear1340_clock.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/spear/spear6xx_clock.c
drivers/clocksource/Makefile
drivers/clocksource/em_sti.c [new file with mode: 0644]
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/cpuidle/Kconfig
drivers/cpuidle/Makefile
drivers/cpuidle/coupled.c [new file with mode: 0644]
drivers/cpuidle/cpuidle.c
drivers/cpuidle/cpuidle.h
drivers/dma/dw_dmac.c
drivers/dma/imx-sdma.c
drivers/dma/pl330.c
drivers/edac/edac_mc.c
drivers/edac/i7core_edac.c
drivers/edac/mpc85xx_edac.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-max8997.c
drivers/extcon/extcon_class.c
drivers/extcon/extcon_gpio.c
drivers/gpio/Kconfig
drivers/gpio/devres.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-samsung.c
drivers/gpio/gpio-sta2x11.c
drivers/gpio/gpio-tps65910.c
drivers/gpio/gpio-wm8994.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/opregion.c
drivers/gpu/drm/gma500/opregion.h
drivers/gpu/drm/gma500/psb_device.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_reg.h
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/via/via_map.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/Kconfig
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/applesmc.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc2103.c
drivers/hwmon/it87.c
drivers/hwmon/jc42.c
drivers/hwmon/lineage-pem.c
drivers/hwmon/ltc4261.c
drivers/hwmon/max16065.c
drivers/hwspinlock/hwspinlock_core.c
drivers/i2c/muxes/Kconfig
drivers/i2c/muxes/Makefile
drivers/i2c/muxes/i2c-mux-pinctrl.c [new file with mode: 0644]
drivers/ide/icside.c
drivers/ide/ide-cs.c
drivers/idle/intel_idle.c
drivers/iio/Kconfig
drivers/iio/industrialio-core.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/input/joystick/as5011.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/mcs_touchkey.c
drivers/input/keyboard/mpr121_touchkey.c
drivers/input/keyboard/qt1070.c
drivers/input/keyboard/tca6416-keypad.c
drivers/input/keyboard/tca8418_keypad.c
drivers/input/keyboard/tnetv107x-keypad.c
drivers/input/misc/ad714x.c
drivers/input/misc/dm355evm_keys.c
drivers/input/mouse/bcm5974.c
drivers/input/tablet/wacom_sys.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/bu21013_ts.c
drivers/input/touchscreen/cy8ctmg110_ts.c
drivers/input/touchscreen/intel-mid-touch.c
drivers/input/touchscreen/pixcir_i2c_ts.c
drivers/input/touchscreen/tnetv107x-ts.c
drivers/input/touchscreen/tsc2005.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/tegra-smmu.c
drivers/isdn/mISDN/stack.c
drivers/leds/Kconfig
drivers/leds/led-class.c
drivers/leds/led-core.c
drivers/leds/ledtrig-heartbeat.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/saa7146_fops.c
drivers/media/dvb/dvb-core/dvbdev.c
drivers/media/dvb/frontends/cx24110.c
drivers/media/dvb/frontends/cxd2820r_c.c
drivers/media/dvb/frontends/lg2160.c
drivers/media/dvb/siano/smsusb.c
drivers/media/radio/radio-maxiradio.c
drivers/media/radio/radio-sf16fmr2.c
drivers/media/radio/si470x/radio-si470x-usb.c
drivers/media/rc/winbond-cir.c
drivers/media/video/bt8xx/bttv-cards.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/bt8xx/bttv.h
drivers/media/video/bt8xx/bttvp.h
drivers/media/video/bw-qcam.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx18/cx18-driver.h
drivers/media/video/cx18/cx18-firmware.c
drivers/media/video/cx18/cx18-mailbox.c
drivers/media/video/cx231xx/cx231xx-audio.c
drivers/media/video/cx231xx/cx231xx-vbi.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/cx23885/cx23885-video.c
drivers/media/video/cx23885/cx23885.h
drivers/media/video/cx25821/cx25821-core.c
drivers/media/video/cx25821/cx25821.h
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/cx88/cx88-blackbird.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/em28xx/em28xx-input.c
drivers/media/video/gspca/gspca.c
drivers/media/video/gspca/ov534.c
drivers/media/video/gspca/ov534_9.c
drivers/media/video/gspca/pac7311.c
drivers/media/video/gspca/sn9c20x.c
drivers/media/video/gspca/sonixj.c
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-driver.h
drivers/media/video/mem2mem_testdev.c
drivers/media/video/mx2_camera.c
drivers/media/video/omap3isp/isppreview.c
drivers/media/video/pms.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-lite.c
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-mdevice.h
drivers/media/video/s5p-mfc/regs-mfc.h
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-mfc/s5p_mfc_opr.h
drivers/media/video/s5p-mfc/s5p_mfc_shm.h
drivers/media/video/smiapp/Kconfig
drivers/media/video/smiapp/smiapp-core.c
drivers/media/video/tuner-core.c
drivers/media/video/v4l2-dev.c
drivers/media/video/v4l2-ioctl.c
drivers/media/video/vino.c
drivers/media/video/vivi.c
drivers/mfd/Kconfig
drivers/mfd/ab5500-core.h [deleted file]
drivers/mfd/mc13xxx-spi.c
drivers/mfd/omap-usb-host.c
drivers/mfd/palmas.c
drivers/mfd/stmpe-i2c.c
drivers/mfd/stmpe-spi.c
drivers/misc/mei/interrupt.c
drivers/misc/mei/main.c
drivers/misc/mei/wd.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/card/block.c
drivers/mmc/core/cd-gpio.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/host/atmel-mci-regs.h
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci.c
drivers/mtd/mtdoops.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/caif/caif_hsi.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/flexcan.c
drivers/net/dummy.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/tile/Kconfig
drivers/net/ethernet/tile/Makefile
drivers/net/ethernet/tile/tilegx.c [new file with mode: 0644]
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/phy/icplus.c
drivers/net/phy/mdio-mux.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/usb/ipheth.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/sierra_net.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rtl818x/rtl8187/leds.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/ti/wl1251/acx.c
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wlcore/Kconfig
drivers/net/xen-netfront.c
drivers/of/platform.c
drivers/oprofile/oprofile_perf.c
drivers/pci/pci-driver.c
drivers/pinctrl/core.c
drivers/pinctrl/pinctrl-imx.c
drivers/pinctrl/pinctrl-imx6q.c
drivers/pinctrl/pinctrl-mxs.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-sirf.c
drivers/pinctrl/spear/pinctrl-spear.c
drivers/pinctrl/spear/pinctrl-spear.h
drivers/pinctrl/spear/pinctrl-spear1310.c
drivers/pinctrl/spear/pinctrl-spear1340.c
drivers/pinctrl/spear/pinctrl-spear300.c
drivers/pinctrl/spear/pinctrl-spear310.c
drivers/pinctrl/spear/pinctrl-spear320.c
drivers/pinctrl/spear/pinctrl-spear3xx.c
drivers/pinctrl/spear/pinctrl-spear3xx.h
drivers/platform/x86/acerhdf.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/intel_mid_thermal.c
drivers/platform/x86/sony-laptop.c
drivers/regulator/ab8500.c
drivers/regulator/anatop-regulator.c
drivers/regulator/core.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/gpio-regulator.c
drivers/regulator/max8649.c
drivers/regulator/palmas-regulator.c
drivers/regulator/s5m8767.c
drivers/regulator/tps65023-regulator.c
drivers/regulator/tps6524x-regulator.c
drivers/remoteproc/Kconfig
drivers/remoteproc/omap_remoteproc.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/rtc-ab8500.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-spear.c
drivers/rtc/rtc-twl.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/bnx2i/bnx2i.h
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi.c
drivers/scsi/scsi_wait_scan.c
drivers/scsi/sd.c
drivers/spi/spi-omap2-mcspi.c
drivers/staging/comedi/drivers.c
drivers/staging/gdm72xx/netlink_k.c
drivers/staging/iio/Documentation/device.txt
drivers/staging/iio/adc/Kconfig
drivers/staging/iio/adc/ad7606_core.c
drivers/staging/media/lirc/lirc_serial.c
drivers/staging/omapdrm/omap_fbdev.c
drivers/staging/ramster/zcache-main.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/zcache/zcache-main.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/Kconfig
drivers/thermal/spear_thermal.c
drivers/thermal/thermal_sys.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/serial/8250/8250.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/serial_txx9.c
drivers/tty/serial/sh-sci.c
drivers/usb/Makefile
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fsl_qe_udc.h
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/goku_udc.c
drivers/usb/gadget/lpc32xx_udc.c
drivers/usb/gadget/mv_udc_core.c
drivers/usb/gadget/omap_udc.c
drivers/usb/gadget/pxa25x_udc.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-sh.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/davinci.c
drivers/usb/musb/davinci.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/otg/twl6030-usb.c
drivers/usb/phy/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/metro-usb.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/scsiglue.c
drivers/vhost/vhost.c
drivers/video/backlight/Kconfig
drivers/video/backlight/ili9320.c
drivers/video/bfin_adv7393fb.c
drivers/video/broadsheetfb.c
drivers/video/console/Kconfig
drivers/video/mbx/mbxfb.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/venc.c
drivers/video/s3c-fb.c
drivers/video/savage/savagefb_driver.c
drivers/virtio/virtio_balloon.c
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/watchdog_dev.c
drivers/xen/events.c
drivers/xen/pci.c
drivers/xen/tmem.c
fs/btrfs/backref.c
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h
fs/btrfs/ordered-data.c
fs/btrfs/rcu-string.h [new file with mode: 0644]
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/ceph/addr.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/transport.c
fs/dcache.c
fs/ecryptfs/kthread.c
fs/ecryptfs/miscdev.c
fs/eventpoll.c
fs/exec.c
fs/exofs/ore.c
fs/exofs/ore_raid.c
fs/exofs/sys.c
fs/ext4/balloc.c
fs/ext4/ioctl.c
fs/fat/inode.c
fs/fifo.c
fs/fs-writeback.c
fs/fuse/control.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/hfsplus/ioctl.c
fs/hfsplus/wrapper.c
fs/locks.c
fs/nfs/callback.c
fs/nfs/callback_xdr.c
fs/nfs/client.c
fs/nfs/direct.c
fs/nfs/idmap.c
fs/nfs/inode.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/proc.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/nfs4state.c
fs/nilfs2/gcinode.c
fs/nilfs2/segment.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/quota_global.c
fs/open.c
fs/proc/base.c
fs/pstore/inode.c
fs/pstore/platform.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/ramfs/file-nommu.c
fs/splice.c
fs/ubifs/debug.c
fs/ubifs/find.c
fs/ubifs/sb.c
fs/udf/super.c
fs/xfs/xfs_alloc.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
include/acpi/acexcep.h
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi.h
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acgcc.h
include/acpi/platform/aclinux.h
include/asm-generic/bug.h
include/asm-generic/dma-contiguous.h
include/asm-generic/pgtable.h
include/drm/drm_crtc.h
include/drm/drm_pciids.h
include/drm/exynos_drm.h
include/linux/aio.h
include/linux/blkdev.h
include/linux/bootmem.h
include/linux/capability.h
include/linux/ceph/messenger.h
include/linux/clockchips.h
include/linux/compaction.h
include/linux/compiler-gcc.h
include/linux/cpuidle.h
include/linux/device.h
include/linux/eventpoll.h
include/linux/frontswap.h [new file with mode: 0644]
include/linux/fs.h
include/linux/fuse.h
include/linux/gpio.h
include/linux/hrtimer.h
include/linux/i2c-mux-pinctrl.h [new file with mode: 0644]
include/linux/init_task.h
include/linux/input.h
include/linux/irq.h
include/linux/kernel.h
include/linux/kmsg_dump.h
include/linux/kvm_host.h
include/linux/memblock.h
include/linux/mm_types.h
include/linux/mmc/sdhci-spear.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/moduleparam.h
include/linux/netfilter/xt_HMARK.h
include/linux/nfs_fs_sb.h
include/linux/nfs_xdr.h
include/linux/pata_arasan_cf_data.h
include/linux/perf_event.h
include/linux/platform_data/spear_thermal.h [deleted file]
include/linux/prctl.h
include/linux/pstore_ram.h
include/linux/pxa2xx_ssp.h
include/linux/radix-tree.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/rpmsg.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/spi/pxa2xx_spi.h
include/linux/splice.h
include/linux/swap.h
include/linux/swapfile.h [new file with mode: 0644]
include/linux/swapops.h
include/linux/tcp.h
include/linux/thermal.h
include/linux/usb/hcd.h
include/linux/vga_switcheroo.h
include/linux/videodev2.h
include/net/bluetooth/hci.h
include/net/inetpeer.h
include/net/ip_vs.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/phonet/gprs.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
include/scsi/libsas.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/sound/tea575x-tuner.h
include/target/target_core_fabric.h
include/trace/events/rcu.h
init/main.c
ipc/shm.c
kernel/cgroup.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/irq/chip.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/panic.c
kernel/pid_namespace.c
kernel/power/hibernate.c
kernel/power/user.c
kernel/printk.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/relay.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/idle_task.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/smpboot.c
kernel/sys.c
kernel/time/clockevents.c
kernel/time/ntp.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/watchdog.c
lib/Kconfig.debug
lib/btree.c
lib/dma-debug.c
lib/fault-inject.c
lib/radix-tree.c
lib/raid6/recov.c
lib/raid6/recov_ssse3.c
lib/spinlock_debug.c
mm/Kconfig
mm/Makefile
mm/bootmem.c
mm/compaction.c
mm/frontswap.c [new file with mode: 0644]
mm/internal.h
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/nobootmem.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_cgroup.c
mm/page_io.c
mm/pagewalk.c
mm/percpu-vm.c
mm/shmem.c
mm/sparse.c
mm/swapfile.c
mm/vmscan.c
net/8021q/vlan.c
net/9p/protocol.c
net/9p/trans_virtio.c
net/appletalk/ddp.c
net/ax25/af_ax25.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/Kconfig
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bridge/br_if.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/caif/caif_dev.c
net/caif/caif_socket.c
net/can/raw.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/core/dev.c
net/core/drop_monitor.c
net/core/filter.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/netpoll.c
net/core/netprio_cgroup.c
net/core/skbuff.c
net/ieee802154/dgram.c
net/ipv4/cipso_ipv4.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ipmr.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/iucv/af_iucv.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/mac80211/agg-rx.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac802154/tx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nfnetlink.c
net/netfilter/xt_HMARK.c
net/netfilter/xt_set.c
net/nfc/llcp/sock.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/phonet/af_phonet.c
net/phonet/datagram.c
net/phonet/pep-gprs.c
net/phonet/pep.c
net/phonet/pn_dev.c
net/phonet/pn_netlink.c
net/phonet/socket.c
net/phonet/sysctl.c
net/rxrpc/ar-peer.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/output.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svc.c
net/wireless/ibss.c
net/wireless/reg.c
net/wireless/util.c
scripts/get_maintainer.pl
scripts/gfp-translate [changed mode: 0644->0755]
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
sound/core/compress_offload.c
sound/i2c/other/tea575x-tuner.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/Kconfig
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_auto_parser.h
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/fsl/imx-audmux.c
sound/soc/pxa/pxa-ssp.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/tegra/tegra30_ahub.c
sound/soc/tegra/tegra_wm8903.c
sound/usb/6fire/firmware.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/mixer_maps.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/stream.c
tools/hv/hv_kvp_daemon.c
tools/perf/MANIFEST
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/design.txt
tools/perf/ui/browsers/annotate.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/bitops.h
tools/perf/util/map.c
tools/perf/util/pager.c
tools/perf/util/probe-event.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/trace-event-parse.c
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
virt/kvm/assigned-dev.c
virt/kvm/eventfd.c
virt/kvm/irq_comm.c
virt/kvm/kvm_main.c

index 2909c33bc54e231057fe06852d249d52f4439b15..658003aa94468687849d482a745fc61951d67e88 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -111,6 +111,7 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index 679ce354312281846e4ef2decee0689b0775c985..beef30c046b0d3181bfc353d15704bb4bdaf3da6 100644 (file)
@@ -1,26 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-                    - Allocated
-                    - Commands in Q
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
 Description:    This is a read-only file. Indicates the status of the device.
-
-What:           /sys/block/rssd*/flags
-Date:           May 2012
-KernelVersion:  3.5
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps the flags in port and driver
-                data structure
index 5bc8a476c15ed5030d08d11778e6add865a89f9c..cfedf63cce151d2bdb9237ed58726a136f4cc065 100644 (file)
@@ -219,6 +219,7 @@ What:               /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
 What:          /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
@@ -273,6 +274,7 @@ What:               /sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
 What:          /sys/.../iio:deviceX/in_voltageX_scale_available
 What:          /sys/.../iio:deviceX/in_voltage-voltage_scale_available
 What:          /sys/.../iio:deviceX/out_voltageX_scale_available
+What:          /sys/.../iio:deviceX/out_altvoltageX_scale_available
 What:          /sys/.../iio:deviceX/in_capacitance_scale_available
 KernelVersion: 2.635
 Contact:       linux-iio@vger.kernel.org
@@ -298,14 +300,19 @@ Description:
                gives the 3dB frequency of the filter in Hz.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_raw
 KernelVersion: 2.6.37
 Contact:       linux-iio@vger.kernel.org
 Description:
                Raw (unscaled, no bias etc.) output voltage for
                channel Y.  The number must always be specified and
                unique if the output corresponds to a single channel.
+               While DAC like devices typically use out_voltage,
+               a continuous frequency generating device, such as
+               a DDS or PLL should use out_altvoltage.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY&Z_raw
 KernelVersion: 2.6.37
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -316,6 +323,8 @@ Description:
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown_mode
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown_mode
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -330,6 +339,8 @@ Description:
 
 What:          /sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
 What:          /sys/.../iio:deviceX/out_voltage_powerdown_mode_available
+What:          /sys/.../iio:deviceX/out_altvotlageY_powerdown_mode_available
+What:          /sys/.../iio:deviceX/out_altvoltage_powerdown_mode_available
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -338,6 +349,8 @@ Description:
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown
 KernelVersion: 2.6.38
 Contact:       linux-iio@vger.kernel.org
 Description:
@@ -346,6 +359,24 @@ Description:
                normal operation. Y may be suppressed if all outputs are
                controlled together.
 
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency
+KernelVersion: 3.4.0
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Output frequency for channel Y in Hz. The number must always be
+               specified and unique if the output corresponds to a single
+               channel.
+
+What:          /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_phase
+KernelVersion: 3.4.0
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Phase in radians of one frequency/clock output Y
+               (out_altvoltageY) relative to another frequency/clock output
+               (out_altvoltageZ) of the device X. The number must always be
+               specified and unique if the output corresponds to a single
+               channel.
+
 What:          /sys/bus/iio/devices/iio:deviceX/events
 KernelVersion: 2.6.35
 Contact:       linux-iio@vger.kernel.org
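
The hunk above extends the IIO sysfs ABI with out_altvoltageY_* attributes for continuous frequency generators (DDS/PLL parts). As a rough illustration of how user space might drive the new out_altvoltageY_frequency attribute, here is a minimal C sketch; the device index (iio:device0), channel number (0) and the 10 MHz value are assumptions for illustration, not taken from the patch.

/* Minimal sketch: program a hypothetical DDS channel via the IIO sysfs ABI
 * documented above. Device index, channel and frequency are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *attr =
		"/sys/bus/iio/devices/iio:device0/out_altvoltage0_frequency";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* The ABI text above specifies the frequency in Hz. */
	fprintf(f, "%lu\n", 10000000UL);

	fclose(f);
	return EXIT_SUCCESS;
}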
index db1ad7e34fc3a00e14be0c1045564fe6c98bd37e..938ef71e2035e7c04203c7b65d2a120dbd96944a 100644 (file)
@@ -142,13 +142,14 @@ KernelVersion:    3.4
 Contact:       linux-mtd@lists.infradead.org
 Description:
                This allows the user to examine and adjust the criteria by which
-               mtd returns -EUCLEAN from mtd_read().  If the maximum number of
-               bit errors that were corrected on any single region comprising
-               an ecc step (as reported by the driver) equals or exceeds this
-               value, -EUCLEAN is returned.  Otherwise, absent an error, 0 is
-               returned.  Higher layers (e.g., UBI) use this return code as an
-               indication that an erase block may be degrading and should be
-               scrutinized as a candidate for being marked as bad.
+               mtd returns -EUCLEAN from mtd_read() and mtd_read_oob().  If the
+               maximum number of bit errors that were corrected on any single
+               region comprising an ecc step (as reported by the driver) equals
+               or exceeds this value, -EUCLEAN is returned.  Otherwise, absent
+               an error, 0 is returned.  Higher layers (e.g., UBI) use this
+               return code as an indication that an erase block may be
+               degrading and should be scrutinized as a candidate for being
+               marked as bad.
 
                The initial value may be specified by the flash device driver.
                If not, then the default value is ecc_strength.
@@ -167,7 +168,7 @@ Description:
                block degradation, but high enough to avoid the consequences of
                a persistent return value of -EUCLEAN on devices where sticky
                bitflips occur.  Note that if bitflip_threshold exceeds
-               ecc_strength, -EUCLEAN is never returned by mtd_read().
+               ecc_strength, -EUCLEAN is never returned by the read operations.
                Conversely, if bitflip_threshold is zero, -EUCLEAN is always
                returned, absent a hard error.
 
index 676bc46f9c52a476b3c8c280cca437d756726235..cda0dfb6769aee5d0fcadb8a30b4d19658259bc7 100644 (file)
@@ -3988,7 +3988,7 @@ interface and may change in the future.</para>
            from RGB to Y'CbCr color space.
            </entry>
          </row>
-         <row id = "v4l2-jpeg-chroma-subsampling">
+         <row>
            <entrytbl spanname="descr" cols="2">
              <tbody valign="top">
                <row>
index f5ac15ed0549f21c009e5adaceb05eb22449bac9..e58934c92895f159fc3200946a0d75ed50434e84 100644 (file)
@@ -986,13 +986,13 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
          <row id="V4L2-PIX-FMT-Y4">
            <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
            <entry>'Y04 '</entry>
-           <entry>Old 4-bit greyscale format. Only the least significant 4 bits of each byte are used,
+           <entry>Old 4-bit greyscale format. Only the most significant 4 bits of each byte are used,
 the other bits are set to 0.</entry>
          </row>
          <row id="V4L2-PIX-FMT-Y6">
            <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
            <entry>'Y06 '</entry>
-           <entry>Old 6-bit greyscale format. Only the least significant 6 bits of each byte are used,
+           <entry>Old 6-bit greyscale format. Only the most significant 6 bits of each byte are used,
 the other bits are set to 0.</entry>
          </row>
        </tbody>
index 015c561754b7cd1ce30817ac66e67a5acf049221..008c2d73a484622d8343752391fd5312c3ff2793 100644 (file)
@@ -560,6 +560,7 @@ and discussions on the V4L mailing list.</revremark>
     &sub-g-tuner;
     &sub-log-status;
     &sub-overlay;
+    &sub-prepare-buf;
     &sub-qbuf;
     &sub-querybuf;
     &sub-querycap;
@@ -567,7 +568,6 @@ and discussions on the V4L mailing list.</revremark>
     &sub-query-dv-preset;
     &sub-query-dv-timings;
     &sub-querystd;
-    &sub-prepare-buf;
     &sub-reqbufs;
     &sub-s-hw-freq-seek;
     &sub-streamon;
index 765549ff8a71172477d04c9e7d633c4a1282f31b..a2474ecb574acd06c533f7a22bd4c5fbccacc4c6 100644 (file)
@@ -108,10 +108,9 @@ information.</para>
 /></entry>
          </row>
          <row>
-           <entry>__u32</entry>
+           <entry>struct&nbsp;v4l2_format</entry>
            <entry><structfield>format</structfield></entry>
-           <entry>Filled in by the application, preserved by the driver.
-           See <xref linkend="v4l2-format" />.</entry>
+           <entry>Filled in by the application, preserved by the driver.</entry>
          </row>
          <row>
            <entry>__u32</entry>
index e8714aa1643343de973e32f5abfa5e331b26bb7d..98a856f9ec30123b4c2788247d9e5176f2990efa 100644 (file)
@@ -89,7 +89,7 @@
          <row>
            <entry></entry>
            <entry>&v4l2-event-frame-sync;</entry>
-            <entry><structfield>frame</structfield></entry>
+            <entry><structfield>frame_sync</structfield></entry>
            <entry>Event data for event V4L2_EVENT_FRAME_SYNC.</entry>
          </row>
          <row>
index e3d5afcdafbb5b03cf9994478e3e0d8139442dda..0a4b90fcf2dab77b36ed1e52749040ddcc97a113 100644 (file)
@@ -284,13 +284,6 @@ These controls are described in <xref
            processing controls. These controls are described in <xref
            linkend="image-process-controls" />.</entry>
          </row>
-         <row>
-           <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
-           <entry>0x9d0000</entry>
-           <entry>The class containing JPEG compression controls.
-These controls are described in <xref
-               linkend="jpeg-controls" />.</entry>
-         </row>
        </tbody>
       </tgroup>
     </table>
index 57aae7765c74e7a7ed60b51bd243ee94193b7c80..65610bf52ebffbad7e13d8f522df603487ef59e2 100644 (file)
@@ -60,4 +60,4 @@ Introduction
   Document Author
   ---------------
 
-  Viresh Kumar <viresh.kumar@st.com>, (c) 2010-2012 ST Microelectronics
+  Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
index 32e48797a14f80de6ebff7a441e489021bfce58a..9884681535ee36bf03a7d7baaa54abb36360d53d 100644 (file)
@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-       The salt is appended when hashing, digests are stored continuously and
-       the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-       The salt is prepended when hashing and each digest is
-       padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-       uint8_t signature[8]
-               "verity\0\0";
-
-       uint8_t version;
-               1 - current format
-
-       uint8_t data_block_bits;
-               log2(data block size)
-
-       uint8_t hash_block_bits;
-               log2(hash block size)
-
-       uint8_t pad1[1];
-               zero padding
-
-       uint16_t salt_size;
-               big-endian salt size
-
-       uint8_t pad2[2];
-               zero padding
-
-       uint32_t data_blocks_hi;
-               big-endian high 32 bits of the 64-bit number of data blocks
-
-       uint32_t data_blocks_lo;
-               big-endian low 32 bits of the 64-bit number of data blocks
-
-       uint8_t algorithm[16];
-               cryptographic algorithm
-
-       uint8_t salt[384];
-               salt (the salt size is specified above)
-
-       uint8_t pad3[88];
-               zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-       4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
new file mode 100644 (file)
index 0000000..ae8af16
--- /dev/null
@@ -0,0 +1,93 @@
+Pinctrl-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses pin multiplexing to
+route the I2C signals, and represents the pin multiplexing configuration
+using the pinctrl device tree bindings.
+
+                                 +-----+  +-----+
+                                 | dev |  | dev |
+    +------------------------+   +-----+  +-----+
+    | SoC                    |      |        |
+    |                   /----|------+--------+
+    |   +---+   +------+     | child bus A, on first set of pins
+    |   |I2C|---|Pinmux|     |
+    |   +---+   +------+     | child bus B, on second set of pins
+    |                   \----|------+--------+--------+
+    |                        |      |        |        |
+    +------------------------+  +-----+  +-----+  +-----+
+                                | dev |  | dev |  | dev |
+                                +-----+  +-----+  +-----+
+
+Required properties:
+- compatible: i2c-mux-pinctrl
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+
+Also required are:
+
+* Standard pinctrl properties that specify the pin mux state for each child
+  bus. See ../pinctrl/pinctrl-bindings.txt.
+
+* Standard I2C mux properties. See mux.txt in this directory.
+
+* I2C child bus nodes. See mux.txt in this directory.
+
+For each named state defined in the pinctrl-names property, an I2C child bus
+will be created. I2C child bus numbers are assigned based on the index into
+the pinctrl-names property.
+
+The only exception is that no bus will be created for a state named "idle". If
+such a state is defined, it must be the last entry in pinctrl-names. For
+example:
+
+       pinctrl-names = "ddc", "pta", "idle"  ->  ddc = bus 0, pta = bus 1
+       pinctrl-names = "ddc", "idle", "pta"  ->  Invalid ("idle" not last)
+       pinctrl-names = "idle", "ddc", "pta"  ->  Invalid ("idle" not last)
+
+Whenever an access is made to a device on a child bus, the relevant pinctrl
+state will be programmed into hardware.
+
+If an idle state is defined, whenever an access is not being made to a device
+on a child bus, the idle pinctrl state will be programmed into hardware.
+
+If an idle state is not defined, the most recently used pinctrl state will be
+left programmed into hardware whenever no access is being made of a device on
+a child bus.
+
+Example:
+
+       i2cmux {
+               compatible = "i2c-mux-pinctrl";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               i2c-parent = <&i2c1>;
+
+               pinctrl-names = "ddc", "pta", "idle";
+               pinctrl-0 = <&state_i2cmux_ddc>;
+               pinctrl-1 = <&state_i2cmux_pta>;
+               pinctrl-2 = <&state_i2cmux_idle>;
+
+               i2c@0 {
+                       reg = <0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       eeprom {
+                               compatible = "eeprom";
+                               reg = <0x50>;
+                       };
+               };
+
+               i2c@1 {
+                       reg = <1>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       eeprom {
+                               compatible = "eeprom";
+                               reg = <0x50>;
+                       };
+               };
+       };
+
index a00c94ccbdeeb871c440b770de814d11c6f757d7..0b96e5737d3a569ca4acfec137cc6870d1b56880 100644 (file)
@@ -2,6 +2,7 @@
 
 Required properties:
 - compatible : "fsl,mma8450".
+- reg: the I2C address of MMA8450
 
 Example:
 
index 19f6af47a792986c23a9ed033e15e8639ec5c51b..baf07987ae6863c68b39efa5ba1227a327009337 100644 (file)
@@ -46,8 +46,8 @@ Examples:
 
 ecspi@70010000 { /* ECSPI1 */
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
+                  <&gpio4 25 0>; /* GPIO4_25 */
        status = "okay";
 
        pmic: mc13892@0 {
index c7e404b3ef0515b5527583cae877ab48c5d69595..fea541ee8b34a18fbf1aaef9276c17ef992db830 100644 (file)
@@ -29,6 +29,6 @@ esdhc@70008000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70008000 0x4000>;
        interrupts = <2>;
-       cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
-       wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+       cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
+       wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
 };
index 7ab9e1a2d8bec19fac2283b5703fae60d2858998..4616fc28ee86e83793550a0ceda9fa25e4b46167 100644 (file)
@@ -19,6 +19,6 @@ ethernet@83fec000 {
        reg = <0x83fec000 0x4000>;
        interrupts = <87>;
        phy-mode = "mii";
-       phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+       phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
        local-mac-address = [00 04 9F 01 1B B9];
 };
index 82b43f9158579cdf94b4134ab59d111f45c08a11..a4119f6422d9527e1a5e50d672c70b62aa9d0ad3 100644 (file)
@@ -1626,3 +1626,5 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11               1587
 MX6Q_PAD_SD2_DAT3__GPIO_1_12                   1588
 MX6Q_PAD_SD2_DAT3__SJC_DONE                    1589
 MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3              1590
+MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID          1591
+MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID              1592
index 9841057d112bb6e4c46299166ef7c5a9a6da5521..4256a6df9b79355d8c17f5f393c3956d6a5a78ca 100644 (file)
@@ -17,6 +17,6 @@ ecspi@70010000 {
        reg = <0x70010000 0x4000>;
        interrupts = <36>;
        fsl,spi-num-chipselects = <2>;
-       cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
-                  <&gpio3 25 0>; /* GPIO4_25 */
+       cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
+                  <&gpio3 25 0>; /* GPIO3_25 */
 };
diff --git a/Documentation/devicetree/bindings/thermal/spear-thermal.txt b/Documentation/devicetree/bindings/thermal/spear-thermal.txt
new file mode 100644 (file)
index 0000000..93e3b67
--- /dev/null
@@ -0,0 +1,14 @@
+* SPEAr Thermal
+
+Required properties:
+- compatible : "st,thermal-spear1340"
+- reg : Address range of the thermal registers
+- st,thermal-flags: flags used to enable thermal sensor
+
+Example:
+
+       thermal@fc000000 {
+               compatible = "st,thermal-spear1340";
+               reg = <0xfc000000 0x1000>;
+               st,thermal-flags = <0x7000>;
+       };
index 6eab91747a86a03c45c60d9bdca74fb28215b3a3..db4d3af3643c407ffad6e4ead2dd81d8c4ab36cf 100644 (file)
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry.  Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.
 
+ad     Avionic Design GmbH
 adi    Analog Devices, Inc.
 amcc   Applied Micro Circuits Corporation (APM, formerly AMCC)
 apm    Applied Micro Circuits Corporation (APM)
index 84d46c0c71a37d627a5773e90172987481edc1a2..c86b50c03ea8f02276d2a35deb4a6f5822d78be9 100644 (file)
@@ -6,7 +6,9 @@ Supported chips:
     Prefix: 'coretemp'
     CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
                               0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
-                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
+                              0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
+                              0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
+                              0x36 (Cedar Trail Atom)
     Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
                Volume 3A: System Programming Guide
                http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
@@ -52,6 +54,17 @@ Some information comes from ark.intel.com
 
 Process                Processor                                       TjMax(C)
 
+22nm           Core i5/i7 Processors
+               i7 3920XM, 3820QM, 3720QM, 3667U, 3520M         105
+               i5 3427U, 3360M/3320M                           105
+               i7 3770/3770K                                   105
+               i5 3570/3570K, 3550, 3470/3450                  105
+               i7 3770S                                        103
+               i5 3570S/3550S, 3475S/3470S/3450S               103
+               i7 3770T                                        94
+               i5 3570T                                        94
+               i5 3470T                                        91
+
 32nm           Core i3/i5/i7 Processors
                i7 660UM/640/620, 640LM/620, 620M, 610E         105
                i5 540UM/520/430, 540M/520/450/430              105
@@ -65,6 +78,11 @@ Process              Processor                                       TjMax(C)
                U3400                                           105
                P4505/P4500                                     90
 
+32nm           Atom Processors
+               Z2460                                           90
+               D2700/2550/2500                                 100
+               N2850/2800/2650/2600                            100
+
 45nm           Xeon Processors 5400 Quad-Core
                X5492, X5482, X5472, X5470, X5460, X5450        85
                E5472, E5462, E5450/40/30/20/10/05              85
@@ -85,6 +103,8 @@ Process              Processor                                       TjMax(C)
                N475/470/455/450                                100
                N280/270                                        90
                330/230                                         125
+               E680/660/640/620                                90
+               E680T/660T/640T/620T                            110
 
 45nm           Core2 Processors
                Solo ULV SU3500/3300                            100
index 506c7390c2b90e37b0df7f1b8e93dc7ff2b540fd..13f1aa09b938c09c91421ad55dc168ef67cd8347 100644 (file)
@@ -86,7 +86,7 @@ There is also a gitweb interface available at
 http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
 
 More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
+http://horms.net/projects/kexec/
 
 3) Unpack the tarball with the tar command, as follows:
 
index c45513d806abc48d3e11fa756e4f11138fb179ce..a92c5ebf373e2bf4bea68072b58fbc0471ad9c13 100644 (file)
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        sched_debug     [KNL] Enables verbose scheduler debug messages.
 
+       skew_tick=      [KNL] Offset the periodic timer tick per cpu to mitigate
+                       xtime_lock contention on larger systems, and/or RCU lock
+                       contention on all systems with CONFIG_MAXSMP set.
+                       Format: { "0" | "1" }
+                       0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1")
+                       1 -- enable.
+                       Note: increases power consumption, thus should only be
+                       enabled if running jitter-sensitive (HPC/RT) workloads.
+
        security=       [SECURITY] Choose a security module to enable at boot.
                        If this boot parameter is not specified, only the first
                        security module asking for security registration will be
index ab1e8d7004c5238f9d4b30ec0137fd5f3e298226..5cb9a1972460fdcd2909f3a9fd06dd4c265a2921 100644 (file)
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC
 (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
 FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing this driver.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
+MAC 10/100 Universal version 4.0 have been used for developing this driver.
 
 This driver supports both the platform bus and PCI.
 
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature.
 When one or more packets are received, an interrupt happens. The interrupts
 are not queued so the driver has to scan all the descriptors in the ring during
 the receive process.
-This is based on NAPI so the interrupt handler signals only if there is work to be
-done, and it exits.
+This is based on NAPI so the interrupt handler signals only if there is work
+to be done, and it exits.
 Then the poll method will be scheduled at some future point.
 The incoming packets are stored, by the DMA, in a list of pre-allocated socket
 buffers in order to avoid the memcpy (Zero-copy).
 
 4.3) Timer-Driver Interrupt
-Instead of having the device that asynchronously notifies the frame receptions, the
-driver configures a timer to generate an interrupt at regular intervals.
-Based on the granularity of the timer, the frames that are received by the device
-will experience different levels of latency. Some NICs have dedicated timer
-device to perform this task. STMMAC can use either the RTC device or the TMU
-channel 2  on STLinux platforms.
+Instead of having the device asynchronously notify frame receptions,
+the driver configures a timer to generate an interrupt at regular intervals.
+Based on the granularity of the timer, the frames that are received by the
+device will experience different levels of latency. Some NICs have dedicated
+timer device to perform this task. STMMAC can use either the RTC device or the
+TMU channel 2  on STLinux platforms.
 The timer frequency can be passed to the driver as a parameter; when changing it,
 take care of both hardware capability and network stability/performance impact.
-Several performance tests on STM platforms showed this optimisation allows to spare
-the CPU while having the maximum throughput.
+Several performance tests on STM platforms showed that this optimisation
+spares the CPU while achieving maximum throughput.
 
 4.4) WOL
-Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC
-core.
+The Wake-on-LAN feature, through Magic and Unicast frames, is supported for
+the GMAC core.
 
 4.5) DMA descriptors
 Driver handles both normal and enhanced descriptors. The latter has been only
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform
 These are included in the include/linux/stmmac.h header file
 and detailed below as well:
 
- struct plat_stmmacenet_data {
+struct plat_stmmacenet_data {
+       char *phy_bus_name;
        int bus_id;
        int phy_addr;
        int interface;
@@ -124,19 +125,24 @@ and detailed below as well:
        void (*bus_setup)(void __iomem *ioaddr);
        int (*init)(struct platform_device *pdev);
        void (*exit)(struct platform_device *pdev);
+       void *custom_cfg;
+       void *custom_data;
        void *bsp_priv;
  };
 
 Where:
+ o phy_bus_name: phy bus name to attach to the stmmac.
  o bus_id: bus identifier.
  o phy_addr: the physical address can be passed from the platform.
            If it is set to -1 the driver will automatically
            detect it at run-time by probing all the 32 addresses.
  o interface: PHY device's interface.
  o mdio_bus_data: specific platform fields for the MDIO bus.
- o pbl: the Programmable Burst Length is maximum number of beats to
+ o dma_cfg: internal DMA parameters
+   o pbl: the Programmable Burst Length is maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
+   o fixed_burst/mixed_burst/burst_len
  o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
  o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -160,8 +166,9 @@ Where:
             this is sometime necessary on some platforms (e.g. ST boxes)
             where the HW needs to have set some PIO lines or system cfg
             registers.
- o custom_cfg: this is a custom configuration that can be passed while
-             initialising the resources.
+ o custom_cfg/custom_data: this is a custom configuration that can be passed
+                          while initialising the resources.
+ o bsp_priv: another private pointer.
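+
+ A minimal platform-data sketch (the field values are illustrative only;
+ PHY_INTERFACE_MODE_MII comes from <linux/phy.h>, and the structure itself
+ from include/linux/stmmac.h as listed above):
+
+       static struct plat_stmmacenet_data stmmac_pdata = {
+               .bus_id    = 1,
+               .phy_addr  = -1,        /* probe all 32 PHY addresses at run-time */
+               .interface = PHY_INTERFACE_MODE_MII,
+               .has_gmac  = 1,
+       };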
 
 For the MDIO bus we have:
 
@@ -180,7 +187,6 @@ Where:
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
-
 For DMA engine we have the following internal fields that should be
 tuned according to the HW capabilities.
 
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644 (file)
index 0000000..f7be84f
--- /dev/null
@@ -0,0 +1,57 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
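+
+For example (a user-space sketch; PR_SET_NO_NEW_PRIVS may need to be defined
+by hand when building against pre-3.5 kernel headers):
+
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/prctl.h>
+
+    #ifndef PR_SET_NO_NEW_PRIVS
+    #define PR_SET_NO_NEW_PRIVS 38
+    #endif
+
+    int main(void)
+    {
+            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+                    perror("prctl(NO_NEW_PRIVS)");
+                    return 1;
+            }
+            /* From here on, execve() cannot grant new privileges. */
+            execlp("id", "id", (char *)NULL);
+            perror("execlp");
+            return 1;
+    }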
+
+Be careful, though: LSMs might also not tighten constraints on exec
+in no_new_privs mode.  (This means that setting up a general-purpose
+service launcher to set no_new_privs before execing daemons may
+interfere with LSM-based sandboxing.)
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerably less
+dangerous than chroot by itself.
index f0ab5cf28fcae0a1ff0783bf8a4ddc477447f585..4a7b54bd37e8c18b2eebfcd21608a2488589641b 100644 (file)
@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short, something
    critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+   be considered if they fix a notable performance or interactivity issue.
+   As these fixes are not as obvious and have a higher risk of a subtle
+   regression, they should only be submitted by a distribution kernel
+   maintainer and include an addendum linking to a bugzilla entry if it
+   exists and additional information on the user-visible impact.
  - New device IDs and quirks are also accepted.
  - No "theoretical race condition" issues, unless an explanation of how the
    race can be exploited is also provided.
index 1733ab947a95d3849ff650e7252cfb1d0cb99d11..c087dbcf3535c2be5da5db5f8feeefaeaea9c88d 100644 (file)
@@ -32,7 +32,8 @@ temperature) and throttle appropriate devices.
 
 1.1 thermal zone device interface
 1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name,
-               int trips, void *devdata, struct thermal_zone_device_ops *ops)
+               int trips, int mask, void *devdata,
+               struct thermal_zone_device_ops *ops)
 
     This interface function adds a new thermal zone device (sensor) to
     /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
@@ -40,16 +41,17 @@ temperature) and throttle appropriate devices.
 
     name: the thermal zone name.
     trips: the total number of trip points this thermal zone supports.
+    mask: Bit string: If 'n'th bit is set, then trip point 'n' is writeable.
     devdata: device private data
     ops: thermal zone device call-backs.
        .bind: bind the thermal zone device with a thermal cooling device.
        .unbind: unbind the thermal zone device with a thermal cooling device.
        .get_temp: get the current temperature of the thermal zone.
-       .get_mode: get the current mode (user/kernel) of the thermal zone.
-           - "kernel" means thermal management is done in kernel.
-           - "user" will prevent kernel thermal driver actions upon trip points
+       .get_mode: get the current mode (enabled/disabled) of the thermal zone.
+           - "enabled" means the kernel thermal management is enabled.
+           - "disabled" will prevent kernel thermal driver action upon trip points
              so that user applications can take charge of thermal management.
-       .set_mode: set the mode (user/kernel) of the thermal zone.
+       .set_mode: set the mode (enabled/disabled) of the thermal zone.
        .get_trip_type: get the type of certain trip point.
        .get_trip_temp: get the temperature above which the certain trip point
                        will be fired.
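+
+    A minimal registration sketch (my_data and my_ops stand in for a driver's
+    private data and populated call-backs; with mask 0x1 only trip point 0 is
+    writeable from user space, per the bit-string rule above):
+
+       struct thermal_zone_device *tz;
+
+       tz = thermal_zone_device_register("example", 3, 0x1, my_data, &my_ops);
+       if (IS_ERR(tz))
+               return PTR_ERR(tz);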
@@ -119,6 +121,7 @@ Thermal zone device sys I/F, created once it's registered:
     |---mode:                  Working mode of the thermal zone
     |---trip_point_[0-*]_temp: Trip point temperature
     |---trip_point_[0-*]_type: Trip point type
+    |---trip_point_[0-*]_hyst: Hysteresis value for this trip point
 
 Thermal cooling device sys I/F, created once it's registered:
 /sys/class/thermal/cooling_device[0-*]:
@@ -167,14 +170,14 @@ temp
        RO, Required
 
 mode
-       One of the predefined values in [kernel, user].
+       One of the predefined values in [enabled, disabled].
        This file gives information about the algorithm that is currently
        managing the thermal zone. It can be either default kernel based
        algorithm or user space application.
-       kernel  = Thermal management in kernel thermal zone driver.
-       user    = Preventing kernel thermal zone driver actions upon
-                 trip points so that user application can take full
-                 charge of the thermal management.
+       enabled         = enable Kernel Thermal management.
+       disabled        = Preventing kernel thermal zone driver actions upon
+                         trip points so that user application can take full
+                         charge of the thermal management.
        RW, Optional
 
 trip_point_[0-*]_temp
@@ -188,6 +191,11 @@ trip_point_[0-*]_type
        thermal zone.
        RO, Optional
 
+trip_point_[0-*]_hyst
+       The hysteresis value for a trip point, represented as an integer
+       Unit: Celsius
+       RW, Optional
+
 cdev[0-*]
        Sysfs link to the thermal cooling device node where the sys I/F
        for cooling device throttling control represents.
@@ -248,7 +256,7 @@ method, the sys I/F structure will be built like this:
 |thermal_zone1:
     |---type:                  acpitz
     |---temp:                  37000
-    |---mode:                  kernel
+    |---mode:                  enabled
     |---trip_point_0_temp:     100000
     |---trip_point_0_type:     critical
     |---trip_point_1_temp:     80000
index 930126698a0f5b0f6e2c458b53604d581b201063..2c9948379469c1dba5f8c7ef711567067727ff08 100644 (file)
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
 PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
 into the hash PTE second double word).
 
+4.75 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event.  When
+an event is triggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin.  The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
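+
+A user-space sketch (error handling mostly omitted; the GSI number 5 is
+arbitrary, and KVM_CREATE_IRQCHIP must succeed for irqfds to be usable):
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/eventfd.h>
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    int main(void)
+    {
+            int kvm = open("/dev/kvm", O_RDWR);
+            int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
+            int efd = eventfd(0, 0);
+            struct kvm_irqfd irqfd = { .fd = efd, .gsi = 5 };
+            uint64_t one = 1;
+
+            if (ioctl(vm, KVM_CREATE_IRQCHIP, 0) || ioctl(vm, KVM_IRQFD, &irqfd)) {
+                    perror("irqfd setup");
+                    return 1;
+            }
+            write(efd, &one, sizeof(one));        /* injects GSI 5 into the guest */
+
+            irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
+            ioctl(vm, KVM_IRQFD, &irqfd);         /* remove the irqfd again */
+            return 0;
+    }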
+
+
 5. The kvm_run structure
 ------------------------
 
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
new file mode 100644 (file)
index 0000000..37067cf
--- /dev/null
@@ -0,0 +1,278 @@
+Frontswap provides a "transcendent memory" interface for swap pages.
+In some environments, dramatic performance savings may be obtained because
+swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
+
+(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
+and the only necessary changes to the core kernel for transcendent memory;
+all other supporting code -- the "backends" -- is implemented as drivers.
+See the LWN.net article "Transcendent memory in a nutshell" for a detailed
+overview of frontswap and related kernel parts:
+https://lwn.net/Articles/454795/ )
+
+Frontswap is so named because it can be thought of as the opposite of
+a "backing" store for a swap device.  The storage is assumed to be
+a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
+to the requirements of transcendent memory (such as Xen's "tmem", or
+in-kernel compressed memory, aka "zcache", or future RAM-like devices);
+this pseudo-RAM device is not directly accessible or addressable by the
+kernel and is of unknown and possibly time-varying size.  The driver
+links itself to frontswap by calling frontswap_register_ops to set the
+frontswap_ops funcs appropriately and the functions it provides must
+conform to certain policies as follows:
+
+An "init" prepares the device to receive frontswap pages associated
+with the specified swap device number (aka "type").  A "store" will
+copy the page to transcendent memory and associate it with the type and
+offset associated with the page. A "load" will copy the page, if found,
+from transcendent memory into kernel memory, but will NOT remove the page
+from transcendent memory.  An "invalidate_page" will remove the page
+from transcendent memory and an "invalidate_area" will remove ALL pages
+associated with the swap type (e.g., like swapoff) and notify the "device"
+to refuse further stores with that swap type.
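+
+A backend registration sketch (only the operation names above come from this
+document; the exact prototypes shown mirror the in-tree zcache backend of this
+kernel generation and should be treated as an assumption):
+
+    static void my_init(unsigned type) { /* get ready for swap device "type" */ }
+
+    static int my_store(unsigned type, pgoff_t offset, struct page *page)
+    {
+            return -1;      /* reject: the page goes to the real swap device */
+    }
+
+    static int my_load(unsigned type, pgoff_t offset, struct page *page)
+    {
+            return -1;      /* nothing stored for this type/offset */
+    }
+
+    static void my_invalidate_page(unsigned type, pgoff_t offset) { }
+    static void my_invalidate_area(unsigned type) { }
+
+    static struct frontswap_ops my_ops = {
+            .init            = my_init,
+            .store           = my_store,
+            .load            = my_load,
+            .invalidate_page = my_invalidate_page,
+            .invalidate_area = my_invalidate_area,
+    };
+
+    /* from the backend's initialization code: */
+    frontswap_register_ops(&my_ops);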
+
+Once a page is successfully stored, a matching load on the page will normally
+succeed.  So when the kernel finds itself in a situation where it needs
+to swap out a page, it first attempts to use frontswap.  If the store returns
+success, the data has been successfully saved to transcendent memory and
+a disk write and, if the data is later read back, a disk read are avoided.
+If a store returns failure, transcendent memory has rejected the data, and the
+page can be written to swap as usual.
+
+If a backend chooses, frontswap can be configured as a "writethrough
+cache" by calling frontswap_writethrough().  In this mode, the reduction
+in swap device writes is lost (and also a non-trivial performance advantage)
+in order to allow the backend to arbitrarily "reclaim" space used to
+store frontswap pages to more completely manage its memory usage.
+
+Note that if a page is stored and the page already exists in transcendent memory
+(a "duplicate" store), either the store succeeds and the data is overwritten,
+or the store fails AND the page is invalidated.  This ensures stale data may
+never be obtained from frontswap.
+
+If properly configured, monitoring of frontswap is done via debugfs in
+the /sys/kernel/debug/frontswap directory.  The effectiveness of
+frontswap can be measured (across all swap devices) with:
+
+failed_stores  - how many store attempts have failed
+loads          - how many loads were attempted (all should succeed)
+succ_stores    - how many store attempts have succeeded
+invalidates    - how many invalidates were attempted
+
+A backend implementation may provide additional metrics.
+
+FAQ
+
+1) Where's the value?
+
+When a workload starts swapping, performance falls through the floor.
+Frontswap significantly increases performance in many such workloads by
+providing a clean, dynamic interface to read and write swap pages to
+"transcendent memory" that is otherwise not directly addressable to the kernel.
+This interface is ideal when data is transformed to a different form
+and size (such as with compression) or secretly moved (as might be
+useful for write-balancing for some RAM-like devices).  Swap pages (and
+evicted page-cache pages) are a great use for this kind of slower-than-RAM-
+but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
+cleancache) interface to transcendent memory provides a nice way to read
+and write -- and indirectly "name" -- the pages.
+
+Frontswap -- and cleancache -- with a fairly small impact on the kernel,
+provides a huge amount of flexibility for more dynamic, flexible RAM
+utilization in various system configurations:
+
+In the single kernel case, aka "zcache", pages are compressed and
+stored in local memory, thus increasing the total anonymous pages
+that can be safely kept in RAM.  Zcache essentially trades off CPU
+cycles used in compression/decompression for better memory utilization.
+Benchmarks have shown little or no impact when memory pressure is
+low while providing a significant performance improvement (25%+)
+on some workloads under high memory pressure.
+
+"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
+support for clustered systems.  Frontswap pages are locally compressed
+as in zcache, but then "remotified" to another system's RAM.  This
+allows RAM to be dynamically load-balanced back-and-forth as needed,
+i.e. when system A is overcommitted, it can swap to system B, and
+vice versa.  RAMster can also be configured as a memory server so
+many servers in a cluster can swap, dynamically as needed, to a single
+server configured with a large amount of RAM... without pre-configuring
+how much of the RAM is available for each of the clients!
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines.  This is really hard to do with RAM and efforts to do
+it well with no kernel changes have essentially failed (except in some
+well-publicized special-case workloads).
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization.  And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "selfballooning"), sudden unexpected
+memory pressure may result in swapping; frontswap allows those pages
+to be swapped to and from hypervisor RAM (if overall host system memory
+conditions allow), thus mitigating the potentially awful performance impact
+of unplanned swapping.
+
+A KVM implementation is underway and has been RFC'ed to lkml.  And,
+using frontswap, investigation is also underway on the use of NVM as
+a memory extension technology.
+
+2) Sure there may be performance advantages in some situations, but
+   what's the space/time overhead of frontswap?
+
+If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
+nothingness and the only overhead is a few extra bytes per swapon'ed
+swap device.  If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
+registers, the only added cost is one compare of a global variable against
+zero for every swap page read or written.  If CONFIG_FRONTSWAP is enabled
+AND a frontswap backend registers AND the backend fails every "store"
+request (i.e. provides no memory despite claiming it might),
+CPU overhead is still negligible -- and since every frontswap fail
+precedes a swap page write-to-disk, the system is highly likely
+to be I/O bound and using a small fraction of a percent of a CPU
+will be irrelevant anyway.
+
+As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
+registers, one bit is allocated for every swap page for every swap
+device that is swapon'd.  This is added to the EIGHT bits (which
+was sixteen until about 2.6.34) that the kernel already allocates
+for every swap page for every swap device that is swapon'd.  (Hugh
+Dickins has observed that frontswap could probably steal one of
+the existing eight bits, but let's worry about that minor optimization
+later.)  For very large swap disks (which are rare) on a standard
+4K pagesize, this is 1MB per 32GB swap.
+
+When swap pages are stored in transcendent memory instead of written
+out to disk, there is a side effect that this may create more memory
+pressure that can potentially outweigh the other advantages.  A
+backend, such as zcache, must implement policies to carefully (but
+dynamically) manage memory limits to ensure this doesn't happen.
+
+3) OK, how about a quick overview of what this frontswap patch does
+   in terms that a kernel hacker can grok?
+
+Let's assume that a frontswap "backend" has registered during
+kernel initialization; this registration indicates that this
+frontswap backend has access to some "memory" that is not directly
+accessible by the kernel.  Exactly how much memory it provides is
+entirely dynamic and random.
+
+Whenever a swap-device is swapon'd frontswap_init() is called,
+passing the swap device number (aka "type") as a parameter.
+This notifies frontswap to expect attempts to "store" swap pages
+associated with that number.
+
+Whenever the swap subsystem is readying a page to write to a swap
+device (c.f swap_writepage()), frontswap_store is called.  Frontswap
+consults with the frontswap backend and if the backend says it does NOT
+have room, frontswap_store returns -1 and the kernel swaps the page
+to the swap device as normal.  Note that the response from the frontswap
+backend is unpredictable to the kernel; it may choose to never accept a
+page, it could accept every ninth page, or it might accept every
+page.  But if the backend does accept a page, the data from the page
+has already been copied and associated with the type and offset,
+and the backend guarantees the persistence of the data.  In this case,
+frontswap sets a bit in the "frontswap_map" for the swap device
+corresponding to the page offset on the swap device to which it would
+otherwise have written the data.
+
+When the swap subsystem needs to swap-in a page (swap_readpage()),
+it first calls frontswap_load() which checks the frontswap_map to
+see if the page was earlier accepted by the frontswap backend.  If
+it was, the page of data is filled from the frontswap backend and
+the swap-in is complete.  If not, the normal swap-in code is
+executed to obtain the page of data from the real swap device.
+
+So every time the frontswap backend accepts a page, a swap device read
+and (potentially) a swap device write are replaced by a "frontswap backend
+store" and (possibly) a "frontswap backend load", which are presumably much
+faster.
+
+4) Can't frontswap be configured as a "special" swap device that is
+   just higher priority than any real swap device (e.g. like zswap,
+   or maybe swap-over-nbd/NFS)?
+
+No.  First, the existing swap subsystem doesn't allow for any kind of
+swap hierarchy.  Perhaps it could be rewritten to accommodate a hierarchy,
+but this would require fairly drastic changes.  Even if it were
+rewritten, the existing swap subsystem uses the block I/O layer which
+assumes a swap device is fixed size and any page in it is linearly
+addressable.  Frontswap barely touches the existing swap subsystem,
+and works around the constraints of the block I/O subsystem to provide
+a great deal of flexibility and dynamicity.
+
+For example, the acceptance of any swap page by the frontswap backend is
+entirely unpredictable. This is critical to the definition of frontswap
+backends because it grants completely dynamic discretion to the
+backend.  In zcache, one cannot know a priori how compressible a page is.
+"Poorly" compressible pages can be rejected, and "poorly" can itself be
+defined dynamically depending on current memory constraints.
+
+Further, frontswap is entirely synchronous whereas a real swap
+device is, by definition, asynchronous and uses block I/O.  The
+block I/O layer is not only unnecessary, but may perform "optimizations"
+that are inappropriate for a RAM-oriented device including delaying
+the write of some pages for a significant amount of time.  Synchrony is
+required to ensure the dynamicity of the backend and to avoid thorny race
+conditions that would unnecessarily and greatly complicate frontswap
+and/or the block I/O subsystem.  That said, only the initial "store"
+and "load" operations need be synchronous.  A separate asynchronous thread
+is free to manipulate the pages stored by frontswap.  For example,
+the "remotification" thread in RAMster uses standard asynchronous
+kernel sockets to move compressed frontswap pages to a remote machine.
+Similarly, a KVM guest-side implementation could do in-guest compression
+and use "batched" hypercalls.
+
+In a virtualized environment, the dynamicity allows the hypervisor
+(or host OS) to do "intelligent overcommit".  For example, it can
+choose to accept pages only until host-swapping might be imminent,
+then force guests to do their own swapping.
+
+There is a downside to the transcendent memory specifications for
+frontswap:  Since any "store" might fail, there must always be a real
+slot on a real swap device to swap the page.  Thus frontswap must be
+implemented as a "shadow" to every swapon'd device with the potential
+capability of holding every page that the swap device might have held
+and the possibility that it might hold no pages at all.  This means
+that frontswap cannot contain more pages than the total capacity of the
+swapon'd swap devices.  For example, if NO swap device is configured on some
+installation, frontswap is useless.  Swapless portable devices
+can still use frontswap but a backend for such devices must configure
+some kind of "ghost" swap device and ensure that it is never used.
+
+5) Why this weird definition about "duplicate stores"?  If a page
+   has been previously successfully stored, can't it always be
+   successfully overwritten?
+
+Nearly always it can, but no, sometimes it cannot.  Consider an example
+where data is compressed and the original 4K page has been compressed
+to 1K.  Now an attempt is made to overwrite the page with data that
+is non-compressible and so would take the entire 4K.  But the backend
+has no more space.  In this case, the store must be rejected.  Whenever
+frontswap rejects a store that would overwrite, it also must invalidate
+the old data and ensure that it is no longer accessible.  Since the
+swap subsystem then writes the new data to the real swap device,
+this is the correct course of action to ensure coherency.
+
+6) What is frontswap_shrink for?
+
+When the (non-frontswap) swap subsystem swaps out a page to a real
+swap device, that page is only taking up low-value pre-allocated disk
+space.  But if frontswap has placed a page in transcendent memory, that
+page may be taking up valuable real estate.  The frontswap_shrink
+routine allows code outside of the swap subsystem to force pages out
+of the memory managed by frontswap and back into kernel-addressable memory.
+For example, in RAMster, a "suction driver" thread will attempt
+to "repatriate" pages sent to a remote machine back to the local machine;
+this is driven using the frontswap_shrink mechanism when memory pressure
+subsides.
+
+7) Why does the frontswap patch create the new include file swapfile.h?
+
+The frontswap code depends on some swap-subsystem-internal data
+structures that have, over the years, moved back and forth between
+static and global.  This seemed a reasonable compromise:  Define
+them as global but declare them in a new include file that isn't
+included by the large number of source files that include swap.h.
+
+Dan Magenheimer, last updated April 9, 2012
index 55f0fda602ecc69d5242ca5181c002ab2a8dd983..41c06470a59c40772855ef6e6fe09cd95f7d010c 100644 (file)
@@ -242,13 +242,6 @@ W: http://www.lesswatts.org/projects/acpi/
 S:     Supported
 F:     drivers/acpi/fan.c
 
-ACPI PROCESSOR AGGREGATOR DRIVER
-M:     Shaohua Li <shaohua.li@intel.com>
-L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
-S:     Supported
-F:     drivers/acpi/acpi_pad.c
-
 ACPI THERMAL DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
@@ -579,7 +572,7 @@ F:  drivers/net/appletalk/
 F:     net/appletalk/
 
 ARASAN COMPACT FLASH PATA CONTROLLER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     include/linux/pata_arasan_cf_data.h
@@ -1077,7 +1070,7 @@ F:        drivers/media/video/s5p-fimc/
 ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
 M:     Kyungmin Park <kyungmin.park@samsung.com>
 M:     Kamil Debski <k.debski@samsung.com>
-M:     Jeongtae Park <jtp.park@samsung.com>
+M:     Jeongtae Park <jtp.park@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -1646,11 +1639,11 @@ S:      Maintained
 F:     drivers/gpio/gpio-bt8xx.c
 
 BTRFS FILE SYSTEM
-M:     Chris Mason <chris.mason@oracle.com>
+M:     Chris Mason <chris.mason@fusionio.com>
 L:     linux-btrfs@vger.kernel.org
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
 S:     Maintained
 F:     Documentation/filesystems/btrfs.txt
 F:     fs/btrfs/
@@ -1743,10 +1736,10 @@ F:      include/linux/can/platform/
 CAPABILITIES
 M:     Serge Hallyn <serge.hallyn@canonical.com>
 L:     linux-security-module@vger.kernel.org
-S:     Supported       
+S:     Supported
 F:     include/linux/capability.h
 F:     security/capability.c
-F:     security/commoncap.c 
+F:     security/commoncap.c
 F:     kernel/capability.c
 
 CELL BROADBAND ENGINE ARCHITECTURE
@@ -1800,6 +1793,9 @@ F:        include/linux/cfag12864b.h
 CFG80211 and NL80211
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
+W:     http://wireless.kernel.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     include/linux/nl80211.h
 F:     include/net/cfg80211.h
@@ -2146,11 +2142,11 @@ S:      Orphan
 F:     drivers/net/wan/pc300*
 
 CYTTSP TOUCHSCREEN DRIVER
-M:      Javier Martinez Canillas <javier@dowhile0.org>
-L:      linux-input@vger.kernel.org
-S:      Maintained
-F:      drivers/input/touchscreen/cyttsp*
-F:      include/linux/input/cyttsp.h
+M:     Javier Martinez Canillas <javier@dowhile0.org>
+L:     linux-input@vger.kernel.org
+S:     Maintained
+F:     drivers/input/touchscreen/cyttsp*
+F:     include/linux/input/cyttsp.h
 
 DAMA SLAVE for AX.25
 M:     Joerg Reuter <jreuter@yaina.de>
@@ -2270,7 +2266,7 @@ F:        include/linux/device-mapper.h
 F:     include/linux/dm-*.h
 
 DIOLAN U2C-12 I2C DRIVER
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-diolan-u2c.c
@@ -2930,6 +2926,13 @@ F:       Documentation/power/freezing-of-tasks.txt
 F:     include/linux/freezer.h
 F:     kernel/freezer.c
 
+FRONTSWAP API
+M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     mm/frontswap.c
+F:     include/linux/frontswap.h
+
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:     David Howells <dhowells@redhat.com>
 L:     linux-cachefs@redhat.com
@@ -3138,7 +3141,7 @@ F:        drivers/tty/hvc/
 
 HARDWARE MONITORING
 M:     Jean Delvare <khali@linux-fr.org>
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
@@ -3423,13 +3426,14 @@ S:      Supported
 F:     drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
+M:     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M:     Sergey Lapin <slapin@ossfans.org>
 L:     linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://apps.sourceforge.net/trac/linux-zigbee
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 S:     Maintained
 F:     net/ieee802154/
+F:     net/mac802154/
 F:     drivers/ieee802154/
 
 IIO SUBSYSTEM AND DRIVERS
@@ -4096,6 +4100,8 @@ F:        drivers/scsi/53c700*
 LED SUBSYSTEM
 M:     Bryan Wu <bryan.wu@canonical.com>
 M:     Richard Purdie <rpurdie@rpsys.net>
+L:     linux-leds@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:     Maintained
 F:     drivers/leds/
 F:     include/linux/leds.h
@@ -4340,7 +4346,8 @@ MAC80211
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
 W:     http://linuxwireless.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     Documentation/networking/mac80211-injection.txt
 F:     include/net/mac80211.h
@@ -4351,7 +4358,8 @@ M:        Stefano Brivio <stefano.brivio@polimi.it>
 M:     Mattias Nissler <mattias.nissler@gmx.de>
 L:     linux-wireless@vger.kernel.org
 W:     http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     net/mac80211/rc80211_pid*
 
@@ -4411,6 +4419,13 @@ S:       Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
+MAX16065 HARDWARE MONITOR DRIVER
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     lm-sensors@lm-sensors.org
+S:     Maintained
+F:     Documentation/hwmon/max16065
+F:     drivers/hwmon/max16065.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     lm-sensors@lm-sensors.org
@@ -4633,8 +4648,8 @@ L:        netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:     git git://1984.lsi.us.es/nf
+T:     git git://1984.lsi.us.es/nf-next
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -4836,6 +4851,7 @@ M:        Kevin Hilman <khilman@ti.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/*omap*/*pm*
+F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
 M:     Rajendra Nayak <rnayak@ti.com>
@@ -5149,7 +5165,7 @@ F:        drivers/leds/leds-pca9532.c
 F:     include/linux/leds-pca9532.h
 
 PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -5169,7 +5185,7 @@ S:        Maintained
 F:     drivers/firmware/pcdp.*
 
 PCI ERROR RECOVERY
-M:     Linas Vepstas <linasvepstas@gmail.com>
+M:     Linas Vepstas <linasvepstas@gmail.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/pci-error-recovery.txt
@@ -5275,7 +5291,7 @@ S:        Maintained
 F:     drivers/pinctrl/
 
 PIN CONTROLLER - ST SPEAR
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -5299,7 +5315,7 @@ F:        drivers/video/fb-puv3.c
 F:     drivers/rtc/rtc-puv3.c
 
 PMBUS HARDWARE MONITORING DRIVERS
-M:     Guenter Roeck <guenter.roeck@ericsson.com>
+M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
 W:     http://www.roeck-us.net/linux/drivers/
@@ -5542,7 +5558,7 @@ F:        Documentation/networking/LICENSE.qla3xxx
 F:     drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Anirban Chakraborty <anirban.chakraborty@qlogic.com>
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Sony Chacko <sony.chacko@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -5550,7 +5566,6 @@ S:        Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Anirban Chakraborty <anirban.chakraborty@qlogic.com>
 M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
@@ -5695,6 +5710,9 @@ F:        include/linux/remoteproc.h
 RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
+W:     http://wireless.kernel.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:     Maintained
 F:     Documentation/rfkill.txt
 F:     net/rfkill/
@@ -5849,7 +5867,7 @@ S:        Maintained
 F:     drivers/tty/serial
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 S:     Maintained
 F:     include/linux/dw_dmac.h
 F:     drivers/dma/dw_dmac_regs.h
@@ -5885,7 +5903,7 @@ M:        Ingo Molnar <mingo@redhat.com>
 M:     Peter Zijlstra <peterz@infradead.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
-F:     kernel/sched*
+F:     kernel/sched/
 F:     include/linux/sched.h
 
 SCORE ARCHITECTURE
@@ -5997,7 +6015,7 @@ S:        Maintained
 F:     drivers/mmc/host/sdhci-s3c.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
@@ -6353,7 +6371,7 @@ S:        Maintained
 F:     include/linux/compiler.h
 
 SPEAR PLATFORM SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6362,7 +6380,7 @@ S:        Maintained
 F:     arch/arm/plat-spear/
 
 SPEAR13XX MACHINE SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6371,7 +6389,7 @@ S:        Maintained
 F:     arch/arm/mach-spear13xx/
 
 SPEAR3XX MACHINE SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6382,7 +6400,7 @@ F:        arch/arm/mach-spear3xx/
 SPEAR6XX MACHINE SUPPORT
 M:     Rajeev Kumar <rajeev-dlh.kumar@st.com>
 M:     Shiraz Hashim <shiraz.hashim@st.com>
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -6390,7 +6408,7 @@ S:        Maintained
 F:     arch/arm/mach-spear6xx/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
-M:     Viresh Kumar <viresh.kumar@st.com>
+M:     Viresh Kumar <viresh.linux@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -7291,11 +7309,11 @@ F:      Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
 F:     include/linux/uio*.h
 
-UTIL-LINUX-NG PACKAGE
+UTIL-LINUX PACKAGE
 M:     Karel Zak <kzak@redhat.com>
-L:     util-linux-ng@vger.kernel.org
-W:     http://kernel.org/~kzak/util-linux-ng/
-T:     git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git
+L:     util-linux@vger.kernel.org
+W:     http://en.wikipedia.org/wiki/Util-linux
+T:     git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:     Maintained
 
 UVESAFB DRIVER
@@ -7397,7 +7415,7 @@ F:        include/linux/vlynq.h
 
 VME SUBSYSTEM
 M:     Martyn Welch <martyn.welch@ge.com>
-M:     Manohar Vanga <manohar.vanga@cern.ch>
+M:     Manohar Vanga <manohar.vanga@gmail.com>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     devel@driverdev.osuosl.org
 S:     Maintained
index 0d718ede9ea53949227b5beb56dcef17cb756a98..4bb09e1b1230d33d9328c2a67ba32aff22ebde7d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -561,6 +561,8 @@ else
 KBUILD_CFLAGS  += -O2
 endif
 
+include $(srctree)/arch/$(SRCARCH)/Makefile
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -571,8 +573,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \
                  $(call cc-option,-fno-partial-inlining)
 endif
 
-include $(srctree)/arch/$(SRCARCH)/Makefile
-
 ifneq ($(CONFIG_FRAME_WARN),0)
 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
index b649c5904a4ff3c993732067411e7723fc4d4d19..a91009c6187062253579d0324292ade00ea2241c 100644 (file)
@@ -7,7 +7,6 @@ config ARM
        select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-       select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
@@ -294,6 +293,7 @@ config ARCH_VERSATILE
        select ICST
        select GENERIC_CLOCKEVENTS
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select NEED_MACH_IO_H if PCI
        select PLAT_VERSATILE
        select PLAT_VERSATILE_CLCD
        select PLAT_VERSATILE_FPGA_IRQ
@@ -589,6 +589,7 @@ config ARCH_ORION5X
        select PCI
        select ARCH_REQUIRE_GPIOLIB
        select GENERIC_CLOCKEVENTS
+       select NEED_MACH_IO_H
        select PLAT_ORION
        help
          Support for the following Marvell Orion 5x series SoCs:
index 153a4b2d12b58093d8be229587319523bbcf255e..c9b4f27d191e1c61c7099da10910b69d24c34b8d 100644 (file)
@@ -11,7 +11,7 @@
 /include/ "mmp2.dtsi"
 
 / {
-       model = "Marvell MMP2 Aspenite Development Board";
+       model = "Marvell MMP2 Brownstone Development Board";
        compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2";
 
        chosen {
@@ -19,7 +19,7 @@
        };
 
        memory {
-               reg = <0x00000000 0x04000000>;
+               reg = <0x00000000 0x08000000>;
        };
 
        soc {
index f2ab4ea7cc0ee19c5863a1c57fd0ac9c6142a9cc..581cb081cb0f04586f98e8b10bdf1e095a3b65a0 100644 (file)
@@ -44,6 +44,8 @@
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
                        #interrupt-cells = <1>;
+                       ti,intc-size = <96>;
+                       reg = <0x480FE000 0x1000>;
                };
 
                uart1: serial@4806a000 {
index 8314e4171884bf225d56cb83a9313ab0e9b76340..dd4358bc26e228bd78a1eb35bea71ed1b177a0cf 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1310 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 9e61da404d5774f937f6ecebf37e1f80323d5147..419ea7413d232b510bdd532064029520ec4336a5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1310 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 0d8472e5ab9f618b88476f815d241a7ebd51697f..c9a54e06fb6849314d3c4bc25031f7153608d775 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1340 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index a26fc47a55e85485ba1b59b2fc9bc3b70d0a2690..d71fe2a68f098460249e076d54326d7f58fbd665 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1340 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 1f8e1e1481dfb96733f618f915240b08f7097410..f7b84aced654ca643bc49ebe83b376425a6b893f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr13xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -43,8 +43,8 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 8 0x04
-                             0 9 0x04>;
+               interrupts = <0 6 0x04
+                             0 7 0x04>;
        };
 
        L2: l2-cache {
                gmac0: eth@e2000000 {
                        compatible = "st,spear600-gmac";
                        reg = <0xe2000000 0x8000>;
-                       interrupts = <0 23 0x4
-                                     0 24 0x4>;
+                       interrupts = <0 33 0x4
+                                     0 34 0x4>;
                        interrupt-names = "macirq", "eth_wake_irq";
                        status = "disabled";
                };
                        kbd@e0300000 {
                                compatible = "st,spear300-kbd";
                                reg = <0xe0300000 0x1000>;
+                               interrupts = <0 52 0x4>;
                                status = "disabled";
                        };
 
                        serial@e0000000 {
                                compatible = "arm,pl011", "arm,primecell";
                                reg = <0xe0000000 0x1000>;
-                               interrupts = <0 36 0x4>;
+                               interrupts = <0 35 0x4>;
                                status = "disabled";
                        };
 
index fc82b1a264588b34e31a251dd6c886ef754f39e4..d71b8d581e3d39f77ea8d783303b9eb8a2388030 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 01c5e358fdb271b00a409f7c33af0a77d276c6be..ed3627c116ccbd7c5e8f90208fab4c37ef7a97d0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index dc5e2d445a9352e3774b52dfec8ce0cd5fbaf4d9..b00544e0cd5d18e313eaf6807dc4c8a18294548b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index e47081c494d916bf4f21de313b03f2b63964394f..62fc4fb3e5f92f079cd3caaf0ccdf852e1ff01d1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 6308fa3bec1ec65c19ff3a22fda6f4297e2a26ac..e4e912f9502466da3ab44a26eda9f8c83e383d8e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
@@ -15,8 +15,8 @@
 /include/ "spear320.dtsi"
 
 / {
-       model = "ST SPEAr300 Evaluation Board";
-       compatible = "st,spear300-evb", "st,spear300";
+       model = "ST SPEAr320 Evaluation Board";
+       compatible = "st,spear320-evb", "st,spear320";
        #address-cells = <1>;
        #size-cells = <1>;
 
@@ -26,7 +26,7 @@
 
        ahb {
                pinmux@b3000000 {
-                       st,pinmux-mode = <3>;
+                       st,pinmux-mode = <4>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&state_default>;
 
index 5372ca399b1f37eba5de326b716c639cd3309be4..1f49d69595a06996b0c8b48d631ed116716f7abb 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 91072553963f02566caf2d6a42c727540998a162..3a8bb5736928292b1a78359342550e709e052a2d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr3xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 089f0a42c50ef4244765efc59450011637e33a08..a3c36e47d7efcafee29eb73865eb5dec5651d4a4 100644 (file)
                        timer@f0000000 {
                                compatible = "st,spear-timer";
                                reg = <0xf0000000 0x400>;
+                               interrupt-parent = <&vic0>;
                                interrupts = <16>;
                        };
                };
index 9d7eb530f95fd926785170162812d3ef1096178d..aa07f5938f05cfac42414a79f475135ff827c260 100644 (file)
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
        struct safe_buffer *buf;
        unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
        struct safe_buffer *buf;
        unsigned long off;
 
-       dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-               __func__, addr, off, sz, dir);
+       dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+               __func__, addr, sz, dir);
 
        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 
        BUG_ON(buf->direction != dir);
 
-       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);
 
        DO_STATS(dev->archdata.dmabounce->bounce_count++);
index 9854ff4279e0891bfaaa59d8086df7ee247c5003..11828e632532accb5ab386ac9a0ffe173c6b9fdf 100644 (file)
@@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_LIBUSUAL=y
index 68374ba6a943e9d029246d927b64a52913ea525e..c79f61faa3a55e81ae773b6d9d83532821b24c33 100644 (file)
@@ -243,7 +243,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
 {
        u64 result;
 
index 3d2220498abc2db2d98378c94a3309c71e91ccee..6ddbe446425e11524d927b5cd8b479bcd2419238 100644 (file)
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x)                                  \
-       do {                                            \
-       __asm__ __volatile__(                           \
-       "mcr    p15, 0, %0, c3, c0      @ set domain"   \
-         : : "r" (x));                                 \
-       isb();                                          \
-       } while (0)
+static inline void set_domain(unsigned val)
+{
+       asm volatile(
+       "mcr    p15, 0, %0, c3, c0      @ set domain"
+         : : "r" (val));
+       isb();
+}
 
 #define modify_domain(dom,type)                                        \
        do {                                                    \
@@ -78,8 +78,8 @@
        } while (0)
 
 #else
-#define set_domain(x)          do { } while (0)
-#define modify_domain(dom,type)        do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type)  { }
 #endif
 
 /*
index 7be54690aeec0f38be6bb2738a8a7df8a243021d..e42cf597f6e6b5b7bcad58cdf9e75289d66672d4 100644 (file)
@@ -19,6 +19,7 @@
        "       .long   1b, 4f, 2b, 4f\n"                       \
        "       .popsection\n"                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
+       "       .align  2\n"                                    \
        "4:     mov     %0, " err_reg "\n"                      \
        "       b       3b\n"                                   \
        "       .popsection"
index e0d1c0cfa54816fda646b9d1b2996d9e1176fff6..6b9b077d86b3788fabb129c0b18bfb722724a697 100644 (file)
@@ -4,7 +4,7 @@
  * ARM PrimeXsys System Controller SP810 header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b79f8e97f7755f22d82ae20ee00442cd11f7af02..af7b0bda3355d9af850ae722b2b1f55277ef6e67 100644 (file)
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
-#define TIF_SYSCALL_RESTARTSYS 10
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS        (1 << TIF_SYSCALL_RESTARTSYS)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                          _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
 
 /*
  * Change these and you break ASM code in entry-common.S
index 437f0c426517cf39c1035c3e7a0b6cfecb9395cb..0d1851ca6eb993a628f623c257487200fe529604 100644 (file)
@@ -495,6 +495,7 @@ ENDPROC(__und_usr)
  * The out of line fixup for the ldrt above.
  */
        .pushsection .fixup, "ax"
+       .align  2
 4:     mov     pc, r9
        .popsection
        .pushsection __ex_table,"a"
index ba32b393b3f0c514c83799687348d52655bfe1da..38c1a3b103a0684b5b579bb74ca07f38bb91eb13 100644 (file)
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R ("mov pc, r",0,2f,"")
        TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
        TEST_BB(   "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
-       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+       TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
        TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
        TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
index 8f96ec778e8dd4537afab036cb8e28456b9f4e10..6123daf397a7bbb7ffe161075165ddf57f175d10 100644 (file)
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = {
        /* LDRSB (literal)      1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
        /* LDRH (literal)       1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
        /* LDRSH (literal)      1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
-       DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+       DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
                                                 REGS(PC, NOSPPCX, 0, 0, 0)),
 
        /* STRB (immediate)     1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
index 186c8cb982c543a2cc1631796b34376151b33702..a02eada3aa5d06036027ac1241e652d5032781bd 100644 (file)
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
-               return -EPERM;
+               return -EOPNOTSUPP;
        }
 
        /*
index 5700a7ae7f0bc1511ae048d7a3f025c401e5729c..14e38261cd31db9d852db2eb0b8046251a04613d 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/regset.h>
 #include <linux/audit.h>
 #include <linux/tracehook.h>
-#include <linux/unistd.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
                audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
                                    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
-       if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
-               scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return scno;
 
index fd2392a17ac1befb3464b5a0e77960a0f862b4ca..536c5d6b340b7fca2aee9c770000372c38be98bb 100644 (file)
@@ -27,6 +27,7 @@
  */
 #define SWI_SYS_SIGRETURN      (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
 #define SWI_SYS_RT_SIGRETURN   (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART                (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
 
 /*
  * With EABI, the syscall number has to be loaded into r7.
@@ -46,6 +47,18 @@ const unsigned long sigreturn_codes[7] = {
        MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled.  In the latter case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+       SWI_SYS_RESTART,        /* swi  __NR_restart_syscall */
+       0xe49df004,             /* ldr  pc, [sp], #4 */
+};
+
 /*
  * atomically swap in the new signal mask, and wait for a signal.
  */
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
-               case -ERESTART_RESTARTBLOCK:
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
+               case -ERESTART_RESTARTBLOCK:
+                       regs->ARM_r0 = -EINTR;
+                       break;
                }
        }
 
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->ARM_pc == restart_addr) {
-                       if (retval == -ERESTARTNOHAND ||
-                           retval == -ERESTART_RESTARTBLOCK
+                       if (retval == -ERESTARTNOHAND
                            || (retval == -ERESTARTSYS
                                && !(ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
-                       clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
                }
 
                handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
                 * ignore the restart.
                 */
                if (retval == -ERESTART_RESTARTBLOCK
-                   && regs->ARM_pc == restart_addr)
-                       set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+                   && regs->ARM_pc == continue_addr) {
+                       if (thumb_mode(regs)) {
+                               regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+                               regs->ARM_pc -= 2;
+                       } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+                               regs->ARM_r7 = __NR_restart_syscall;
+                               regs->ARM_pc -= 4;
+#else
+                               u32 __user *usp;
+
+                               regs->ARM_sp -= 4;
+                               usp = (u32 __user *)regs->ARM_sp;
+
+                               if (put_user(regs->ARM_pc, usp) == 0) {
+                                       regs->ARM_pc = KERN_RESTART_CODE;
+                               } else {
+                                       regs->ARM_sp += 4;
+                                       force_sigsegv(0, current);
+                               }
+#endif
+                       }
+               }
        }
 
        restore_saved_sigmask();
index 5ff067b7c7522f428b4343832ecb759b7ccf16de..6fcfe8398aa473051fbf72396986a84dc700978f 100644 (file)
@@ -8,5 +8,7 @@
  * published by the Free Software Foundation.
  */
 #define KERN_SIGRETURN_CODE    (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE      (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
 
 extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
index 4928d89758f4ce0dea767acdf9fcf2299dff7833..3647170e9a16ba3aa8838218ed99e74dbc5b7c59 100644 (file)
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
         */
        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
               sigreturn_codes, sizeof(sigreturn_codes));
+       memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+              syscall_restart_code, sizeof(syscall_restart_code));
 
        flush_icache_range(vectors, vectors + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
index 43a31fb06318dc0a1213827a5069b73cac19f5a6..36ff15bbfdd4961afecfef9ea292c6445e9b3836 100644 (file)
@@ -183,7 +183,9 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
index 226949dc4ac04a242c40e215d23818421342b99c..f953bb54aa9d31d590791d97aaaf770aaaecfd93 100644 (file)
@@ -50,5 +50,6 @@
 #define POWER_MANAGEMENT       (BRIDGE_VIRT_BASE | 0x011c)
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE         (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index ad1165d488c13f393bf352a00f0e03b62265f3cc..d52b0ef313b7e53c2efa6de67d0ddeddc4790ca5 100644 (file)
@@ -78,6 +78,7 @@
 
 /* North-South Bridge */
 #define BRIDGE_VIRT_BASE       (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (DOVE_SB_REGS_PHYS_BASE | 0x20000)
 
 /* Cryptographic Engine */
 #define DOVE_CRYPT_PHYS_BASE   (DOVE_SB_REGS_PHYS_BASE | 0x30000)
index 573be57d3d2805c70f24ab3b4dd29e1b3e6e3843..6f6d13f91e4ca1fc65448ee4b4f950ea0eeeeebc 100644 (file)
@@ -212,7 +212,7 @@ config MACH_SMDKV310
        select EXYNOS_DEV_SYSMMU
        select EXYNOS4_DEV_AHCI
        select SAMSUNG_DEV_KEYPAD
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select SAMSUNG_DEV_PWM
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
@@ -264,7 +264,7 @@ config MACH_UNIVERSAL_C210
        select S5P_DEV_ONENAND
        select S5P_DEV_TV
        select EXYNOS_DEV_SYSMMU
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_I2C1
@@ -303,7 +303,7 @@ config MACH_NURI
        select S5P_DEV_MFC
        select S5P_DEV_USB_EHCI
        select S5P_SETUP_MIPIPHY
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS_DEV_DRM
        select EXYNOS4_SETUP_FIMC
        select EXYNOS4_SETUP_FIMD0
@@ -341,7 +341,7 @@ config MACH_ORIGEN
        select SAMSUNG_DEV_PWM
        select EXYNOS_DEV_DRM
        select EXYNOS_DEV_SYSMMU
-       select EXYNOS4_DEV_DMA
+       select EXYNOS_DEV_DMA
        select EXYNOS4_DEV_USB_OHCI
        select EXYNOS4_SETUP_FIMD0
        select EXYNOS4_SETUP_SDHCI
index e9fafcf163de8982287876a7ca6d6cf94488f472..373c3c00d24cdbbe054a1aae60625d97188e6208 100644 (file)
@@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
                                                struct exynos_pm_domain *pd)
 {
        if (pdev->dev.bus) {
-               if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+               if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
+                       pm_genpd_dev_need_restore(&pdev->dev, true);
+               else
                        pr_info("%s: error in adding %s device to %s power"
                                "domain\n", __func__, dev_name(&pdev->dev),
                                pd->name);
@@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
        if (of_have_populated_dt())
                return exynos_pm_dt_parse_domains();
 
-       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
-               pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
-                               exynos4_pm_domains[idx]->is_off);
+       for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
+               struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
+               int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+               pm_genpd_init(&pd->pd, NULL, !on);
+       }
 
 #ifdef CONFIG_S5P_DEV_FIMD0
        exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
index f8437dd238c2865adb462f8c1d23d575bda93cf1..ded4652ada803221ea97a92bb50dc6d8d6fef64e 100644 (file)
@@ -1,4 +1,8 @@
-obj-y                                  := clock.o highbank.o system.o
+obj-y                                  := clock.o highbank.o system.o smc.o
+
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_smc.o                           :=-Wa,-march=armv7-a$(plus_sec)
+
 obj-$(CONFIG_DEBUG_HIGHBANK_UART)      += lluart.o
 obj-$(CONFIG_SMP)                      += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)              += hotplug.o
index d8e2d0be64ac365dc665b1e7e1a52c4549973f77..141ed5171826acbc3caa6a1b7e843f2737361f65 100644 (file)
@@ -8,3 +8,4 @@ extern void highbank_lluart_map_io(void);
 static inline void highbank_lluart_map_io(void) {}
 #endif
 
+extern void highbank_smc1(int fn, int arg);
index 410a112bb52e29036d22c1de07ffed1965011389..8777612b1a42b2dd6fd1a49df59ff6a93153b9d6 100644 (file)
@@ -85,10 +85,24 @@ const static struct of_device_id irq_match[] = {
        {}
 };
 
+#ifdef CONFIG_CACHE_L2X0
+static void highbank_l2x0_disable(void)
+{
+       /* Disable PL310 L2 Cache controller */
+       highbank_smc1(0x102, 0x0);
+}
+#endif
+
 static void __init highbank_init_irq(void)
 {
        of_irq_init(irq_match);
+
+#ifdef CONFIG_CACHE_L2X0
+       /* Enable PL310 L2 Cache controller */
+       highbank_smc1(0x102, 0x1);
        l2x0_of_init(0, ~0UL);
+       outer_cache.disable = highbank_l2x0_disable;
+#endif
 }
 
 static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S
new file mode 100644 (file)
index 0000000..407d17b
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * This is a common routine to manage the secure monitor API
+ * used to modify the PL310 secure registers.
+ * 'r0' contains the value to be modified and 'r12' contains
+ * the monitor API number.
+ * Function signature: void highbank_smc1(u32 fn, u32 arg)
+ */
+
+ENTRY(highbank_smc1)
+       stmfd   sp!, {r4-r11, lr}
+       mov     r12, r0
+       mov     r0, r1
+       dsb
+       smc     #0
+       ldmfd   sp!, {r4-r11, pc}
+ENDPROC(highbank_smc1)
index 0021f726b153210b04b876cabc7d36c61663c2a1..eff4db5de0ddb8abbfde826cc14c16b9432c5373 100644 (file)
@@ -477,6 +477,7 @@ config MACH_MX31_3DS
        select IMX_HAVE_PLATFORM_IMX2_WDT
        select IMX_HAVE_PLATFORM_IMX_I2C
        select IMX_HAVE_PLATFORM_IMX_KEYPAD
+       select IMX_HAVE_PLATFORM_IMX_SSI
        select IMX_HAVE_PLATFORM_IMX_UART
        select IMX_HAVE_PLATFORM_IPU_CORE
        select IMX_HAVE_PLATFORM_MXC_EHCI
index 0f0beb580b73f2d6ecff6b2aa70f5115d9776532..516ddee1948e81dc207d1b91c29380452c6c970d 100644 (file)
@@ -108,8 +108,7 @@ int __init mx1_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
        clk_register_clkdev(clk[clko], "clko", NULL);
 
-       mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
-                       MX1_TIM1_INT);
+       mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT);
 
        return 0;
 }
index 4e4f384ee8ddf6562c39485c1fede1b7e7da6425..ea13e61bd5f36163d3b1372c16dbc0ae7c0d90a5 100644 (file)
@@ -180,7 +180,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href)
        clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
        clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
 
-       mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
-                       MX21_INT_GPT1);
+       mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1);
+
        return 0;
 }
index d9833bb5fd610a3e9fdedff0cdc58a8724754ea9..fdd8cc87c9feee388ca8a94b0fb46fa20305f33a 100644 (file)
@@ -243,6 +243,6 @@ int __init mx25_clocks_init(void)
        clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
        clk_register_clkdev(clk[iim_ipg], "iim", NULL);
 
-       mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+       mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
        return 0;
 }
index 50a7ebd8d1b211f4790437fa6e74f9547cb9883c..295cbd7c08dcd9df383b8fd7eb5a1843083e895a 100644 (file)
@@ -263,8 +263,7 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
        clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
 
-       mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
-                       MX27_INT_GPT1);
+       mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
 
        clk_prepare_enable(clk[emi_ahb_gate]);
 
index a854b9cae5ea505f3c2586224eb1f12651ffad93..c9a06d800f8ef7a0a7d96e015cd87754afd09a25 100644 (file)
@@ -175,8 +175,7 @@ int __init mx31_clocks_init(unsigned long fref)
        mx31_revision();
        clk_disable_unprepare(clk[iim_gate]);
 
-       mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
-                       MX31_INT_GPT);
+       mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
 
        return 0;
 }
index a9e60bf7dd75ca7bf8419ba2318d9eaffb7484a0..c6422fb10bae37756693f3323f79df62e4fc932e 100644 (file)
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
                        pr_err("i.MX35 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clk[i]));
 
-
        clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
        clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
        clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,14 +263,20 @@ int __init mx35_clocks_init()
        clk_prepare_enable(clk[iim_gate]);
        clk_prepare_enable(clk[emi_gate]);
 
+       /*
+        * SCC is needed to boot via mmc after a watchdog reset. The clock code
+        * before conversion to common clk also enabled UART1 (which isn't
+        * handled here and not needed for mmc) and IIM (which is enabled
+        * unconditionally above).
+        */
+       clk_prepare_enable(clk[scc_gate]);
+
        imx_print_silicon_rev("i.MX35", mx35_revision());
 
 #ifdef CONFIG_MXC_USE_EPIT
-       epit_timer_init(&epit1_clk,
-                       MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+       epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
 #else
-       mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
-                       MX35_INT_GPT);
+       mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
 #endif
 
        return 0;
index fcd94f3b0f0e7cf4380a47e80d0425c71a00f02d..a2200c77bf70dcdc09c44d642fe09656f36f0fcb 100644 (file)
@@ -104,12 +104,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
                                periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
        clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
                                main_bus_sel, ARRAY_SIZE(main_bus_sel));
-       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+       clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
                                per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
        clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
        clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
        clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
-       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+       clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
                                per_root_sel, ARRAY_SIZE(per_root_sel));
        clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
        clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
@@ -172,7 +172,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
        clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
        clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
-       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+       clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
        clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
        clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
        clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
@@ -366,8 +366,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
        clk_set_rate(clk[esdhc_b_podf], 166250000);
 
        /* System timer */
-       mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
-               MX51_INT_GPT);
+       mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
 
        clk_prepare_enable(clk[iim_gate]);
        imx_print_silicon_rev("i.MX51", mx51_revision());
@@ -452,8 +451,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
        clk_set_rate(clk[esdhc_b_podf], 200000000);
 
        /* System timer */
-       mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
-               MX53_INT_GPT);
+       mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
 
        clk_prepare_enable(clk[iim_gate]);
        imx_print_silicon_rev("i.MX53", mx53_revision());
index cab02d0a15d60d1c1bfe6e16157b386c0fb6b062..e1a17ac7b3b48419a8ff1f9dadfa5de03f5fd852 100644 (file)
@@ -122,10 +122,6 @@ static const char *cko1_sels[]     = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5
                                    "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
                                    "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
 
-static const char * const clks_init_on[] __initconst = {
-       "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
-};
-
 enum mx6q_clks {
        dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
        pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
@@ -156,16 +152,20 @@ enum mx6q_clks {
        ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
        usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
        pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
-       ssi2_ipg, ssi3_ipg, clk_max
+       ssi2_ipg, ssi3_ipg, rom,
+       clk_max
 };
 
 static struct clk *clk[clk_max];
 
+static enum mx6q_clks const clks_init_on[] __initconst = {
+       mmdc_ch0_axi, rom,
+};
+
 int __init mx6q_clocks_init(void)
 {
        struct device_node *np;
        void __iomem *base;
-       struct clk *c;
        int i, irq;
 
        clk[dummy] = imx_clk_fixed("dummy", 0);
@@ -365,6 +365,7 @@ int __init mx6q_clocks_init(void)
        clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",      "usdhc4",            base + 0x78, 26);
        clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
        clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+       clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
        clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
        clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
        clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
@@ -424,21 +425,14 @@ int __init mx6q_clocks_init(void)
        clk_register_clkdev(clk[ahb], "ahb", NULL);
        clk_register_clkdev(clk[cko1], "cko1", NULL);
 
-       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
-               c = clk_get_sys(clks_init_on[i], NULL);
-               if (IS_ERR(c)) {
-                       pr_err("%s: failed to get clk %s", __func__,
-                              clks_init_on[i]);
-                       return PTR_ERR(c);
-               }
-               clk_prepare_enable(c);
-       }
+       for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+               clk_prepare_enable(clk[clks_init_on[i]]);
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
        base = of_iomap(np, 0);
        WARN_ON(!base);
        irq = irq_of_parse_and_map(np, 0);
-       mxc_timer_init(NULL, base, irq);
+       mxc_timer_init(base, irq);
 
        return 0;
 }
index 4685919deb633f2427d0eb14da32097108e7fe16..0440379e36284c03f3494b0b87f23a95c0f980b0 100644 (file)
@@ -74,30 +74,15 @@ struct clk_pllv2 {
        void __iomem    *base;
 };
 
-static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
+static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
+               u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
 {
        long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
-       unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
-       void __iomem *pllbase;
+       unsigned long dbl;
        s64 temp;
-       struct clk_pllv2 *pll = to_clk_pllv2(hw);
-
-       pllbase = pll->base;
 
-       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
        dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
 
-       if (pll_hfsm == 0) {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
-       } else {
-               dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
-               dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
-               dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
-       }
        pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
        mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
        mfi = (mfi <= 5) ? 5 : mfi;
@@ -123,18 +108,30 @@ static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
        return temp;
 }
 
-static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
+       u32 dp_op, dp_mfd, dp_mfn, dp_ctl;
+       void __iomem *pllbase;
        struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+       pllbase = pll->base;
+
+       dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+       dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+       dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+       dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+
+       return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn);
+}
+
+static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
+               u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn)
+{
        u32 reg;
-       void __iomem *pllbase;
        long mfi, pdf, mfn, mfd = 999999;
        s64 temp64;
        unsigned long quad_parent_rate;
-       unsigned long pll_hfsm, dp_ctl;
-
-       pllbase = pll->base;
 
        quad_parent_rate = 4 * parent_rate;
        pdf = mfi = -1;
@@ -144,25 +141,41 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
                return -EINVAL;
        pdf--;
 
-       temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
-       do_div(temp64, quad_parent_rate/1000000);
+       temp64 = rate * (pdf + 1) - quad_parent_rate * mfi;
+       do_div(temp64, quad_parent_rate / 1000000);
        mfn = (long)temp64;
 
+       reg = mfi << 4 | pdf;
+
+       *dp_op = reg;
+       *dp_mfd = mfd;
+       *dp_mfn = mfn;
+
+       return 0;
+}
+
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_pllv2 *pll = to_clk_pllv2(hw);
+       void __iomem *pllbase;
+       u32 dp_ctl, dp_op, dp_mfd, dp_mfn;
+       int ret;
+
+       pllbase = pll->base;
+
+
+       ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn);
+       if (ret)
+               return ret;
+
        dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
        /* use dpdck0_2 */
        __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
-       pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
-       if (pll_hfsm == 0) {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
-       } else {
-               reg = mfi << 4 | pdf;
-               __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
-               __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
-               __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
-       }
+
+       __raw_writel(dp_op, pllbase + MXC_PLL_DP_OP);
+       __raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD);
+       __raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN);
 
        return 0;
 }
@@ -170,7 +183,11 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
 static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long *prate)
 {
-       return rate;
+       u32 dp_op, dp_mfd, dp_mfn;
+
+       __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+       return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
+                       dp_op, dp_mfd, dp_mfn);
 }
 
 static int clk_pllv2_prepare(struct clk_hw *hw)
index 5e11ba7daee2e34ce64d3ad903cebfe6adb151b7..5e3f1f0f4cab88189d2cebced96e5b04a5f8767c 100644 (file)
@@ -23,7 +23,7 @@
 #define MX53_DPLL1_BASE                MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
 #define MX53_DPLL2_BASE                MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
 #define MX53_DPLL3_BASE                MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
-#define MX53_DPLL4_BASE                MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
+#define MX53_DPLL4_BASE                MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
 
 /* PLL Register Offsets */
 #define MXC_PLL_DP_CTL                 0x00
index 89493abd497c61f67e37a8eea984e517431f224a..20ed2d56c1af6a3109ff3ea10843cda25d2a289e 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/errno.h>
 #include <asm/cacheflush.h>
+#include <asm/cp15.h>
 #include <mach/common.h>
 
 int platform_cpu_kill(unsigned int cpu)
@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu)
        return 1;
 }
 
+static inline void cpu_enter_lowpower(void)
+{
+       unsigned int v;
+
+       flush_cache_all();
+       asm volatile(
+               "mcr    p15, 0, %1, c7, c5, 0\n"
+       "       mcr     p15, 0, %1, c7, c10, 4\n"
+       /*
+        * Turn off coherency
+        */
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       bic     %0, %0, %3\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       "       bic     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+         : "=&r" (v)
+         : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+       unsigned int v;
+
+       asm volatile(
+               "mrc    p15, 0, %0, c1, c0, 0\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       orr     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+         : "=&r" (v)
+         : "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
+}
+
 /*
  * platform-specific code to shutdown a CPU
  *
@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu)
  */
 void platform_cpu_die(unsigned int cpu)
 {
-       flush_cache_all();
+       cpu_enter_lowpower();
        imx_enable_cpu(cpu, false);
        cpu_do_idle();
+       cpu_leave_lowpower();
 
        /* We should never return from idle */
        panic("cpu %d unexpectedly exit from shutdown\n", cpu);
index c515f8ede1a145a68a0345ae2d503e02949a9d14..6450303f1a7ac01e701adf8b886248a981c54856 100644 (file)
@@ -70,7 +70,6 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
                I2C_BOARD_INFO("pcf8563", 0x51),
        }, {
                I2C_BOARD_INFO("tsc2007", 0x48),
-               .type           = "tsc2007",
                .platform_data  = &tsc2007_info,
                .irq            = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
        },
index ac50f1671e381447d7fda9335d32cac33432b7ca..1e09de50cbcdc9292ad762d6228c7663fbfcc4a2 100644 (file)
@@ -142,7 +142,6 @@ static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = {
                I2C_BOARD_INFO("pcf8563", 0x51),
        }, {
                I2C_BOARD_INFO("tsc2007", 0x49),
-               .type           = "tsc2007",
                .platform_data  = &tsc2007_info,
        },
 };
index dff82eb57cd9f8957d1db67689ae5d58d7a4884c..ba09552fe5feee5b6720e3b294449e6d46bfeddf 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
 #include <mach/common.h>
 #include <mach/iomux-mx27.h>
 
@@ -116,6 +116,8 @@ static const int visstrim_m10_pins[] __initconst = {
        PB23_PF_USB_PWR,
        PB24_PF_USB_OC,
        /* CSI */
+       TVP5150_RSTN | GPIO_GPIO | GPIO_OUT,
+       TVP5150_PWDN | GPIO_GPIO | GPIO_OUT,
        PB10_PF_CSI_D0,
        PB11_PF_CSI_D1,
        PB12_PF_CSI_D2,
@@ -147,6 +149,24 @@ static struct gpio visstrim_m10_version_gpios[] = {
        { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" },
 };
 
+static const struct gpio visstrim_m10_gpios[] __initconst = {
+       {
+               .gpio = TVP5150_RSTN,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
+               .label = "tvp5150_rstn",
+       },
+       {
+               .gpio = TVP5150_PWDN,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+               .label = "tvp5150_pwdn",
+       },
+       {
+               .gpio = OTG_PHY_CS_GPIO,
+               .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+               .label = "usbotg_cs",
+       },
+};
+
 /* Camera */
 static int visstrim_camera_power(struct device *dev, int on)
 {
@@ -190,13 +210,6 @@ static void __init visstrim_camera_init(void)
        struct platform_device *pdev;
        int dma;
 
-       /* Initialize tvp5150 gpios */
-       mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT);
-       mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT);
-       gpio_set_value(TVP5150_RSTN, 1);
-       gpio_set_value(TVP5150_PWDN, 0);
-       ndelay(1);
-
        gpio_set_value(TVP5150_PWDN, 1);
        ndelay(1);
        gpio_set_value(TVP5150_RSTN, 0);
@@ -377,10 +390,6 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
 /* USB OTG */
 static int otg_phy_init(struct platform_device *pdev)
 {
-       gpio_set_value(OTG_PHY_CS_GPIO, 0);
-
-       mdelay(10);
-
        return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
 }
 
@@ -435,6 +444,11 @@ static void __init visstrim_m10_board_init(void)
        if (ret)
                pr_err("Failed to setup pins (%d)\n", ret);
 
+       ret = gpio_request_array(visstrim_m10_gpios,
+                               ARRAY_SIZE(visstrim_m10_gpios));
+       if (ret)
+               pr_err("Failed to request gpios (%d)\n", ret);
+
        imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
        imx27_add_imx_uart0(&uart_pdata);
 
index d14bbe949a4f2575a13227b6a629fd177774492f..3e7401fca76c7e18860261a7541449e3c75b5c9d 100644 (file)
@@ -32,7 +32,7 @@
  * Memory-mapped I/O on MX21ADS base board
  */
 #define MX21ADS_MMIO_BASE_ADDR   0xf5000000
-#define MX21ADS_MMIO_SIZE        SZ_16M
+#define MX21ADS_MMIO_SIZE        0xc00000
 
 #define MX21ADS_REG_ADDR(offset)    (void __force __iomem *) \
                (MX21ADS_MMIO_BASE_ADDR + (offset))
index 967ed5b35a45914b3e26678db38bc0aaf0868faa..a8983b9778d1b7873dd850076718eabd690ca6f1 100644 (file)
@@ -86,6 +86,7 @@ static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
 
 void __init imx3_init_l2x0(void)
 {
+#ifdef CONFIG_CACHE_L2X0
        void __iomem *l2x0_base;
        void __iomem *clkctl_base;
 
@@ -115,6 +116,7 @@ void __init imx3_init_l2x0(void)
        }
 
        l2x0_init(l2x0_base, 0x00030024, 0x00000000);
+#endif
 }
 
 #ifdef CONFIG_SOC_IMX31
@@ -179,6 +181,8 @@ void __init imx31_soc_init(void)
        mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
        mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
 
+       pinctrl_provide_dummies();
+
        if (to_version == 1) {
                strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin",
                        strlen(imx31_sdma_pdata.fw_name));
index feeee17da96b227b769c1748c6175b6071b8bd59..1d003053d5621bd45341030b9f58db08f78990ac 100644 (file)
@@ -202,6 +202,8 @@ void __init imx51_soc_init(void)
        mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
        mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
 
+       pinctrl_provide_dummies();
+
        /* i.mx51 has the i.mx35 type sdma */
        imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);
 
index 2222c57395198f6535d49cc8a2122e756cbba7b2..b0d3cc49269def6d8ab668af574fd8341ec5aca8 100644 (file)
@@ -20,9 +20,6 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
index 25fb3fd418efbe7e30b94136d920fd851e371de8..f261cd2426434aac74e2856c6069c9052d6f035f 100644 (file)
@@ -159,6 +159,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
        gate_fn->gate.flags = clk_gate_flags;
        gate_fn->gate.lock = lock;
        gate_fn->gate.hw.init = &init;
+       gate_fn->fn = fn;
 
        /* ops is the gate ops, but with our disable function */
        if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
@@ -193,9 +194,11 @@ static struct clk __init *kirkwood_register_gate_fn(const char *name,
                                    bit_idx, 0, &gating_lock, fn);
 }
 
+static struct clk *ge0, *ge1;
+
 void __init kirkwood_clk_init(void)
 {
-       struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+       struct clk *runit, *sata0, *sata1, *usb0, *sdio;
        struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
 
        tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
@@ -257,6 +260,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
        orion_ge00_init(eth_data,
                        GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
                        IRQ_KIRKWOOD_GE00_ERR);
+       /*
+        * The interface forgets the MAC address assigned by u-boot if
+        * the clock is turned off, so claim the clk now.
+        */
+       clk_prepare_enable(ge0);
 }
 
 
@@ -268,6 +274,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
        orion_ge01_init(eth_data,
                        GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
                        IRQ_KIRKWOOD_GE01_ERR);
+       clk_prepare_enable(ge1);
 }
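
The comment added above carries the reasoning: once the common clock framework gates unused clocks at late init, any hardware state that only survives while the clock runs (here, the MAC address programmed by u-boot) is lost, so the init path takes an explicit enable reference. A hedged sketch of that consumer pattern — the clock name and function below are invented for illustration:

    /* Illustrative only; "example_gate" is a made-up clock name.
     * Requires <linux/clk.h> and <linux/err.h>. */
    static void __init example_keep_clock_running(void)
    {
            struct clk *clk;

            clk = clk_get_sys("example_gate", NULL);        /* look up the gate clock */
            if (IS_ERR(clk))
                    return;                                 /* clock not provided: nothing to hold */

            clk_prepare_enable(clk);        /* hold prepare+enable so it is never gated */
    }
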
 
 
index 3eee37a3b501a81e594fed45997301ae27f67ce4..a115142f8690bedf3bde2f10ad6f868b400a85dd 100644 (file)
@@ -38,6 +38,7 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #define L2_CONFIG_REG          (BRIDGE_VIRT_BASE | 0x0128)
 #define L2_WRITETHROUGH                0x00000010
index fede3d503efa0ef505b910494b4c96b5a9fa496d..c5b68510776b71c75c2731fcf5253a4114597c30 100644 (file)
@@ -80,6 +80,7 @@
 #define  UART1_VIRT_BASE       (DEV_BUS_VIRT_BASE | 0x2100)
 
 #define BRIDGE_VIRT_BASE       (KIRKWOOD_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE       (KIRKWOOD_REGS_PHYS_BASE | 0x20000)
 
 #define CRYPTO_PHYS_BASE       (KIRKWOOD_REGS_PHYS_BASE | 0x30000)
 
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644 (file)
index 0e135a5..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x)    (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio)     ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x)                GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
index fcfe0e3bd7016631b5086cea80279b1d4db4a889..e60c7d98922b9b7876f561cc572a7b90bde28d73 100644 (file)
@@ -241,6 +241,7 @@ void __init mmp2_init_icu(void)
        icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
        icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
        icu_data[1].nr_irqs = 2;
+       icu_data[1].cascade_irq = 4;
        icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
        icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
                                                   icu_data[1].virq_base, 0,
@@ -249,6 +250,7 @@ void __init mmp2_init_icu(void)
        icu_data[2].reg_status = mmp_icu_base + 0x154;
        icu_data[2].reg_mask = mmp_icu_base + 0x16c;
        icu_data[2].nr_irqs = 2;
+       icu_data[2].cascade_irq = 5;
        icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
        icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
                                                   icu_data[2].virq_base, 0,
@@ -257,6 +259,7 @@ void __init mmp2_init_icu(void)
        icu_data[3].reg_status = mmp_icu_base + 0x180;
        icu_data[3].reg_mask = mmp_icu_base + 0x17c;
        icu_data[3].nr_irqs = 3;
+       icu_data[3].cascade_irq = 9;
        icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
        icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
                                                   icu_data[3].virq_base, 0,
@@ -265,6 +268,7 @@ void __init mmp2_init_icu(void)
        icu_data[4].reg_status = mmp_icu_base + 0x158;
        icu_data[4].reg_mask = mmp_icu_base + 0x170;
        icu_data[4].nr_irqs = 5;
+       icu_data[4].cascade_irq = 17;
        icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
        icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
                                                   icu_data[4].virq_base, 0,
@@ -273,6 +277,7 @@ void __init mmp2_init_icu(void)
        icu_data[5].reg_status = mmp_icu_base + 0x15c;
        icu_data[5].reg_mask = mmp_icu_base + 0x174;
        icu_data[5].nr_irqs = 15;
+       icu_data[5].cascade_irq = 35;
        icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
        icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
                                                   icu_data[5].virq_base, 0,
@@ -281,6 +286,7 @@ void __init mmp2_init_icu(void)
        icu_data[6].reg_status = mmp_icu_base + 0x160;
        icu_data[6].reg_mask = mmp_icu_base + 0x178;
        icu_data[6].nr_irqs = 2;
+       icu_data[6].cascade_irq = 51;
        icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
        icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
                                                   icu_data[6].virq_base, 0,
@@ -289,6 +295,7 @@ void __init mmp2_init_icu(void)
        icu_data[7].reg_status = mmp_icu_base + 0x188;
        icu_data[7].reg_mask = mmp_icu_base + 0x184;
        icu_data[7].nr_irqs = 2;
+       icu_data[7].cascade_irq = 55;
        icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
        icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
                                                   icu_data[7].virq_base, 0,
index c64dbb96dbad53a4264b8559ec531f229b75d08b..eb187e0e059bdbb1459b2ae21d69b6050176cba2 100644 (file)
@@ -31,5 +31,6 @@
 #define IRQ_MASK_HIGH_OFF      0x0014
 
 #define TIMER_VIRT_BASE                (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE                (BRIDGE_PHYS_BASE | 0x0300)
 
 #endif
index 3674497162e3efa3b2238360fda132e9761a4d21..e807c4c52a0b6331a4e02146f71edc127d95cb7f 100644 (file)
@@ -42,6 +42,7 @@
 #define MV78XX0_CORE0_REGS_PHYS_BASE   0xf1020000
 #define MV78XX0_CORE1_REGS_PHYS_BASE   0xf1024000
 #define MV78XX0_CORE_REGS_VIRT_BASE    0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE    0xfe400000
 #define MV78XX0_CORE_REGS_SIZE         SZ_16K
 
 #define MV78XX0_PCIE_IO_PHYS_BASE(i)   (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
  * Core-specific peripheral registers.
  */
 #define BRIDGE_VIRT_BASE       (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE       (MV78XX0_CORE_REGS_PHYS_BASE)
 
 /*
  * Register Map
index 5e90b9dcdef8789e10d71d2a8500604d53e57eeb..f5f061757deb54371e599730d8f38fc0b15c100e 100644 (file)
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
        return 0;
 }
 
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+       struct clk *clk;
+
+       /* Enable fec phy clock */
+       clk = clk_get_sys("enet_out", NULL);
+       if (!IS_ERR(clk))
+               clk_prepare_enable(clk);
+}
+
 static void __init apx4devkit_init(void)
 {
        mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
        phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
                        apx4devkit_phy_fixup);
 
+       apx4devkit_fec_phy_clk_enable();
        mx28_add_fec(0, &mx28_fec_pdata);
 
        mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
index 70a81f900bb5514d813c9f57c38ca64656798c4b..53c39d239d6e202c896ff9390eedfc0c24e60647 100644 (file)
@@ -97,11 +97,6 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
 
        gpmc_onenand_init(&board_onenand_data);
 }
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
 #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || \
index 8ca14e88a31af12f43d219fe6d8a6b63b61ec4f1..2c5d0ed75285153d8126e60a030f08630a277635 100644 (file)
@@ -83,11 +83,9 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
-       .mode           = MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
+#else
        .mode           = MUSB_HOST,
 #endif
        .set_power      = tusb_set_power,
index 79c6909eeb785ef24d71be68f076034c0cd476df..580fd17208dacc24dd51bbff63507763c0612730 100644 (file)
@@ -81,13 +81,13 @@ static u8 omap3_beagle_version;
 static struct {
        int mmc1_gpio_wp;
        int usb_pwr_level;
-       int reset_gpio;
+       int dvi_pd_gpio;
        int usr_button_gpio;
        int mmc_caps;
 } beagle_config = {
        .mmc1_gpio_wp = -EINVAL,
        .usb_pwr_level = GPIOF_OUT_INIT_LOW,
-       .reset_gpio = 129,
+       .dvi_pd_gpio = -EINVAL,
        .usr_button_gpio = 4,
        .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
 };
@@ -126,21 +126,21 @@ static void __init omap3_beagle_init_rev(void)
                printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
                beagle_config.mmc1_gpio_wp = 29;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 6:
                printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
                beagle_config.mmc1_gpio_wp = 23;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 5:
                printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
                omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
                beagle_config.mmc1_gpio_wp = 23;
-               beagle_config.reset_gpio = 170;
+               beagle_config.dvi_pd_gpio = 170;
                beagle_config.usr_button_gpio = 7;
                break;
        case 0:
@@ -274,11 +274,9 @@ static int beagle_twl_gpio_setup(struct device *dev,
                if (r)
                        pr_err("%s: unable to configure nDVI_PWR_EN\n",
                                __func__);
-               r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
-                                    "DVI_LDO_EN");
-               if (r)
-                       pr_err("%s: unable to configure DVI_LDO_EN\n",
-                               __func__);
+
+               beagle_config.dvi_pd_gpio = gpio + 2;
+
        } else {
                /*
                 * REVISIT: need ehci-omap hooks for external VBUS
@@ -287,7 +285,7 @@ static int beagle_twl_gpio_setup(struct device *dev,
                if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
                        pr_err("%s: unable to configure EHCI_nOC\n", __func__);
        }
-       dvi_panel.power_down_gpio = beagle_config.reset_gpio;
+       dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
 
        gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
                        "nEN_USB_PWR");
@@ -499,7 +497,7 @@ static void __init omap3_beagle_init(void)
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
        omap3_beagle_init_rev();
 
-       if (beagle_config.mmc1_gpio_wp != -EINVAL)
+       if (gpio_is_valid(beagle_config.mmc1_gpio_wp))
                omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
        mmc[0].caps = beagle_config.mmc_caps;
        omap_hsmmc_init(mmc);
@@ -510,15 +508,13 @@ static void __init omap3_beagle_init(void)
 
        platform_add_devices(omap3_beagle_devices,
                        ARRAY_SIZE(omap3_beagle_devices));
+       if (gpio_is_valid(beagle_config.dvi_pd_gpio))
+               omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
        omap_display_init(&beagle_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
                                  mt46h32m32lf6_sdrc_params);
 
-       omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-       /* REVISIT leave DVI powered down until it's needed ... */
-       gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
-
        usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
        omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
index 8fa2fc3a4c3c51e973eb07c91f3c687c86aca62a..779734d8ba37304417350cd246c52044a0238897 100644 (file)
@@ -494,8 +494,8 @@ static void __init overo_init(void)
 
        regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-       omap_hsmmc_init(mmc);
        overo_i2c_init();
+       omap_hsmmc_init(mmc);
        omap_display_init(&overo_dss_data);
        omap_serial_init();
        omap_sdrc_init(mt46h32m32lf6_sdrc_params,
index ff53deccecab09c588b83b663dc5cb781d278c2b..df2534de3361d5b9c2317db5cec7222e5993d2fb 100644 (file)
@@ -144,7 +144,6 @@ static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
        .release_resources = lis302_release,
        .st_min_limits = {-32, 3, 3},
        .st_max_limits = {-3, 32, 32},
-       .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
 };
 #endif
 
@@ -1030,7 +1029,6 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
        {
                I2C_BOARD_INFO("lis3lv02d", 0x1d),
                .platform_data = &rx51_lis3lv02d_data,
-               .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
        },
 #endif
 };
@@ -1056,6 +1054,10 @@ static int __init rx51_i2c_init(void)
        omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
        omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
+#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
+       rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
+       rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
+#endif
        omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
        return 0;
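
The rx51 change above moves the accelerometer interrupt numbers from compile-time OMAP_GPIO_IRQ() macros to runtime gpio_to_irq() calls, since GPIO interrupt numbers are no longer fixed constants at build time. A small sketch of the same move for a hypothetical board file (device name, bus number, and GPIO number are invented):

    /* Hypothetical board snippet, not from this patch. */
    #define EXAMPLE_SENSOR_GPIO     181     /* made-up GPIO number */

    static struct i2c_board_info example_sensor_info __initdata = {
            I2C_BOARD_INFO("example-sensor", 0x1d),
    };

    static int __init example_i2c_init(void)
    {
            /* Resolve the IRQ only now, once the GPIO-to-IRQ mapping exists. */
            example_sensor_info.irq = gpio_to_irq(EXAMPLE_SENSOR_GPIO);
            return i2c_register_board_info(3, &example_sensor_info, 1);
    }
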
index 4e1a3b0e8cc83d5d505abf7d789273c4f1a970a0..1efdec236ae89dac6671bbd8f09d7cffb2067deb 100644 (file)
@@ -3514,7 +3514,7 @@ int __init omap3xxx_clk_init(void)
        struct omap_clk *c;
        u32 cpu_clkflg = 0;
 
-       if (cpu_is_omap3517()) {
+       if (soc_is_am35xx()) {
                cpu_mask = RATE_IN_34XX;
                cpu_clkflg = CK_AM35XX;
        } else if (cpu_is_omap3630()) {
index 2172f660384889535c6d812dae30dacf56b18eec..ba6f9a0a43e9096776963ea1285eea34fb1a24f8 100644 (file)
@@ -84,6 +84,7 @@ static struct clk slimbus_clk = {
 
 static struct clk sys_32k_ck = {
        .name           = "sys_32k_ck",
+       .clkdm_name     = "prm_clkdm",
        .rate           = 32768,
        .ops            = &clkops_null,
 };
@@ -512,6 +513,7 @@ static struct clk ddrphy_ck = {
        .name           = "ddrphy_ck",
        .parent         = &dpll_core_m2_ck,
        .ops            = &clkops_null,
+       .clkdm_name     = "l3_emif_clkdm",
        .fixed_div      = 2,
        .recalc         = &omap_fixed_divisor_recalc,
 };
@@ -769,6 +771,7 @@ static const struct clksel dpll_mpu_m2_div[] = {
 static struct clk dpll_mpu_m2_ck = {
        .name           = "dpll_mpu_m2_ck",
        .parent         = &dpll_mpu_ck,
+       .clkdm_name     = "cm_clkdm",
        .clksel         = dpll_mpu_m2_div,
        .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_MPU,
        .clksel_mask    = OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -1149,6 +1152,7 @@ static const struct clksel l3_div_div[] = {
 static struct clk l3_div_ck = {
        .name           = "l3_div_ck",
        .parent         = &div_core_ck,
+       .clkdm_name     = "cm_clkdm",
        .clksel         = l3_div_div,
        .clksel_reg     = OMAP4430_CM_CLKSEL_CORE,
        .clksel_mask    = OMAP4430_CLKSEL_L3_MASK,
@@ -2824,6 +2828,7 @@ static const struct clksel trace_clk_div_div[] = {
 static struct clk trace_clk_div_ck = {
        .name           = "trace_clk_div_ck",
        .parent         = &pmd_trace_clk_mux_ck,
+       .clkdm_name     = "emu_sys_clkdm",
        .clksel         = trace_clk_div_div,
        .clksel_reg     = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
        .clksel_mask    = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
@@ -3412,9 +3417,12 @@ int __init omap4xxx_clk_init(void)
        if (cpu_is_omap443x()) {
                cpu_mask = RATE_IN_4430;
                cpu_clkflg = CK_443X;
-       } else if (cpu_is_omap446x()) {
+       } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
                cpu_mask = RATE_IN_4460 | RATE_IN_4430;
                cpu_clkflg = CK_446X | CK_443X;
+
+               if (cpu_is_omap447x())
+                       pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
        } else {
                return 0;
        }
index f7b58609bad888b6b276a524badf9518e450a709..6227e9505c2db6858c44bf2b9f30e0abe7ec0023 100644 (file)
  *
  * CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
  *     clockdomain.  (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ *     active whenever the MPU is active.  True for interconnects and
+ *     the WKUP clockdomains.
  */
 #define CLKDM_CAN_FORCE_SLEEP                  (1 << 0)
 #define CLKDM_CAN_FORCE_WAKEUP                 (1 << 1)
 #define CLKDM_CAN_ENABLE_AUTO                  (1 << 2)
 #define CLKDM_CAN_DISABLE_AUTO                 (1 << 3)
 #define CLKDM_NO_AUTODEPS                      (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU                  (1 << 5)
 
 #define CLKDM_CAN_HWSUP                (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
 #define CLKDM_CAN_SWSUP                (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
index 839145e1cfbea2bcf706e72be39da268e6778247..4972219653ce85b1bebcad5629ab92d733dfef45 100644 (file)
@@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
        .name           = "wkup_clkdm",
        .pwrdm          = { .name = "wkup_pwrdm" },
        .dep_bit        = OMAP_EN_WKUP_SHIFT,
+       .flags          = CLKDM_ACTIVE_WITH_MPU,
 };
index c534258474939e8efe3b96dd1c5ed096bd30f3ab..7f2133abe7d36e601c322fcec0a8f6217b500b46 100644 (file)
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
        .cm_inst          = OMAP4430_PRM_WKUP_CM_INST,
        .clkdm_offs       = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
        .dep_bit          = OMAP4430_L4WKUP_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP,
+       .flags            = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
 };
 
 static struct clockdomain emu_sys_44xx_clkdm = {
index a7bc096bd407445ed8e248c6c9803435ddc406eb..f24e3f7a2bbc248389ac234c4b80e9f2ef4d5e03 100644 (file)
  */
 #define MAX_MODULE_READY_TIME          2000
 
+/*
+ * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for
+ * the PRCM to request that a module enter the inactive state in the
+ * case of OMAP2 & 3.  In the case of OMAP4 this is the max duration
+ * in microseconds for the module to reach the inactive state from
+ * a functional state.
+ * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during
+ * kernel init.
+ */
+#define MAX_MODULE_DISABLE_TIME                5000
+
 #endif
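
MAX_MODULE_DISABLE_TIME is consumed by omap_test_timeout() in the next hunk; that helper is essentially a busy-wait that polls in one-microsecond steps and leaves the elapsed count in its last argument. A rough sketch of the pattern (not the real macro; module_is_idle() is hypothetical):

    /* Sketch of a microsecond-granularity poll with a bounded wait. */
    static int example_wait_module_idle(void)
    {
            int i = 0;

            while (!module_is_idle() && i < MAX_MODULE_DISABLE_TIME) {
                    udelay(1);      /* one microsecond per iteration */
                    i++;
            }

            return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
    }
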
index 8c86d294b1a326b6ea8bbf07239cb92983ba5f50..1a39945d9ff81fd5ed9a78f785c6fe08d3f1f939 100644 (file)
@@ -313,9 +313,9 @@ int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_off
 
        omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
                           CLKCTRL_IDLEST_DISABLED),
-                         MAX_MODULE_READY_TIME, i);
+                         MAX_MODULE_DISABLE_TIME, i);
 
-       return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+       return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
 }
 
 /**
index 54d49ddb9b81c9ad8680d7a111995bc3190f0b5e..5fb47a14f4ba85211010afa34c439a3f1c73b30c 100644 (file)
@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
                goto err;
        }
 
-       r = omap_device_register(pdev);
+       r = platform_device_add(pdev);
        if (r) {
-               pr_err("Could not register omap_device for %s\n", pdev_name);
+               pr_err("Could not register platform_device for %s\n", pdev_name);
                goto err;
        }
 
index 845309f146fe317fd82ec74dcc39a1158ce6d0aa..88ffa1e645cd948614afe61c17502533c64650a7 100644 (file)
@@ -20,6 +20,9 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+
+#include <asm/memblock.h>
+
 #include "cm2xxx_3xxx.h"
 #include "prm2xxx_3xxx.h"
 #ifdef CONFIG_BRIDGE_DVFS
index 0389b3264abe78fa65a978c78223a10574e42614..00486a8564fdace3131b4a8309bdeb067a16cece 100644 (file)
@@ -246,6 +246,17 @@ void __init omap3xxx_check_features(void)
 
        omap_features |= OMAP3_HAS_SDRC;
 
+       /*
+        * am35x fixups:
+        * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
+        *   reserved, so they read back as 0.  Unfortunately,
+        *   OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
+        *   mean that a feature is present even though it isn't, so clear
+        *   the incorrectly set feature bits.
+        */
+       if (soc_is_am35xx())
+               omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);
+
        /*
         * TODO: Get additional info (where applicable)
         *       e.g. Size of L2 cache.
index fdc4303be563169dedbd458bd8391a16e43e252b..6038a8c84b743af0ed1f205af643f3a8a0013b63 100644 (file)
@@ -149,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        ct->chip.irq_ack = omap_mask_ack_irq;
        ct->chip.irq_mask = irq_gc_mask_disable_reg;
        ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+       ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
 
        ct->regs.enable = INTC_MIR_CLEAR0;
        ct->regs.disable = INTC_MIR_SET0;
index 80e55c5c99988c2bf63d49a63bf9b562cfaff036..9fe6829f4c16f2dd0837862f5f4254ecba98230b 100644 (file)
@@ -41,6 +41,7 @@
 #include "control.h"
 #include "mux.h"
 #include "prm.h"
+#include "common.h"
 
 #define OMAP_MUX_BASE_OFFSET           0x30    /* Offset from CTRL_BASE */
 #define OMAP_MUX_BASE_SZ               0x5ca
@@ -217,8 +218,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
        return -ENODEV;
 }
 
-static int __init
-omap_mux_get_by_name(const char *muxname,
+int __init omap_mux_get_by_name(const char *muxname,
                        struct omap_mux_partition **found_partition,
                        struct omap_mux **found_mux)
 {
index 69fe060a0b755204875f42a9fd1e1ffc943736f9..471e62a74a166fb64a7486670a54854239cfb3b6 100644 (file)
@@ -59,6 +59,7 @@
 #define OMAP_PIN_OFF_WAKEUPENABLE      OMAP_WAKEUP_EN
 
 #define OMAP_MODE_GPIO(x)      (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+#define OMAP_MODE_UART(x)      (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
 
 /* Flags for omapX_mux_init */
 #define OMAP_PACKAGE_MASK              0xffff
@@ -225,8 +226,18 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
  */
 void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
 
+int omap_mux_get_by_name(const char *muxname,
+               struct omap_mux_partition **found_partition,
+               struct omap_mux **found_mux);
 #else
 
+static inline int omap_mux_get_by_name(const char *muxname,
+               struct omap_mux_partition **found_partition,
+               struct omap_mux **found_mux)
+{
+       return 0;
+}
+
 static inline int omap_mux_init_gpio(int gpio, int val)
 {
        return 0;
index bf86f7e8f91f5837869cb1b799f2b59ab4a81395..2d710f50fca2fafd9cac99d2f515220c41de410f 100644 (file)
@@ -530,7 +530,7 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
        if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
                _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
        if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
-               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+               _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v);
 
        /* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -1124,15 +1124,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
  * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
  * @oh: struct omap_hwmod *
  *
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle.  If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby.  No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes.  No return value.
  */
 static void _enable_sysc(struct omap_hwmod *oh)
 {
        u8 idlemode, sf;
        u32 v;
+       bool clkdm_act;
 
        if (!oh->class->sysc)
                return;
@@ -1141,8 +1144,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+               clkdm_act = ((oh->clkdm &&
+                             oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+                            (oh->_clk && oh->_clk->clkdm &&
+                             oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+               if (clkdm_act && !(oh->class->sysc->idlemodes &
+                                  (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
+                               HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1208,8 +1219,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                       HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+               /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
+               if (oh->flags & HWMOD_SWSUP_SIDLE ||
+                   !(oh->class->sysc->idlemodes &
+                     (SIDLE_SMART | SIDLE_SMART_WKUP)))
+                       idlemode = HWMOD_IDLEMODE_FORCE;
+               else
+                       idlemode = HWMOD_IDLEMODE_SMART;
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
index 950454a3fa314da4448c99eeaaf50f81201b382b..b7bcba5221ba260b31d0327b37442e52fb807143 100644 (file)
@@ -393,8 +393,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0004,
        .sysc_flags     = SYSC_HAS_SIDLEMODE,
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          SIDLE_SMART_WKUP),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
@@ -854,6 +853,11 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
        .name           = "dss_hdmi",
        .class          = &omap44xx_hdmi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
+       /*
+        * HDMI audio requires the no-idle mode, so the idle
+        * mode is supervised by software.
+        */
+       .flags          = HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
        .main_clk       = "dss_48mhz_clk",
@@ -1924,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1959,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1994,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2029,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
        { .role = "pad_fck", .clk = "pad_clks_ck" },
-       { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+       { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
 };
 
 static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3860,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
 };
 
 /* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
        .master         = &omap44xx_usb_host_fs_hwmod,
        .slave          = &omap44xx_l3_main_2_hwmod,
        .clk            = "l3_div_ck",
@@ -3918,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
 };
 
 /* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
        .master         = &omap44xx_aess_hwmod,
        .slave          = &omap44xx_l4_abe_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4009,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
 };
 
 /* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -4027,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
 };
 
 /* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
        .master         = &omap44xx_l4_abe_hwmod,
        .slave          = &omap44xx_aess_hwmod,
        .clk            = "ocp_abe_iclk",
@@ -5853,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
 };
 
 /* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
        .master         = &omap44xx_l4_cfg_hwmod,
        .slave          = &omap44xx_usb_host_fs_hwmod,
        .clk            = "l4_div_ck",
@@ -6010,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_iva__l3_main_2,
        &omap44xx_l3_main_1__l3_main_2,
        &omap44xx_l4_cfg__l3_main_2,
-       &omap44xx_usb_host_fs__l3_main_2,
+       /* &omap44xx_usb_host_fs__l3_main_2, */
        &omap44xx_usb_host_hs__l3_main_2,
        &omap44xx_usb_otg_hs__l3_main_2,
        &omap44xx_l3_main_1__l3_main_3,
        &omap44xx_l3_main_2__l3_main_3,
        &omap44xx_l4_cfg__l3_main_3,
-       &omap44xx_aess__l4_abe,
+       /* &omap44xx_aess__l4_abe, */
        &omap44xx_dsp__l4_abe,
        &omap44xx_l3_main_1__l4_abe,
        &omap44xx_mpu__l4_abe,
@@ -6025,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_cfg__l4_wkup,
        &omap44xx_mpu__mpu_private,
        &omap44xx_l4_cfg__ocp_wp_noc,
-       &omap44xx_l4_abe__aess,
-       &omap44xx_l4_abe__aess_dma,
+       /* &omap44xx_l4_abe__aess, */
+       /* &omap44xx_l4_abe__aess_dma, */
        &omap44xx_l3_main_2__c2c,
        &omap44xx_l4_wkup__counter_32k,
        &omap44xx_l4_cfg__ctrl_module_core,
@@ -6132,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_per__uart2,
        &omap44xx_l4_per__uart3,
        &omap44xx_l4_per__uart4,
-       &omap44xx_l4_cfg__usb_host_fs,
+       /* &omap44xx_l4_cfg__usb_host_fs, */
        &omap44xx_l4_cfg__usb_host_hs,
        &omap44xx_l4_cfg__usb_otg_hs,
        &omap44xx_l4_cfg__usb_tll_hs,
index a05a62f9ee5b2e7bce17e9b89fc4ce6ff0f18d2b..acc216491b8a5908656354f7c2ba7ffbfce8944a 100644 (file)
@@ -155,10 +155,11 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
        u8 multi = error & L3_ERROR_LOG_MULTI;
        u32 address = omap3_l3_decode_addr(error_addr);
 
-       WARN(true, "%s seen by %s %s at address %x\n",
+       pr_err("%s seen by %s %s at address %x\n",
                        omap3_l3_code_string(code),
                        omap3_l3_initiator_string(initid),
                        multi ? "Multiple Errors" : "", address);
+       WARN_ON(1);
 
        return IRQ_HANDLED;
 }
index 4c90477e6f82a91c5c4ddf140cd02cb718f69fc7..d52651a05daa6ce0686ec32a4915e17c8fdc9919 100644 (file)
@@ -239,21 +239,15 @@ void am35x_set_mode(u8 musb_mode)
 
        devconf2 &= ~CONF2_OTGMODE;
        switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
        case MUSB_HOST:         /* Force VBUS valid, ID = 0 */
                devconf2 |= CONF2_FORCE_HOST;
                break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        case MUSB_PERIPHERAL:   /* Force VBUS valid, ID = 1 */
                devconf2 |= CONF2_FORCE_DEVICE;
                break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
        case MUSB_OTG:          /* Don't override the VBUS/ID comparators */
                devconf2 |= CONF2_NO_OVERRIDE;
                break;
-#endif
        default:
                pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
        }
index a34023d0ca7c665627d96ba4109a8a0e17b4ec6e..3a595e8997245495672d703fc52c7a148ffb0a9f 100644 (file)
@@ -724,6 +724,7 @@ int __init omap3_pm_init(void)
        ret = request_irq(omap_prcm_event_to_irq("io"),
                _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
                omap3_pm_init);
+       enable_irq(omap_prcm_event_to_irq("io"));
 
        if (ret) {
                pr_err("pm: Failed to request pm_io irq\n");
index 9ce765407ad55d5ac9190af77cdb48338c338752..21cb74003a562f27bc2a5a8538093d92503c44d0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 
 #include "common.h"
 #include <plat/cpu.h>
@@ -303,8 +304,15 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask)
 
 static int __init omap3xxx_prcm_init(void)
 {
-       if (cpu_is_omap34xx())
-               return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
-       return 0;
+       int ret = 0;
+
+       if (cpu_is_omap34xx()) {
+               ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+               if (!ret)
+                       irq_set_status_flags(omap_prcm_event_to_irq("io"),
+                                            IRQ_NOAUTOEN);
+       }
+
+       return ret;
 }
 subsys_initcall(omap3xxx_prcm_init);
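
The prm and pm hunks above cooperate: omap3xxx_prcm_init() marks the PRCM "io" chained interrupt IRQ_NOAUTOEN so that a later request_irq() does not unmask it before the PM code is ready, and omap3_pm_init() then unmasks it explicitly with enable_irq(). A compact sketch of that pattern with placeholder names (my_irq, my_handler, and my_dev are not real symbols):

    /* Early setup: reserve the line but keep it masked across request_irq(). */
    irq_set_status_flags(my_irq, IRQ_NOAUTOEN);

    /* Later, once the handler's prerequisites are in place: */
    ret = request_irq(my_irq, my_handler, IRQF_SHARED | IRQF_NO_SUSPEND,
                      "my_irq", my_dev);
    if (!ret)
            enable_irq(my_irq);     /* now the interrupt may actually fire */
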
index 292d4aaca068e4c0971f03d1f1aabcd27c3ba1f5..c1b93c752d7013307b982422f0d871472a3d3eb0 100644 (file)
@@ -57,6 +57,7 @@ struct omap_uart_state {
 
        struct list_head node;
        struct omap_hwmod *oh;
+       struct omap_device_pad default_omap_uart_pads[2];
 };
 
 static LIST_HEAD(uart_list);
@@ -126,11 +127,70 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {}
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
+
+#define OMAP_UART_DEFAULT_PAD_NAME_LEN 28
+static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
+               tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
+
+static void  __init
+omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
+                               struct omap_uart_state *uart)
+{
+       uart->default_omap_uart_pads[0].name = rx_pad_name;
+       uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
+                                                       OMAP_DEVICE_PAD_WAKEUP;
+       uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
+                                                       OMAP_MUX_MODE0;
+       uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
+       uart->default_omap_uart_pads[1].name = tx_pad_name;
+       uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
+                                                       OMAP_MUX_MODE0;
+       bdata->pads = uart->default_omap_uart_pads;
+       bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
+}
+
+static void  __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+                                               struct omap_uart_state *uart)
 {
+       struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
+       struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
+       char *rx_fmt, *tx_fmt;
+       int uart_nr = bdata->id + 1;
+
+       if (bdata->id != 2) {
+               rx_fmt = "uart%d_rx.uart%d_rx";
+               tx_fmt = "uart%d_tx.uart%d_tx";
+       } else {
+               rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
+               tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
+       }
+
+       snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
+                       uart_nr, uart_nr);
+       snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
+                       uart_nr, uart_nr);
+
+       if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
+                       omap_mux_get_by_name
+                               (tx_pad_name, &tx_partition, &tx_mux) >= 0) {
+               u16 tx_mode, rx_mode;
+
+               tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
+               rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
+
+               /*
+                * Check whether the UART is used in its default TX/RX mode,
+                * i.e. mux mode0; if so, configure the RX pin for wakeup.
+                */
+               if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
+                       omap_serial_fill_uart_tx_rx_pads(bdata, uart);
+       }
 }
 #else
-static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
+               struct omap_uart_state *uart)
+{
+}
 #endif
 
 static char *cmdline_find_option(char *str)
@@ -287,8 +347,7 @@ void __init omap_serial_board_init(struct omap_uart_port_info *info)
                bdata.pads = NULL;
                bdata.pads_cnt = 0;
 
-               if (cpu_is_omap44xx() || cpu_is_omap34xx())
-                       omap_serial_fill_default_pads(&bdata);
+               omap_serial_check_wakeup(&bdata, uart);
 
                if (!info)
                        omap_serial_init_port(&bdata, NULL);
index 119d5a910f3a4a7ef902b5a2cb4054edc1278d56..43a979075338a76d03894e406952e9d6eb28c1ad 100644 (file)
@@ -32,6 +32,7 @@
 #include "twl-common.h"
 #include "pm.h"
 #include "voltage.h"
+#include "mux.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
                    struct twl6040_platform_data *twl6040_data, int twl6040_irq)
 {
        /* PMIC part*/
+       omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        strncpy(omap4_i2c1_board_info[0].type, pmic_type,
                sizeof(omap4_i2c1_board_info[0].type));
        omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
index b19d1b43c12e59ef678debd195225c52f97e1e5a..c4a576856661014ea3bec9acc70f80e32d62c33b 100644 (file)
@@ -41,12 +41,10 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_MUSB_OTG
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#else
        .mode           = MUSB_HOST,
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
-       .mode           = MUSB_PERIPHERAL,
 #endif
        /* .clock is set dynamically */
        .config         = &musb_config,
index db84a46ce7fd6da27ca899d3fcf326cae0a3ee79..805bea6edf1711adf0de68e8f68318bdd4c44b80 100644 (file)
@@ -300,7 +300,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
                printk(error, 3, status);
                return status;
        }
-       tusb_resources[2].start = irq + IH_GPIO_BASE;
+       tusb_resources[2].start = gpio_to_irq(irq);
 
        /* set up memory timings ... can speed them up later */
        if (!ps_refclk) {
index 96484bcd34ca66bf441cde80c8735cb591c29b40..11a3c1e9801f4bea759d6deb3ec9179b019c3650 100644 (file)
@@ -35,5 +35,5 @@
 #define MAIN_IRQ_MASK          (ORION5X_BRIDGE_VIRT_BASE | 0x204)
 
 #define TIMER_VIRT_BASE                (ORION5X_BRIDGE_VIRT_BASE | 0x300)
-
+#define TIMER_PHYS_BASE                (ORION5X_BRIDGE_PHYS_BASE | 0x300)
 #endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
new file mode 100644 (file)
index 0000000..1aa5d0a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-orion5x/include/mach/io.h
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
+
+#include <mach/orion5x.h>
+#include <asm/sizes.h>
+
+#define IO_SPACE_LIMIT         SZ_2M
+static inline void __iomem *__io(unsigned long addr)
+{
+       return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE);
+}
+
+#define __io(a)                         __io(a)
+#endif
index 2745f5d95b3fe085a30e6c6c6c11c6c6c5ec687b..683e085ce1624088f7ec138c1c4fa14c636a2ddd 100644 (file)
@@ -82,6 +82,7 @@
 #define  UART1_VIRT_BASE               (ORION5X_DEV_BUS_VIRT_BASE | 0x2100)
 
 #define ORION5X_BRIDGE_VIRT_BASE       (ORION5X_REGS_VIRT_BASE | 0x20000)
+#define ORION5X_BRIDGE_PHYS_BASE       (ORION5X_REGS_PHYS_BASE | 0x20000)
 
 #define ORION5X_PCI_VIRT_BASE          (ORION5X_REGS_VIRT_BASE | 0x30000)
 
index d09da6a746b8a15d212e0e1aabcaeacbf8b480cc..d3de84b0dcbed280a27d12092509f6a9df4e181e 100644 (file)
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO19_SSP2_SCLK,
        GPIO86_SSP2_RXD,
        GPIO87_SSP2_TXD,
-       GPIO88_GPIO,
+       GPIO88_GPIO | MFP_LPM_DRIVE_HIGH,       /* TSC2046_CS */
+
+       /* BQ24022 Regulator */
+       GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_nCHARGE_EN */
+       GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT,      /* BQ24022_ISET2 */
 
        /* HX4700 specific input GPIOs */
        GPIO12_GPIO | WAKEUP_ON_EDGE_RISE,      /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
        GPIO14_GPIO,    /* nWLAN_IRQ */
 
        /* HX4700 specific output GPIOs */
+       GPIO61_GPIO | MFP_LPM_DRIVE_HIGH,       /* W3220_nRESET */
+       GPIO71_GPIO | MFP_LPM_DRIVE_HIGH,       /* ASIC3_nRESET */
+       GPIO81_GPIO | MFP_LPM_DRIVE_HIGH,       /* CPU_GP_nRESET */
+       GPIO116_GPIO | MFP_LPM_DRIVE_HIGH,      /* CPU_HW_nRESET */
        GPIO102_GPIO | MFP_LPM_DRIVE_LOW,       /* SYNAPTICS_POWER_ON */
 
        GPIO10_GPIO,    /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
        { GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
        { GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
        { GPIO32_HX4700_RS232_ON,         GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+       { GPIO61_HX4700_W3220_nRESET,     GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
        { GPIO71_HX4700_ASIC3_nRESET,     GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+       { GPIO81_HX4700_CPU_GP_nRESET,    GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
        { GPIO82_HX4700_EUART_RESET,      GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+       { GPIO116_HX4700_CPU_HW_nRESET,   GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
 };
 
 static void __init hx4700_init(void)
 {
        int ret;
 
+       PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
        pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
        gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
        ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
index 414364eb426cf70d58a439b7e99b15e26c91a0c7..cb2883d553b5e53c320588eb740d435e111d0758 100644 (file)
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
 static struct clk s3c2440_clk_ac97 = {
        .name           = "ac97",
        .enable         = s3c2410_clkcon_enable,
-       .ctrlbit        = S3C2440_CLKCON_CAMERA,
+       .ctrlbit        = S3C2440_CLKCON_AC97,
 };
 
 static unsigned long  s3c2440_fclk_n_getrate(struct clk *clk)
index f31383c32f9cdfd121e65b1d4a026d7fbd3a2f01..df33909205e2ce6155553a8485512bdd4817422b 100644 (file)
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
        help
          This enables build of the TMU timer driver.
 
+config EM_TIMER_STI
+       bool "STI timer driver"
+       default y
+       help
+         This enables build of the STI timer driver.
+
 endmenu
 
 config SH_CLK_CPG
index 9e37026ef9ddb6b5aa2f12b3adcbacdc0b9fa838..9bd135531d76118feecbc2a02647344ce21b25c5 100644 (file)
@@ -779,6 +779,7 @@ DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
        .init_irq       = r8a7740_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = eva_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = eva_boards_compat_dt,
 MACHINE_END
index 7bc5e7d39f9bd12b20e175a66a32a21d27dc3623..6a33cf393428f730d9cc2b44633af997e4c7d3df 100644 (file)
@@ -80,6 +80,7 @@ DT_MACHINE_START(KZM9D_DT, "kzm9d")
        .init_irq       = emev2_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kzm9d_add_standard_devices,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = kzm9d_boards_compat_dt,
 MACHINE_END
index d8e33b682832e8279e06c1e1d27e6825ad638397..c0ae815e7beb18a9be40be4bb892bfb725af7b78 100644 (file)
@@ -455,6 +455,7 @@ DT_MACHINE_START(KZM9G_DT, "kzm9g")
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kzm_init,
+       .init_late      = shmobile_init_late,
        .timer          = &shmobile_timer,
        .dt_compat      = kzm9g_boards_compat_dt,
 MACHINE_END
index b577f7c44678ade9a79753742e6606b3eb120600..150122a446304071d23595c291b906b7b537707c 100644 (file)
@@ -1512,6 +1512,9 @@ static void __init mackerel_init(void)
        gpio_request(GPIO_FN_SDHID0_1, NULL);
        gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+       /* SDHI0 PORT172 card-detect IRQ26 */
+       gpio_request(GPIO_FN_IRQ26_172, NULL);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
        /* enable SDHI1 */
        gpio_request(GPIO_FN_SDHICMD1, NULL);
index 472d1f5361e5390a58ecde58966c075d0532bf6d..3946c4ba2aa813f0cea841b5c66b622c5f0d3950 100644 (file)
@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
 
 enum { MSTP001,
        MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
-       MSTP219,
+       MSTP219, MSTP218,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-       MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+       MSTP331, MSTP329, MSTP325, MSTP323,
        MSTP314, MSTP313, MSTP312, MSTP311,
        MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
        [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
        [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+       [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
        [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
        [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
        [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
        [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
        [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
-       [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
        [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
        CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
        CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
        CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
        CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
        CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
        CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
-       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
        CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
index 550b23df4fd44d9b3cb4f7db19606153482b8c3e..f04fad4ec4fb5406edc4966fca6f3850b8861c56 100644 (file)
@@ -35,6 +35,9 @@
 #define INT2SMSKCR3 0xfe7822ac
 #define INT2SMSKCR4 0xfe7822b0
 
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
 static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 {
        return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@ void __init r8a7779_init_irq(void)
        gic_init(0, 29, gic_dist_base, gic_cpu_base);
        gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+       /* route all interrupts to ARM */
+       __raw_writel(0xffffffff, INT2NTSR0);
+       __raw_writel(0x3fffffff, INT2NTSR1);
+
        /* unmask all known interrupts in INTCS2 */
        __raw_writel(0xfffffff0, INT2SMSKCR0);
        __raw_writel(0xfff7ffff, INT2SMSKCR1);
index bacdd667e3b192f908ef352f87d321c371a7d9a7..fde0d23121dc6e14acd614504d054d0c83f778c6 100644 (file)
 #include <mach/common.h>
 #include <mach/emev2.h>
 
+#ifdef CONFIG_ARCH_SH73A0
 #define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
                        of_machine_is_compatible("renesas,sh73a0"))
+#else
+#define is_sh73a0() (0)
+#endif
+
 #define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
 #define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
 
 static unsigned int __init shmobile_smp_get_core_count(void)
 {
index 6a4bd582c028e8b890e926c73d9b292c941b1689..fafce9ce8218c440028b6083a2254c4452b1a223 100644 (file)
@@ -484,7 +484,7 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
        },
 };
 
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
 
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
        {
index ea1564609bd4229869be3cff80ea80111202f907..9e3ae6bfe50dc442c67a79387c2761e9ddf801db 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header spear13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 383ab04dc6c97bc295ff34b3ecac822e1e7c7389..d50bdb6059256bf69bcce0b8c164664acba96203 100644 (file)
@@ -4,7 +4,7 @@
  * DMA information for SPEAr13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 6d8c45b9f2984d21c2eb61ad822fbaf563fbe5c4..dac57fd0cdfdcc865e62f1f0d61b1d98919099d4 100644 (file)
@@ -4,7 +4,7 @@
  * spear13xx machine family generic header file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index cd6f4f86a56b96d702feaf2fc33f002156671491..85f176311f63e68913bfbecb4d5b612f9205096c 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f542a24aa5f2b01675b8ec2afaabc8472e0a2340..271a62b4cd314e9d0773db8dacc175ce3c568f3d 100644 (file)
@@ -4,7 +4,7 @@
  * IRQ helper macros for spear13xx machine family
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 30c57ef726866135947239f80078c40461bd390a..65f27def239b804348de6690ca5b2f4718229ed3 100644 (file)
@@ -4,7 +4,7 @@
  * spear13xx Machine family specific definition
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 31af3e8d976e10162f0a3bc25eb0030cbf68bbb6..3a58b8284a6aa8345e5b094ab581284586154a95 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c7840896ae6e65c25f4a471a5ab5ac8efeba84f9..70fe72f05deacb66cf16ea4c8fa0f887f3f28b84 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fefd15b2f380609b6b526a46113cd3c64ee4f718..732d29bc73307231ccc9009ba1b80195a7153588 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ee38cbc56869cbce720d785675293b9827e247a2..81e4ed76ad0652a278255ba2b4dda307628307d3 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1340 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 50b349ae863d9edeedc7addcce6772646f468210..cf936b106e27b23d069f1a726f0cd474e7cd60d4 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr13XX machines common source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 590519f10d6ebc415c254518b2202a66661e0548..0a6381fad5d9a40a0b24250c724d2e315bd7d585 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header spear3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4a95b9453c2a5ce67bb3f636c016d9c6c4e246bc..ce19113ca791ae1478cc0b846bdc7bf511cf7533 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family generic header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 451b2081bfc966f841966ff27cf08c2637fcabc7..2ac74c6db7f1508fc71106b17d186c0feaa50971 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar<viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51bd62a0254c814acca905b44696e834700c9cb2..803de76f5f36fb5535c632c385869d70c86e61b8 100644 (file)
@@ -4,7 +4,7 @@
  * IRQ helper macros for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 18e2ac576f25cc209880e4f239f40b92fcadee89..6309bf68d6f80d8c00aad241b3bdd6bc97ccbcf5 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51eb953148a998733f656670cb24705b03f0fbc9..8cca95193d4d6b8058f23a26fdabb0cc08338ff5 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3xx Machine family specific definition
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a38cc9de876f73af957e13f9bc2738a3fe016b4d..9f5d08bd0c4441415d69cd82e788731b5f7f5214 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machine family specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 53ba8bbc0dfa9e63fa7142fc2fadb1c647d8c45e..b909b011f7c84c34d52701a70adc2201f246f97a 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f74a05bdb829d5edb37c76bbb6395d39d661a431..0f882ecb7d810f4a98700fa6f46c913eec0860c9 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr300 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 84dfb09007470062cc6f14633c1d455e98562ca0..bbcf4571d361caefd90c2f685365a5a394fb56c5 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr310 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a88fa841d29d021068d644a16d7c60eb7d7df93f..88d483bcd66a37665dc130bd5f0da016192a950a 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr320 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f22419ed74a82cf394c69d0e5d51d2e53fad447d..66db5f13af844360b1b5845d1b06c13a6d51445a 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machines common source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -87,7 +87,7 @@ void __init spear3xx_map_io(void)
 
 static void __init spear3xx_timer_init(void)
 {
-       char pclk_name[] = "pll3_48m_clk";
+       char pclk_name[] = "pll3_clk";
        struct clk *gpt_clk, *pclk;
 
        spear3xx_clk_init();
index 3a789dbb69f74c0489a264e9dd20560d97a0be32..d42cefc0356dcb0086f83d568eacc026a8365547 100644 (file)
@@ -4,7 +4,7 @@
  * GPIO macros for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 179e45774b3a9722182b02140ff52566e76bdd53..c34acc201d3463fc39fcff6d410a34f9b3e0d42b 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr6xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 2e2e3596583e9072318dbf895644c5aa67a90e38..9af67d003c62ce6f167307020fcd2f52fa181e8a 100644 (file)
@@ -423,7 +423,7 @@ void __init spear6xx_map_io(void)
 
 static void __init spear6xx_timer_init(void)
 {
-       char pclk_name[] = "pll3_48m_clk";
+       char pclk_name[] = "pll3_clk";
        struct clk *gpt_clk, *pclk;
 
        spear6xx_clk_init();
index 4d6a2ee99c3b37df6a57e186cc0530da08e6ec98..5beb7ebe2948ff1e706159bf958e9118b3906f7e 100644 (file)
@@ -33,7 +33,7 @@
 
 static bool is_enabled;
 
-static void tegra_cpu_reset_handler_enable(void)
+static void __init tegra_cpu_reset_handler_enable(void)
 {
        void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
        void __iomem *evp_cpu_reset =
index 9c74ac54584955be16798a69c1a997247d5c70b3..4fd93f5c49ec359f6889f054fe33cd076c7bf822 100644 (file)
@@ -580,43 +580,12 @@ static void ux500_uart0_reset(void)
        udelay(1);
 }
 
-/* This needs to be referenced by callbacks */
-struct pinctrl *u0_p;
-struct pinctrl_state *u0_def;
-struct pinctrl_state *u0_sleep;
-
-static void ux500_uart0_init(void)
-{
-       int ret;
-
-       if (IS_ERR(u0_p) || IS_ERR(u0_def))
-               return;
-
-       ret = pinctrl_select_state(u0_p, u0_def);
-       if (ret)
-               pr_err("could not set UART0 defstate\n");
-}
-
-static void ux500_uart0_exit(void)
-{
-       int ret;
-
-       if (IS_ERR(u0_p) || IS_ERR(u0_sleep))
-               return;
-
-       ret = pinctrl_select_state(u0_p, u0_sleep);
-       if (ret)
-               pr_err("could not set UART0 idlestate\n");
-}
-
 static struct amba_pl011_data uart0_plat = {
 #ifdef CONFIG_STE_DMA40
        .dma_filter = stedma40_filter,
        .dma_rx_param = &uart0_dma_cfg_rx,
        .dma_tx_param = &uart0_dma_cfg_tx,
 #endif
-       .init = ux500_uart0_init,
-       .exit = ux500_uart0_exit,
        .reset = ux500_uart0_reset,
 };
 
@@ -638,28 +607,7 @@ static struct amba_pl011_data uart2_plat = {
 
 static void __init mop500_uart_init(struct device *parent)
 {
-       struct amba_device *uart0_device;
-
-       uart0_device = db8500_add_uart0(parent, &uart0_plat);
-       if (uart0_device) {
-               u0_p = pinctrl_get(&uart0_device->dev);
-               if (IS_ERR(u0_p))
-                       dev_err(&uart0_device->dev,
-                               "could not get UART0 pinctrl\n");
-               else {
-                       u0_def = pinctrl_lookup_state(u0_p,
-                                                     PINCTRL_STATE_DEFAULT);
-                       if (IS_ERR(u0_def)) {
-                               dev_err(&uart0_device->dev,
-                                       "could not get UART0 defstate\n");
-                       }
-                       u0_sleep = pinctrl_lookup_state(u0_p,
-                                                       PINCTRL_STATE_SLEEP);
-                       if (IS_ERR(u0_sleep))
-                               dev_err(&uart0_device->dev,
-                                       "could not get UART0 idlestate\n");
-               }
-       }
+       db8500_add_uart0(parent, &uart0_plat);
        db8500_add_uart1(parent, &uart1_plat);
        db8500_add_uart2(parent, &uart2_plat);
 }
@@ -677,11 +625,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
        &ab8500_device,
 };
 
-static struct platform_device *snowball_of_platform_devs[] __initdata = {
-       &snowball_led_dev,
-       &snowball_key_dev,
-};
-
 static void __init mop500_init_machine(void)
 {
        struct device *parent = NULL;
@@ -821,6 +764,11 @@ MACHINE_END
 
 #ifdef CONFIG_MACH_UX500_DT
 
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+       &snowball_led_dev,
+       &snowball_key_dev,
+};
+
 struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        /* Requires DMA and call-back bindings. */
        OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
@@ -838,6 +786,8 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
        OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+       /* Requires device name bindings. */
+       OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
        {},
 };
 
index 741e71feca784134e179a11d0d4389fc80b70a32..66e7f00884ab4b4443d51f56c211ae7ac6385f49 100644 (file)
@@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
 
        /* TODO: Once MTU has been DT:ed place code above into else. */
        if (of_have_populated_dt()) {
+#ifdef CONFIG_OF
                np = of_find_matching_node(NULL, prcmu_timer_of_match);
                if (!np)
+#endif
                        goto dt_fail;
 
                tmp_base = of_iomap(np, 0);
index cf4687ee2a7bafe581cc32bd67dfa6ebfcff275e..cd8ea3588f93d3db2b557def5d0e844cf101b380 100644 (file)
@@ -169,26 +169,13 @@ static struct map_desc versatile_io_desc[] __initdata = {
                .pfn            = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
                .length         = VERSATILE_PCI_CFG_BASE_SIZE,
                .type           = MT_DEVICE
-       },
-#if 0
-       {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE0,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
-               .length         = SZ_16M,
-               .type           = MT_DEVICE
        }, {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE1,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1),
-               .length         = SZ_16M,
-               .type           = MT_DEVICE
-       }, {
-               .virtual        =  VERSATILE_PCI_VIRT_MEM_BASE2,
-               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
-               .length         = SZ_16M,
+               .virtual        =  (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0,
+               .pfn            = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
+               .length         = IO_SPACE_LIMIT,
                .type           = MT_DEVICE
        },
 #endif
-#endif
 };
 
 void __init versatile_map_io(void)
index 4d4973dd8fba88f5542c6898390f940ae9f8399e..408e58da46c641a81098622a028956f38e4e4253 100644 (file)
@@ -29,8 +29,9 @@
  */
 #define VERSATILE_PCI_VIRT_BASE                (void __iomem *)0xe8000000ul
 #define VERSATILE_PCI_CFG_VIRT_BASE    (void __iomem *)0xe9000000ul
+#define VERSATILE_PCI_VIRT_MEM_BASE0   (void __iomem *)PCIO_BASE
 
-/* macro to get at IO space when running virtually */
+/* macro to get at MMIO space when running virtually */
 #define IO_ADDRESS(x)          (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
 
 #define __io_address(n)                ((void __iomem __force *)IO_ADDRESS(n))
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
new file mode 100644 (file)
index 0000000..0406513
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *  arch/arm/mach-versatile/include/mach/io.h
+ *
+ *  Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define PCIO_BASE      0xeb000000ul
+
+#define __io(a)                ((a) + PCIO_BASE)
+
+#endif
index 15c6a00000ec4604f7f491c2c11fdebccbc6982f..e95bf84cc837550650ccc053e5afc72486d6682a 100644 (file)
@@ -169,11 +169,18 @@ static struct pci_ops pci_versatile_ops = {
        .write  = versatile_write_config,
 };
 
+static struct resource io_port = {
+       .name   = "PCI",
+       .start  = 0,
+       .end    = IO_SPACE_LIMIT,
+       .flags  = IORESOURCE_IO,
+};
+
 static struct resource io_mem = {
        .name   = "PCI I/O space",
        .start  = VERSATILE_PCI_MEM_BASE0,
        .end    = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
-       .flags  = IORESOURCE_IO,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct resource non_mem = {
@@ -200,6 +207,12 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
                       "memory region (%d)\n", ret);
                goto out;
        }
+       ret = request_resource(&ioport_resource, &io_port);
+       if (ret) {
+               printk(KERN_ERR "PCI: unable to allocate I/O "
+                      "port region (%d)\n", ret);
+               goto out;
+       }
        ret = request_resource(&iomem_resource, &non_mem);
        if (ret) {
                printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
@@ -218,7 +231,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
         * the mem resource for this bus
         * the prefetch mem resource for this bus
         */
-       pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset);
+       pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset);
        pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
 
@@ -249,6 +262,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
 
        if (nr == 0) {
                sys->mem_offset = 0;
+               sys->io_offset = 0;
                ret = pci_versatile_setup_resources(sys);
                if (ret < 0) {
                        printk("pci_versatile_setup: resources... oops?\n");
@@ -325,7 +339,6 @@ void __init pci_versatile_preinit(void)
 static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq;
-       int devslot = PCI_SLOT(dev->devfn);
 
        /* slot,  pin,  irq
         *  24     1     27
index ea6b43154090a2af00cbb168c7ef2d3a0589b2b2..655878bcc96d265a2ec81f87112ba48d5b5a037e 100644 (file)
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
        unsigned long base = consistent_base;
        unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+       if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
                return 0;
-#endif
 
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
@@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
        .vm_list        = LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
        struct page *page;
        void *ptr;
 
-       if (cpu_architecture() < CPU_ARCH_ARMv6)
+       if (!IS_ENABLED(CONFIG_CMA))
                return 0;
 
        ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        if (arch_is_coherent() || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (cpu_architecture() < CPU_ARCH_ARMv6)
+       else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
        else if (gfp & GFP_ATOMIC)
                addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
        if (arch_is_coherent() || nommu()) {
                __dma_free_buffer(page, size);
-       } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+       } else if (!IS_ENABLED(CONFIG_CMA)) {
                __dma_free_remap(cpu_addr, size);
                __dma_free_buffer(page, size);
        } else {
@@ -1069,7 +1067,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
                return NULL;
 
        while (count) {
-               int j, order = __ffs(count);
+               int j, order = __fls(count);
 
                pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
                while (!pages[i] && order)
@@ -1093,7 +1091,7 @@ error:
        while (--i)
                if (pages[i])
                        __free_pages(pages[i], 0);
-       if (array_size < PAGE_SIZE)
+       if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
@@ -1108,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
        for (i = 0; i < count; i++)
                if (pages[i])
                        __free_pages(pages[i], 0);
-       if (array_size < PAGE_SIZE)
+       if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
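The __iommu_alloc_buffer() hunk above switches the chunk order from __ffs(count) to __fls(count). As a purely arithmetic illustration (stand-in helpers built on GCC builtins, not the kernel's bitops), __ffs picks the lowest set bit and __fls the highest, so __fls selects the largest power-of-two chunk that still fits in the remaining page count:

	/*
	 * Userspace sketch only: ffs_idx()/fls_idx() stand in for the kernel's
	 * __ffs()/__fls() from <linux/bitops.h>.
	 */
	#include <stdio.h>

	static int ffs_idx(unsigned int x) { return __builtin_ctz(x); }      /* lowest set bit */
	static int fls_idx(unsigned int x) { return 31 - __builtin_clz(x); } /* highest set bit */

	int main(void)
	{
		unsigned int count = 6;	/* pages still to allocate: 0b110 */

		printf("__ffs(%u) -> order %d (%u pages per chunk)\n",
		       count, ffs_idx(count), 1u << ffs_idx(count));
		printf("__fls(%u) -> order %d (%u pages per chunk)\n",
		       count, fls_idx(count), 1u << fls_idx(count));
		/* 2^__fls(count) is always <= count, so the allocation loop
		 * above never grabs more pages than it still needs. */
		return 0;
	}

For count = 6 this prints order 1 (2 pages) for __ffs but order 2 (4 pages) for __fls, i.e. the big chunks are carved out first.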
index c21d06c7dd7ec0ae4bdf036b22b74983aa2e5c6f..f54d59219764bc314b1cd10c587ea76f87fd70a9 100644 (file)
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
index 93dc0c17cdcbddf0f5294be8ea6c022aa693ca1f..2e8a1efdf7b85f3443294b396ec5673496dd635d 100644 (file)
@@ -62,9 +62,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
index e5dad60b558b468315294b2c8b95c70193b6f74d..cf4528d5177448fb79cb5dc6689c873c2dbddb4a 100644 (file)
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        }
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+       struct vm_struct *vm;
+
+       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm->addr = (void *)addr;
+       vm->size = SECTION_SIZE;
+       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->caller = pmd_empty_section_gap;
+       vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+       struct vm_struct *vm;
+       unsigned long addr, next = 0;
+       pmd_t *pmd;
+
+       /* we're still single threaded hence no lock needed here */
+       for (vm = vmlist; vm; vm = vm->next) {
+               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                       continue;
+               addr = (unsigned long)vm->addr;
+               if (addr < next)
+                       continue;
+
+               /*
+                * Check if this vm starts on an odd section boundary.
+                * If so and the first section entry for this PMD is free
+                * then we block the corresponding virtual address.
+                */
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr);
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr & PMD_MASK);
+               }
+
+               /*
+                * Then check if this vm ends on an odd section boundary.
+                * If so and the second section entry for this PMD is empty
+                * then we block the corresponding virtual address.
+                */
+               addr += vm->size;
+               if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+                       pmd = pmd_off_k(addr) + 1;
+                       if (pmd_none(*pmd))
+                               pmd_empty_section_gap(addr);
+               }
+
+               /* no need to look at any vm entry until we hit the next PMD */
+               next = (addr + PMD_SIZE - 1) & PMD_MASK;
+       }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
        if (mdesc->map_io)
                mdesc->map_io();
+       fill_pmd_gaps();
 
        /*
         * Finally flush the caches and tlb to ensure that we're in a
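The comment block added above explains the check used by fill_pmd_gaps(): with 2 MB Linux PMDs made of two 1 MB section entries, a static mapping whose start or end sits 1 MB into a PMD leaves the other section entry unused. A standalone sketch of that address test, with the constants hard-coded purely for illustration:

	#include <stdio.h>

	#define SECTION_SIZE	0x100000UL		/* 1 MB section */
	#define PMD_SIZE	(2 * SECTION_SIZE)	/* 2 MB PMD = two sections */
	#define PMD_MASK	(~(PMD_SIZE - 1))

	int main(void)
	{
		unsigned long addrs[] = { 0xf8000000UL, 0xf8100000UL };
		int i;

		for (i = 0; i < 2; i++) {
			unsigned long addr = addrs[i];

			/* A 1 MB offset inside the PMD means the mapping
			 * starts on the second section entry, so the first
			 * one is the potentially half-initialized gap. */
			printf("0x%08lx starts on the %s section of its PMD\n",
			       addr,
			       (addr & ~PMD_MASK) == SECTION_SIZE ? "odd" : "even");
		}
		return 0;
	}

Here 0xf8000000 lands on the even half while 0xf8100000 lands on the odd half, which is exactly the case the dummy vm entry is inserted for.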
index 62135849f48b75de671a045125cd2a670cb5c59c..c641fb6850170be36e7e47846a0fe4562af4fc52 100644 (file)
@@ -762,6 +762,11 @@ b_epilogue:
                        update_on_xread(ctx);
                        emit(ARM_MOV_R(r_A, r_X), ctx);
                        break;
+               case BPF_S_ANC_ALU_XOR_X:
+                       /* A ^= X */
+                       update_on_xread(ctx);
+                       emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+                       break;
                case BPF_S_ANC_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->seen |= SEEN_SKB;
index 99ae5e3f46d2d7b29a755be7935178626aa471fe..7fa2f7d3cb90a4329f2f844b6555a9d81a42f3a8 100644 (file)
@@ -68,6 +68,8 @@
 #define ARM_INST_CMP_R         0x01500000
 #define ARM_INST_CMP_I         0x03500000
 
+#define ARM_INST_EOR_R         0x00200000
+
 #define ARM_INST_LDRB_I                0x05d00000
 #define ARM_INST_LDRB_R                0x07d00000
 #define ARM_INST_LDRH_I                0x01d000b0
 #define ARM_CMP_R(rn, rm)      _AL3_R(ARM_INST_CMP, 0, rn, rm)
 #define ARM_CMP_I(rn, imm)     _AL3_I(ARM_INST_CMP, 0, rn, imm)
 
+#define ARM_EOR_R(rd, rn, rm)  _AL3_R(ARM_INST_EOR, rd, rn, rm)
+
 #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
                                 | (off))
 #define ARM_LDRB_I(rt, rn, off)        (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
index 9129c9e7d532b2a08f2f464e489aeabad5f1badf..88726f4dbbfa60b774e942176056c39f0b31fc66 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/irq.h>
 #include <linux/clockchips.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #include <mach/hardware.h>
 #include <asm/mach/time.h>
@@ -201,8 +202,16 @@ static int __init epit_clockevent_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init epit_timer_init(void __iomem *base, int irq)
 {
+       struct clk *timer_clk;
+
+       timer_clk = clk_get_sys("imx-epit.0", NULL);
+       if (IS_ERR(timer_clk)) {
+               pr_err("i.MX epit: unable to get clk\n");
+               return;
+       }
+
        clk_prepare_enable(timer_clk);
 
        timer_base = base;
index cf663d84e7c1d1397291e4667c526087c3aaca56..e429ca1b814a179522bdf2bc9bb9e514ac136efc 100644 (file)
@@ -54,8 +54,8 @@ extern void imx50_soc_init(void);
 extern void imx51_soc_init(void);
 extern void imx53_soc_init(void);
 extern void imx51_init_late(void);
-extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
-extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
+extern void epit_timer_init(void __iomem *base, int irq);
+extern void mxc_timer_init(void __iomem *, int);
 extern int mx1_clocks_init(unsigned long fref);
 extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
 extern int mx25_clocks_init(void);
index 7ded6f1f74bc63f0e701d4d2101df2e0ca003cf8..3c080a32dbf580d7669cc84adac7e5c510d76f6b 100644 (file)
@@ -23,6 +23,7 @@
 #ifndef __MACH_MX2_CAM_H_
 #define __MACH_MX2_CAM_H_
 
+#define MX2_CAMERA_SWAP16              (1 << 0)
 #define MX2_CAMERA_EXT_VSYNC           (1 << 1)
 #define MX2_CAMERA_CCIR                        (1 << 2)
 #define MX2_CAMERA_CCIR_INTERLACE      (1 << 3)
@@ -30,6 +31,7 @@
 #define MX2_CAMERA_GATED_CLOCK         (1 << 5)
 #define MX2_CAMERA_INV_DATA            (1 << 6)
 #define MX2_CAMERA_PCLK_SAMPLE_RISING  (1 << 7)
+#define MX2_CAMERA_PACK_DIR_MSB                (1 << 8)
 
 /**
  * struct mx2_camera_platform_data - optional platform data for mx2_camera
index 99f958ca6cb8c34ac650896418d1dc8e1a4cd247..00e8e659e66762104fc487cada9bbd8e81968df8 100644 (file)
@@ -58,6 +58,7 @@
 /* MX31, MX35, MX25, MX5 */
 #define V2_TCTL_WAITEN         (1 << 3) /* Wait enable mode */
 #define V2_TCTL_CLK_IPG                (1 << 6)
+#define V2_TCTL_CLK_PER                (2 << 6)
 #define V2_TCTL_FRR            (1 << 9)
 #define V2_IR                  0x0c
 #define V2_TSTAT               0x08
@@ -280,23 +281,22 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
        return 0;
 }
 
-void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
+void __init mxc_timer_init(void __iomem *base, int irq)
 {
        uint32_t tctl_val;
+       struct clk *timer_clk;
        struct clk *timer_ipg_clk;
 
-       if (!timer_clk) {
-               timer_clk = clk_get_sys("imx-gpt.0", "per");
-               if (IS_ERR(timer_clk)) {
-                       pr_err("i.MX timer: unable to get clk\n");
-                       return;
-               }
-
-               timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
-               if (!IS_ERR(timer_ipg_clk))
-                       clk_prepare_enable(timer_ipg_clk);
+       timer_clk = clk_get_sys("imx-gpt.0", "per");
+       if (IS_ERR(timer_clk)) {
+               pr_err("i.MX timer: unable to get clk\n");
+               return;
        }
 
+       timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+       if (!IS_ERR(timer_ipg_clk))
+               clk_prepare_enable(timer_ipg_clk);
+
        clk_prepare_enable(timer_clk);
 
        timer_base = base;
@@ -309,7 +309,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
        __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
 
        if (timer_is_v2())
-               tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
+               tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        else
                tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
 
index 62ec5c452792706407922b5ea0173009f6247622..706b7e29397f5ebfc87a5de092eb4fd330a635ba 100644 (file)
@@ -461,6 +461,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
        struct clk *c;
        struct clk *pa;
 
+       mutex_lock(&clocks_mutex);
        seq_printf(s, "%-30s %-30s %-10s %s\n",
                "clock-name", "parent-name", "rate", "use-count");
 
@@ -469,6 +470,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
                seq_printf(s, "%-30s %-30s %-10lu %d\n",
                        c->name, pa ? pa->name : "none", c->rate, c->usecount);
        }
+       mutex_unlock(&clocks_mutex);
 
        return 0;
 }
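The hunk above takes clocks_mutex around the clock-list walk in the debugfs summary so the list cannot change mid-iteration. A minimal userspace analogue of the same walk-under-lock pattern (pthread mutex and a made-up clk struct, not the OMAP code):

	#include <pthread.h>
	#include <stdio.h>

	struct clk { const char *name; unsigned long rate; struct clk *next; };

	static pthread_mutex_t clocks_mutex = PTHREAD_MUTEX_INITIALIZER;
	static struct clk *clocks;	/* shared list, may be modified elsewhere */

	static void clk_summary(void)
	{
		struct clk *c;

		/* Hold the lock for the whole traversal so entries cannot be
		 * added or removed underneath the iteration. */
		pthread_mutex_lock(&clocks_mutex);
		for (c = clocks; c; c = c->next)
			printf("%-30s %-10lu\n", c->name, c->rate);
		pthread_mutex_unlock(&clocks_mutex);
	}

	int main(void)
	{
		struct clk uart = { "uart_fck", 48000000UL, NULL };

		clocks = &uart;
		clk_summary();
		return 0;
	}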
index 297245dba66e4c286a73f08b90e39aa062739155..de6c0a08f4615d6fba3f07b316285f9a9b47095a 100644 (file)
@@ -252,8 +252,6 @@ IS_AM_SUBCLASS(335x, 0x335)
  * cpu_is_omap2423():  True for OMAP2423
  * cpu_is_omap2430():  True for OMAP2430
  * cpu_is_omap3430():  True for OMAP3430
- * cpu_is_omap3505():  True for OMAP3505
- * cpu_is_omap3517():  True for OMAP3517
  */
 #define GET_OMAP_TYPE  ((omap_rev() >> 16) & 0xffff)
 
@@ -277,8 +275,6 @@ IS_OMAP_TYPE(2422, 0x2422)
 IS_OMAP_TYPE(2423, 0x2423)
 IS_OMAP_TYPE(2430, 0x2430)
 IS_OMAP_TYPE(3430, 0x3430)
-IS_OMAP_TYPE(3505, 0x3517)
-IS_OMAP_TYPE(3517, 0x3517)
 
 #define cpu_is_omap310()               0
 #define cpu_is_omap730()               0
@@ -293,12 +289,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define cpu_is_omap2422()              0
 #define cpu_is_omap2423()              0
 #define cpu_is_omap2430()              0
-#define cpu_is_omap3503()              0
-#define cpu_is_omap3515()              0
-#define cpu_is_omap3525()              0
-#define cpu_is_omap3530()              0
-#define cpu_is_omap3505()              0
-#define cpu_is_omap3517()              0
 #define cpu_is_omap3430()              0
 #define cpu_is_omap3630()              0
 
@@ -350,12 +340,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 
 #if defined(CONFIG_ARCH_OMAP3)
 # undef cpu_is_omap3430
-# undef cpu_is_omap3503
-# undef cpu_is_omap3515
-# undef cpu_is_omap3525
-# undef cpu_is_omap3530
-# undef cpu_is_omap3505
-# undef cpu_is_omap3517
 # undef cpu_is_ti81xx
 # undef cpu_is_ti816x
 # undef cpu_is_ti814x
@@ -363,19 +347,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 # undef cpu_is_am33xx
 # undef cpu_is_am335x
 # define cpu_is_omap3430()             is_omap3430()
-# define cpu_is_omap3503()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_iva()) &&   \
-                                               (!omap3_has_sgx()))
-# define cpu_is_omap3515()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_iva()) &&   \
-                                               (omap3_has_sgx()))
-# define cpu_is_omap3525()             (cpu_is_omap3430() &&           \
-                                               (!omap3_has_sgx()) &&   \
-                                               (omap3_has_iva()))
-# define cpu_is_omap3530()             (cpu_is_omap3430())
-# define cpu_is_omap3517()             is_omap3517()
-# define cpu_is_omap3505()             (cpu_is_omap3517() &&           \
-                                               !omap3_has_sgx())
 # undef cpu_is_omap3630
 # define cpu_is_omap3630()             is_omap363x()
 # define cpu_is_ti81xx()               is_ti81xx()
@@ -424,10 +395,6 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP3630_REV_ES1_1     (OMAP363X_CLASS | (0x1 << 8))
 #define OMAP3630_REV_ES1_2     (OMAP363X_CLASS | (0x2 << 8))
 
-#define OMAP3517_CLASS         0x35170034
-#define OMAP3517_REV_ES1_0     OMAP3517_CLASS
-#define OMAP3517_REV_ES1_1     (OMAP3517_CLASS | (0x1 << 8))
-
 #define TI816X_CLASS           0x81600034
 #define TI8168_REV_ES1_0       TI816X_CLASS
 #define TI8168_REV_ES1_1       (TI816X_CLASS | (0x1 << 8))
index a7754a886d428af5599c00c4fefdcb4d2d058d12..5493bd95da5ee9f6988bb386a84151511a30b931 100644 (file)
@@ -172,8 +172,7 @@ struct omap_mmc_platform_data {
 extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
                                        int is_closed);
 
-#if    defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
-       defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
 void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
                                int nr_controllers);
 void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
@@ -185,7 +184,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
 static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 {
 }
-
 #endif
 
 extern int omap_msdi_reset(struct omap_hwmod *oh);
index 61fd837624a8532dc38e13a078d1d83941afc1d8..c1793786aea989de24668d31529f1d22eb1e7012 100644 (file)
@@ -582,7 +582,7 @@ void __init orion_spi_1_init(unsigned long mapbase)
  * Watchdog
  ****************************************************************************/
 static struct resource orion_wdt_resource =
-               DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
+               DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28);
 
 static struct platform_device orion_wdt_device = {
        .name           = "orion_wdt",
index 58b79809d20cd47a6a0579f91d49084d027f73e7..584c9bf8ed2d0feed3894fa1b672f26ab70365a5 100644 (file)
@@ -193,6 +193,7 @@ static const struct platform_device_id ssp_id_table[] = {
        { "pxa25x-nssp",        PXA25x_NSSP },
        { "pxa27x-ssp",         PXA27x_SSP },
        { "pxa168-ssp",         PXA168_SSP },
+       { "pxa910-ssp",         PXA910_SSP },
        { },
 };
 
index 33ecd0c9f0c3ecfdc63aaf44cab40a8deabd5f0c..b1e05ccff3acdb2ff4e5585ea1fb4947ebd18f28 100644 (file)
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
                return -EINVAL;
        }
 
-       if (client->is_ts && adc->ts_pend)
-               return -EAGAIN;
-
        spin_lock_irqsave(&adc->lock, flags);
 
+       if (client->is_ts && adc->ts_pend) {
+               spin_unlock_irqrestore(&adc->lock, flags);
+               return -EAGAIN;
+       }
+
        client->channel = channel;
        client->nr_samples = nr_samples;
 
index 1d214cb9d77050c921dc7e69f8b165c2eb89ee7e..6303974c2ee06af0e81a4f60437f0fac86d2f4f6 100644 (file)
@@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
 #ifdef CONFIG_CPU_S3C2440
 static struct resource s3c_camif_resource[] = {
        [0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
-       [1] = DEFINE_RES_IRQ(IRQ_CAM),
+       [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
+       [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
 };
 
 struct platform_device s3c_device_camif = {
index 7d048759b77244a1aee05507bc3813f9b55e0784..c0c70a895ca832e865f8c0b820e0fd9c85012cf2 100644 (file)
@@ -22,7 +22,7 @@
 #define S3C24XX_VA_WATCHDOG    S3C_VA_WATCHDOG
 
 #define S3C2412_VA_SSMC                S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00100000)
 
 #define S3C2410_PA_UART                (0x50000000)
 #define S3C24XX_PA_UART                S3C2410_PA_UART
index f19aff19205c3d4090c6d239f574c6d788ed8af6..bc4db9b04e36e101de8885e6285e23baf60f9b89 100644 (file)
@@ -25,7 +25,7 @@ static inline void arch_wdt_reset(void)
 
        __raw_writel(0, S3C2410_WTCON);   /* disable watchdog, to be safe  */
 
-       if (s3c2410_wdtclk)
+       if (!IS_ERR(s3c2410_wdtclk))
                clk_enable(s3c2410_wdtclk);
 
        /* put initial values into count and data */
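The arch_wdt_reset() hunk above replaces a NULL test on s3c2410_wdtclk with !IS_ERR(), since clk_get() reports failure with an ERR_PTR-encoded pointer rather than NULL. A simplified userspace re-implementation of that idiom, for illustration only (the real definitions live in <linux/err.h>):

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long error) { return (void *)error; }
	static long PTR_ERR(const void *ptr) { return (long)ptr; }
	static int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *clk = ERR_PTR(-2);	/* e.g. a failed lookup, -ENOENT */

		/* A plain NULL check treats the error cookie as a valid clock. */
		printf("NULL check:   %s\n", clk ? "looks usable (wrong)" : "failed");
		printf("IS_ERR check: %s (error %ld)\n",
		       IS_ERR(clk) ? "failed" : "usable",
		       IS_ERR(clk) ? PTR_ERR(clk) : 0L);
		return 0;
	}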
index 031a61899beffb07843c198a3237c385e5e2100a..48a1599110372ffb77fcf8e33e89449e4a40b39d 100644 (file)
@@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
 struct clk clk_xusbxti = {
        .name           = "xusbxti",
        .id             = -1,
+       .rate           = 24000000,
 };
 
 struct clk s5p_clk_27m = {
index ab3de721c5db5756618d708159d4bf3a1865b6ee..75b05ad0fbad4d6e2b259ceaa59730bab611f71f 100644 (file)
@@ -4,7 +4,7 @@
  * Debugging macro include header for spear platform
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index e14a3e4932f98ad264955bdf5a01969018eda1f8..2bc6b54460a80245db72a745e75de6237ae11405 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 03ed8b585dcf7a5cf89d5b28304bf0d7e18b07be..88a7fbd247936e1f3f59817f7cd1a82e54b67b23 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 914d09dd50fdfa71c1e9a505b37987771e5070fe..ef95e5b780bd04d303748748cb0763e687ffedfb 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific timex definitions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 6dd455bafdfdaf90bf4f13f8a6cc8572a763135c..2ce6cb17a98b10aba011fcb99047a49a97da5d7d 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a56a067717c112d972ac2f07e88cd444c34f5af3..12cf27f935f97372854cc1a0bd2c41251971499f 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ea0a61302b7ef56ff1650d2758dbe449d34deac9..4f990115b1bd7070c3092f3b768a7cce7e6f94a0 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific restart functions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 961fb7261243faf87baf47ce6ca420fc19b30d02..853e891e1184427212c3c8c16f3b1a060884e369 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform shared irq layer source file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c140f9b41dce48fed05240ff531dc22e42f29095..d552a854dacccbbc8b5f8131bbdaa2203ffb74fa 100644 (file)
@@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
        if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
                syscall = 1;
 
-       if (ti->flags & _TIF_SIGPENDING))
+       if (ti->flags & _TIF_SIGPENDING)
                do_signal(regs, syscall);
 
        if (ti->flags & _TIF_NOTIFY_RESUME) {
index 2e3994b20169773f7c92c7369ad0cbc0944f54a8..62bcea7dcc6dff30bdaa5a4793ab336aef5ed688 100644 (file)
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
        unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-       if (current->rt.nr_cpus_allowed == num_possible_cpus())
+       if (current->nr_cpus_allowed == num_possible_cpus())
                set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
index a09230a08e02d201e4c2b214a0bd6cabc5a8c223..62ef17676b406274a76ddf5d3a51f7091a9ace97 100644 (file)
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
 #define        VMALLOC_END     0xffffffff
 
 #define arch_enter_lazy_cpu_mode()    do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
 #endif /* _H8300_PGTABLE_H */
index 356068cd0879bdf145bd68f12857b75cfd95ab4c..8725d1ad427263895a6e5d904c179e2c1440bbe0 100644 (file)
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
        break;                                                  \
     default:                                                   \
        __gu_err = __get_user_bad();                            \
-       __gu_val = 0;                                           \
        break;                                                  \
     }                                                          \
     (x) = __gu_val;                                            \
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
        return 0;
 }
 
+#define __clear_user   clear_user
+
 #endif /* _H8300_UACCESS_H */
index 68d651081bd3f00dcbc71767e86b1614a98c9ab8..d0b1607f2711f577ec194e12127c458a6edd18a2 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #if defined(__H8300H__)
 #define CPU "H8/300H"
@@ -54,7 +55,6 @@ unsigned long memory_end;
 
 char __initdata command_line[COMMAND_LINE_SIZE];
 
-extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
 extern int _ramstart, _ramend;
 extern char _target_name[];
 extern void h8300_gpio_init(void);
@@ -119,9 +119,9 @@ void __init setup_arch(char **cmdline_p)
            memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
 
-       init_mm.start_code = (unsigned long) &_stext;
-       init_mm.end_code = (unsigned long) &_etext;
-       init_mm.end_data = (unsigned long) &_edata;
+       init_mm.start_code = (unsigned long) _stext;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) 0; 
 
 #if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
@@ -134,15 +134,12 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
 
 #ifdef DEBUG
-       printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
-               "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
-               (int) &_sdata, (int) &_edata,
-               (int) &_sbss, (int) &_ebss);
-       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x "
-               "STACK=0x%06x-0x%06x\n",
-              (int) &_ebss, (int) memory_start,
-               (int) memory_start, (int) memory_end,
-               (int) memory_end, (int) &_ramend);
+       printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
+               "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
+               __bss_stop);
+       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
+               "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
+               memory_end, memory_end, &_ramend);
 #endif
 
 #ifdef CONFIG_DEFAULT_CMDLINE
index fca10378701bb9c818b8c123fb4d3c05522cd0c6..5adaadaf92183c42703b9995e32e622dc926d958 100644 (file)
@@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
index 32263a138aa6ccf9a2a871a859254f2a18df03d9..e0f74191d55312fe4a8fa6548d95d34667604efe 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 
 #include <asm/io.h>
+#include <asm/irq_regs.h>
 #include <asm/timer.h>
 
 #define        TICK_SIZE (tick_nsec / 1000)
index 973369c32a957cd4a1780e922301268ae0c30e19..981e25094b1a403d2fa6505b7a0733fd6cfac3ec 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
 #undef DEBUG
 
@@ -123,7 +124,6 @@ void __init mem_init(void)
        int codek = 0, datak = 0, initk = 0;
        /* DAVIDM look at setup memory map generically with reserved area */
        unsigned long tmp;
-       extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
        extern unsigned long  _ramend, _ramstart;
        unsigned long len = &_ramend - &_ramstart;
        unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
@@ -142,9 +142,9 @@ void __init mem_init(void)
        /* this will put all memory onto the freelists */
        totalram_pages = free_all_bootmem();
 
-       codek = (&_etext - &_stext) >> 10;
-       datak = (&_ebss - &_sdata) >> 10;
-       initk = (&__init_begin - &__init_end) >> 10;
+       codek = (_etext - _stext) >> 10;
+       datak = (__bss_stop - _sdata) >> 10;
+       initk = (__init_begin - __init_end) >> 10;
 
        tmp = nr_free_pages() << PAGE_SHIFT;
        printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
@@ -178,22 +178,21 @@ free_initmem(void)
 {
 #ifdef CONFIG_RAMKERNEL
        unsigned long addr;
-       extern char __init_begin, __init_end;
 /*
  *     the following code should be cool even if these sections
  *     are not page aligned.
  */
-       addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+       addr = PAGE_ALIGN((unsigned long)(__init_begin));
        /* next to check that the page we free is not a partial page */
-       for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+       for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
-                       (addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
-                       (int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+                       (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
+                       (int)(PAGE_ALIGN((unsigned long)__init_begin)),
                        (int)(addr - PAGE_SIZE));
 #endif
 }
index 177716b1d61354a74a86ecfff0e08ba522660ec8..01729c2979ba2634f49f7c4403aab4e95652fead 100644 (file)
@@ -43,9 +43,9 @@ endif
 
 OBJCOPYFLAGS += -R .empty_zero_page
 
-suffix_$(CONFIG_KERNEL_GZIP)   = gz
-suffix_$(CONFIG_KERNEL_BZIP2)  = bz2
-suffix_$(CONFIG_KERNEL_LZMA)   = lzma
+suffix-$(CONFIG_KERNEL_GZIP)   = gz
+suffix-$(CONFIG_KERNEL_BZIP2)  = bz2
+suffix-$(CONFIG_KERNEL_LZMA)   = lzma
 
 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
        $(call if_changed,ld)
index 370d60881977db4370f1686bee5d3b59a19eba69..28a09529f206915fd00633bf846dde48342b8b36 100644 (file)
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
 static unsigned long free_mem_end_ptr;
 
 #ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
 {
        char *ss = s;
 
@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
 #endif
 
 #ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+       char *d = dest;
+       const char *s = src;
+       while (n--)
+               *d++ = *s++;
+
+       return dest;
+}
+
 #define BOOT_HEAP_SIZE             0x10000
 #include "../../../../lib/decompress_inflate.c"
 #endif
index 527527584dd096dc2d7c2cc55bab64e83df0fee6..4313aa62b51b76746b4b800a592b3f55899c395e 100644 (file)
@@ -113,9 +113,6 @@ struct pt_regs {
 
 #define PTRACE_OLDSETOPTIONS   21
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD  0x00000001
-
 #ifdef __KERNEL__
 
 #include <asm/m32r.h>          /* M32R_PSW_BSM, M32R_PSW_BPM */
index 4c03361537aa6b49f3e2ba144afd667cd6b75307..51f5e9aa49016fdce8112eb72083c0b167f21df8 100644 (file)
@@ -591,17 +591,16 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               return -EIO;
+               return;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               return -EIO;
+               return;
 
        if (embed_debug_trap(child, next_pc))
-               return -EIO;
+               return;
 
        invalidate_cache();
-       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index f3fb2c029cfcab061fbbfb5697273067e57c38e8..d0f60b97bbc5d82b1855e73145ce0b809124200f 100644 (file)
@@ -286,7 +286,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
                                if (prev_insn(regs) < 0)
-                                       return -EFAULT;
+                                       return;
                }
        }
 
index cac5b6be572a8b83c8331ba5aa83bf84f3f83159..1471201282605485d05fe0b0afd024988509d55c 100644 (file)
@@ -7,6 +7,8 @@ config M68K
        select GENERIC_IRQ_SHOW
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
        select GENERIC_CPU_DEVICES
+       select GENERIC_STRNCPY_FROM_USER if MMU
+       select GENERIC_STRNLEN_USER if MMU
        select FPU if MMU
        select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
 
index 1a922fad76f753aa66e8d212828935861892865a..eafa2539a8ee79dbcc65ea929ab082c767d93029 100644 (file)
@@ -1,2 +1,4 @@
 include include/asm-generic/Kbuild.asm
 header-y += cachectl.h
+
+generic-y += word-at-a-time.h
index d63b99ff7ff7aa8638bc3e44a2bba47923c38a6c..497c31c803ff55d3e57981fb590392b4f608d686 100644 (file)
@@ -86,7 +86,7 @@
 /*
  *     QSPI module.
  */
-#define        MCFQSPI_IOBASE          (MCF_IPSBAR + 0x340)
+#define        MCFQSPI_BASE            (MCF_IPSBAR + 0x340)
 #define        MCFQSPI_SIZE            0x40
 
 #define        MCFQSPI_CS0             147
index 9c80cd515b2069cab1a28b2b54a16f70d9a19028..472c891a4aeee4c1f1bc095b2e33b08840c5a6c2 100644 (file)
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define copy_from_user(to, from, n)    __copy_from_user(to, from, n)
 #define copy_to_user(to, from, n)      __copy_to_user(to, from, n)
 
-long strncpy_from_user(char *dst, const char __user *src, long count);
-long strnlen_user(const char __user *src, long n);
+#define user_addr_max() \
+       (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 unsigned long __clear_user(void __user *to, unsigned long n);
 
 #define clear_user     __clear_user
 
-#define strlen_user(str) strnlen_user(str, 32767)
-
 #endif /* _M68K_UACCESS_H */
index 8b4a2222e65877dd35eb736c8ca25592f8695ad0..1bc10e62b9affa45f8938f1f0150431971bffcde 100644 (file)
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
        }
 }
 
-#ifdef CONFIG_COLDFIRE
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
 asmlinkage int syscall_trace_enter(void)
 {
        int ret = 0;
index d7deb7fc7eb5c667483d4604d7537cd95f2f59a6..707f0573ec6bfade21e43c7a91bd0b3896ed8d79 100644 (file)
@@ -85,7 +85,7 @@ void __init time_init(void)
        mach_sched_init(timer_interrupt);
 }
 
-#ifdef CONFIG_M68KCLASSIC
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 
 u32 arch_gettimeoffset(void)
 {
@@ -108,4 +108,4 @@ static int __init rtc_init(void)
 
 module_init(rtc_init);
 
-#endif /* CONFIG_M68KCLASSIC */
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
index 5664386338da851094d48191ac1f717aa5dfff69..5e97f2ee7c1197feaf1fb79ec07cd10566466026 100644 (file)
@@ -103,80 +103,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
 }
 EXPORT_SYMBOL(__generic_copy_to_user);
 
-/*
- * Copy a null terminated string from userspace.
- */
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
-       long res;
-       char c;
-
-       if (count <= 0)
-               return count;
-
-       asm volatile ("\n"
-               "1:     "MOVES".b       (%2)+,%4\n"
-               "       move.b  %4,(%1)+\n"
-               "       jeq     2f\n"
-               "       subq.l  #1,%3\n"
-               "       jne     1b\n"
-               "2:     sub.l   %3,%0\n"
-               "3:\n"
-               "       .section .fixup,\"ax\"\n"
-               "       .even\n"
-               "10:    move.l  %5,%0\n"
-               "       jra     3b\n"
-               "       .previous\n"
-               "\n"
-               "       .section __ex_table,\"a\"\n"
-               "       .align  4\n"
-               "       .long   1b,10b\n"
-               "       .previous"
-               : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
-               : "i" (-EFAULT), "0" (count));
-
-       return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-long strnlen_user(const char __user *src, long n)
-{
-       char c;
-       long res;
-
-       asm volatile ("\n"
-               "1:     subq.l  #1,%1\n"
-               "       jmi     3f\n"
-               "2:     "MOVES".b       (%0)+,%2\n"
-               "       tst.b   %2\n"
-               "       jne     1b\n"
-               "       jra     4f\n"
-               "\n"
-               "3:     addq.l  #1,%0\n"
-               "4:     sub.l   %4,%0\n"
-               "5:\n"
-               "       .section .fixup,\"ax\"\n"
-               "       .even\n"
-               "20:    sub.l   %0,%0\n"
-               "       jra     5b\n"
-               "       .previous\n"
-               "\n"
-               "       .section __ex_table,\"a\"\n"
-               "       .align  4\n"
-               "       .long   2b,20b\n"
-               "       .previous\n"
-               : "=&a" (res), "+d" (n), "=&d" (c)
-               : "0" (src), "r" (src));
-
-       return res;
-}
-EXPORT_SYMBOL(strnlen_user);
-
 /*
  * Zero Userspace
  */
index c801c172b822354017151cb040e348db207f73b9..f4dc9b29560940485c28208bbcba493d0271202a 100644 (file)
@@ -53,6 +53,7 @@
 #endif
 
 static u32 m68328_tick_cnt;
+static irq_handler_t timer_interrupt;
 
 /***************************************************************************/
 
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
        TSTAT &= 0;
 
        m68328_tick_cnt += TICKS_PER_JIFFY;
-       return arch_timer_interrupt(irq, dummy);
+       return timer_interrupt(irq, dummy);
 }
 
 /***************************************************************************/
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
 
 /***************************************************************************/
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
        /* disable timer 1 */
        TCTL = 0;
@@ -115,6 +116,7 @@ void hw_timer_init(void)
        /* Enable timer 1 */
        TCTL |= TCTL_TEN;
        clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
+       timer_interrupt = handler;
 }
 
 /***************************************************************************/
index 255fc03913e9065cf3cb9474b70cc8abf002f165..9877cefad1e7640dd27622410c04f882acdb37e3 100644 (file)
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
 #define OSCILLATOR  (unsigned long int)33000000
 #endif
 
+static irq_handler_t timer_interrupt;
 unsigned long int system_clock;
 
 extern QUICC *pquicc;
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
   pquicc->timer_ter1 = 0x0002; /* clear timer event */
 
-  return arch_timer_interrupt(irq, dummy);
+  return timer_interrupt(irq, dummy);
 }
 
 static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
        .handler = hw_tick,
 };
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
   unsigned char prescaler;
   unsigned short tgcr_save;
@@ -94,6 +95,8 @@ void hw_timer_init(void)
 
   pquicc->timer_ter1 = 0x0003; /* clear timer events */
 
+  timer_interrupt = handler;
+
   /* enable timer 1 interrupt in CIMR */
   setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
 
index 9f1260c5e2ad5d031b42b54447f141f247f808f4..44da406897e58b9b579d09cf4f69faeb7894725b 100644 (file)
@@ -42,4 +42,11 @@ unsigned long clk_get_rate(struct clk *clk)
        return MCF_CLK;
 }
 EXPORT_SYMBOL(clk_get_rate);
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+       return NULL;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
 /***************************************************************************/
index 09ab87ee6fef654eef0220ab05b1224492947efc..b3e10fdd389866659212f947971af764b98dcf3f 100644 (file)
@@ -288,6 +288,7 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS64_R1
+       select SYS_HAS_CPU_MIPS64_R2
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_HAS_EARLY_PRINTK
@@ -1423,6 +1424,7 @@ config CPU_SB1
 config CPU_CAVIUM_OCTEON
        bool "Cavium Octeon processor"
        depends on SYS_HAS_CPU_CAVIUM_OCTEON
+       select ARCH_SPARSEMEM_ENABLE
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_SMP
index 6210b8d841098c5182a7ea986e6ac66c6a02371b..b311be45a7207028a7cf6df754a8b54c0f259938 100644 (file)
@@ -21,6 +21,7 @@ config BCM47XX_BCMA
        select BCMA
        select BCMA_HOST_SOC
        select BCMA_DRIVER_MIPS
+       select BCMA_HOST_PCI if PCI
        select BCMA_DRIVER_PCI_HOSTMODE if PCI
        default y
        help
index de4d917fd54d5ae99fa3ec69e55666a4fe3945cc..a551bab5ecb94b2aafce31787cea8e6bb31dbd10 100644 (file)
@@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs,
        return ret;
 }
 
-static const __initdata struct {
+static const struct {
        unsigned int    cs;
        unsigned int    base;
        unsigned int    size;
-} pcmcia_cs[3] = {
+} pcmcia_cs[3] __initconst = {
        {
                .cs     = MPI_CS_PCMCIA_COMMON,
                .base   = BCM_PCMCIA_COMMON_BASE_PA,
index f9e275a50d982be23ffda94a1e23d019c25569b5..2f4f6d5e05b66bdad381da0ddaff5b83d3897d51 100644 (file)
@@ -82,10 +82,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
        help
          Lock the kernel's implementation of memcpy() into L2.
 
-config ARCH_SPARSEMEM_ENABLE
-       def_bool y
-       select SPARSEMEM_STATIC
-
 config IOMMU_HELPER
        bool
 
index 4b93048044eb266457b4c9ac80a164c056e6eba9..ee1fb9f7f517c29e1ce6781699cbf90e75a314ce 100644 (file)
@@ -185,7 +185,6 @@ static void __cpuinit octeon_init_secondary(void)
        octeon_init_cvmcount();
 
        octeon_irq_setup_secondary();
-       raw_local_irq_enable();
 }
 
 /**
@@ -233,6 +232,7 @@ static void octeon_smp_finish(void)
 
        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+       local_irq_enable();
 }
 
 /**
index 2e1ad4c652b72cf9042524870a19631f9e0566e0..82ad35ce2b45d975705e90a90db8cf82b7189255 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
-#include <asm/bug.h>
 #include <asm/byteorder.h>             /* sigh ... */
 #include <asm/cpu-features.h>
 #include <asm/sgidefs.h>
index 285a41fa0b18dd0412a291805781f8d811ee82de..eee10dc07ac100defc6b0f00f6d8616da692e2c0 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __ASM_CMPXCHG_H
 #define __ASM_CMPXCHG_H
 
+#include <linux/bug.h>
 #include <linux/irqflags.h>
 #include <asm/war.h>
 
index f9fa2a479dd0e175ae7fc839d833b15695103f04..95e40c1e8ed114a3a95ad9cab9794e129ccbdca2 100644 (file)
@@ -94,6 +94,7 @@
 #define PRID_IMP_24KE          0x9600
 #define PRID_IMP_74K           0x9700
 #define PRID_IMP_1004K         0x9900
+#define PRID_IMP_M14KC         0x9c00
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -260,12 +261,12 @@ enum cpu_type_enum {
         */
        CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
        CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
-       CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
+       CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
 
        /*
         * MIPS64 class processors
         */
-       CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+       CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
        CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
        CPU_XLR, CPU_XLP,
 
@@ -288,7 +289,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M64R2     0x00000100
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
-       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
+       MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
        MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
 
index 86548da650e765f79db345f4d3964a5d0eb6c0fe..991b659e254803937cc99c9854d2c24f1153b0d3 100644 (file)
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
 #define GIC_VPE_EIC_SS(intr) \
-       (GIC_EIC_SHADOW_SET_BASE + (4 * intr))
+       (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
 
 #define GIC_VPE_EIC_VEC_BASE           0x0800
 #define GIC_VPE_EIC_VEC(intr) \
@@ -330,6 +330,17 @@ struct gic_intr_map {
 #define GIC_FLAG_TRANSPARENT   0x02
 };
 
+/*
+ * This is only used in EIC mode. This helps to figure out which
+ * shared interrupts we need to process when we get a vector interrupt.
+ */
+#define GIC_MAX_SHARED_INTR  0x5
+struct gic_shared_intr_map {
+       unsigned int num_shared_intr;
+       unsigned int intr_list[GIC_MAX_SHARED_INTR];
+       unsigned int local_intr_mask;
+};
+
 extern void gic_init(unsigned long gic_base_addr,
        unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
        unsigned int intrmap_size, unsigned int irqbase);
@@ -338,5 +349,7 @@ extern unsigned int gic_get_int(void);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern void gic_bind_eic_interrupt(int irq, int set);
+extern unsigned int gic_get_timer_pending(void);
 
 #endif /* _ASM_GICREGS_H */
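
The struct gic_shared_intr_map added above records, for one EIC vector, which shared GIC interrupts feed it plus a local interrupt mask. A hypothetical consumer might walk intr_list when that vector fires; the dispatch loop below is a standalone illustration under that assumption — dispatch_eic_vector() and handle_shared_intr() are invented names, not code from this patch:

/*
 * Hypothetical sketch only: how a per-VPE map like the one added
 * above might be consumed when an EIC vector fires.  The dispatch
 * loop and handle_shared_intr() are assumptions for illustration.
 */
#include <stdio.h>

#define GIC_MAX_SHARED_INTR 0x5

struct gic_shared_intr_map {
	unsigned int num_shared_intr;
	unsigned int intr_list[GIC_MAX_SHARED_INTR];
	unsigned int local_intr_mask;
};

static void handle_shared_intr(unsigned int intr)
{
	printf("would dispatch shared GIC interrupt %u\n", intr);
}

static void dispatch_eic_vector(const struct gic_shared_intr_map *map)
{
	unsigned int i;

	/* Walk every shared interrupt recorded for this vector. */
	for (i = 0; i < map->num_shared_intr; i++)
		handle_shared_intr(map->intr_list[i]);
}

int main(void)
{
	struct gic_shared_intr_map map = {
		.num_shared_intr = 2,
		.intr_list = { 3, 7 },
		.local_intr_mask = 0,
	};

	dispatch_eic_vector(&map);
	return 0;
}
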
index 7ebfc392e58d1d5cb7c8f2a686e25a9041f853b8..ab84064283db2c50d847d888b32e42a4d316dd7d 100644 (file)
@@ -251,7 +251,7 @@ struct f_format {   /* FPU register format */
        unsigned int func : 6;
 };
 
-struct ma_format {     /* FPU multipy and add format (MIPS IV) */
+struct ma_format {     /* FPU multiply and add format (MIPS IV) */
        unsigned int opcode : 6;
        unsigned int fr : 5;
        unsigned int ft : 5;
@@ -324,7 +324,7 @@ struct f_format {   /* FPU register format */
        unsigned int opcode : 6;
 };
 
-struct ma_format {     /* FPU multipy and add format (MIPS IV) */
+struct ma_format {     /* FPU multiply and add format (MIPS IV) */
        unsigned int fmt : 2;
        unsigned int func : 4;
        unsigned int fd : 5;
index a58f22998a86507729e3d72d379ef6e23ddad019..29d9c23c20c72d87f847f28772666448cdd2711a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
+#include <asm/bug.h>
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
index fb698dc09bc9c39c60cf7c0d43992b424e239551..78dbb8a86da249d4bd4e6fa2db6b6c6b0c4bd4ed 100644 (file)
@@ -136,6 +136,7 @@ extern void free_irqno(unsigned int irq);
  * IE7.  Since R2 their number has to be read from the c0_intctl register.
  */
 #define CP0_LEGACY_COMPARE_IRQ 7
+#define CP0_LEGACY_PERFCNT_IRQ 7
 
 extern int cp0_compare_irq;
 extern int cp0_compare_irq_shift;
index 94d4faad29a1a4286c934d28e76175ca97eee935..fdcd78ca1b03d054f807ee0c64839c763194f855 100644 (file)
@@ -99,7 +99,7 @@
 #define CKCTL_6368_USBH_CLK_EN         (1 << 15)
 #define CKCTL_6368_DISABLE_GLESS_EN    (1 << 16)
 #define CKCTL_6368_NAND_CLK_EN         (1 << 17)
-#define CKCTL_6368_IPSEC_CLK_EN                (1 << 17)
+#define CKCTL_6368_IPSEC_CLK_EN                (1 << 18)
 
 #define CKCTL_6368_ALL_SAFE_EN         (CKCTL_6368_SWPKT_USB_EN |      \
                                        CKCTL_6368_SWPKT_SAR_EN |       \
index d11aa02a956a57ca41ff890dbe16bbdd0145db53..5447d9fc421941da85a2b0ce6bed2b865a4a8282 100644 (file)
 #define GIC_CPU_INT4           4 /* .                  */
 #define GIC_CPU_INT5           5 /* Core Interrupt 5   */
 
+/* MALTA GIC local interrupts */
+#define GIC_INT_TMR             (GIC_CPU_INT5)
+#define GIC_INT_PERFCTR         (GIC_CPU_INT5)
+
+/* GIC constants */
+/* Add 2 to convert non-eic hw int # to eic vector # */
+#define GIC_CPU_TO_VEC_OFFSET   (2)
+/* If we map an intr to pin X, GIC will actually generate vector X+1 */
+#define GIC_PIN_TO_VEC_OFFSET   (1)
+
 #define GIC_EXT_INTR(x)                x
 
 /* External Interrupts used for IPI */
index c9420aa97e3232dc8b96ccf2c960e6d0aa8f1f00..e71ff4c317f2d0bdd8df430b0f439286abb4559f 100644 (file)
@@ -48,7 +48,7 @@
 #define CP0_VPECONF0           $1, 2
 #define CP0_VPECONF1           $1, 3
 #define CP0_YQMASK             $1, 4
-#define CP0_VPESCHEDULE        $1, 5
+#define CP0_VPESCHEDULE                $1, 5
 #define CP0_VPESCHEFBK         $1, 6
 #define CP0_TCSTATUS           $2, 1
 #define CP0_TCBIND             $2, 2
index 5d33621b5658f9c46314f4fcc1d2d123eaaf83d5..4f8ddba8c360050703d6d760b5909e92944e8b23 100644 (file)
@@ -22,7 +22,7 @@ struct task_struct;
  * switch_to(n) should switch tasks to task nr n, first
  * checking that n isn't the current task, in which case it does nothing.
  */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -66,11 +66,13 @@ do {                                                                        \
 
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
+       u32 __usedfpu;                                                  \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        __clear_software_ll_bit();                                      \
-       (last) = resume(prev, next, task_thread_info(next));            \
+       __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
+       (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
 } while (0)
 
 #define finish_arch_switch(prev)                                       \
index e2eca7d1059800e22704b467ab4f94f955df27c0..ca97e0ecb64b5054195b7d271017fa2760b7d05a 100644 (file)
@@ -60,6 +60,8 @@ struct thread_info {
 register struct thread_info *__current_thread_info __asm__("$28");
 #define current_thread_info()  __current_thread_info
 
+#endif /* !__ASSEMBLY__ */
+
 /* thread information allocation */
 #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
 #define THREAD_SIZE_ORDER (1)
@@ -85,8 +87,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 
 #define STACK_WARN     (THREAD_SIZE / 8)
 
-#endif /* !__ASSEMBLY__ */
-
 #define PREEMPT_ACTIVE         0x10000000
 
 /*
index 6ae7ce4ac63eb9b6a65ecf8ae0a49093579ae597..f4630e1082ab676a96e052e504995e86716be012 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) xxxx  the Anonymous
  * Copyright (C) 1994 - 2006 Ralf Baechle
  * Copyright (C) 2003, 2004  Maciej W. Rozycki
- * Copyright (C) 2001, 2004  MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -199,6 +199,7 @@ void __init check_wait(void)
                cpu_wait = rm7k_wait_irqoff;
                break;
 
+       case CPU_M14KC:
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
@@ -810,6 +811,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_5KC;
                __cpu_name[cpu] = "MIPS 5Kc";
                break;
+       case PRID_IMP_5KE:
+               c->cputype = CPU_5KE;
+               __cpu_name[cpu] = "MIPS 5KE";
+               break;
        case PRID_IMP_20KC:
                c->cputype = CPU_20KC;
                __cpu_name[cpu] = "MIPS 20Kc";
@@ -831,6 +836,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_74K;
                __cpu_name[cpu] = "MIPS 74Kc";
                break;
+       case PRID_IMP_M14KC:
+               c->cputype = CPU_M14KC;
+               __cpu_name[cpu] = "MIPS M14Kc";
+               break;
        case PRID_IMP_1004K:
                c->cputype = CPU_1004K;
                __cpu_name[cpu] = "MIPS 1004Kc";
index 57ba13edb03af10da20a9ce936645902d9f562c1..3fc1691110dc52f82e8ec1271054c1411c122658 100644 (file)
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
  * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
  */
 #include <linux/interrupt.h>
@@ -34,6 +34,12 @@ EXPORT_SYMBOL(memmove);
 
 EXPORT_SYMBOL(kernel_thread);
 
+/*
+ * Functions that operate on entire pages.  Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
 /*
  * Userspace access stuff.
  */
index ce89c806170846a1ea2bbcf027598fae11028648..0441f54b2a6acc9ab27a0d229c1465a8e0fa5975 100644 (file)
@@ -31,7 +31,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
        .align  7
        LEAF(resume)
index f29099b104c497e5cb4b4a3d368a801fc6c60d05..eb5e394a4650bbe9f0814e51bc00e1b8523c57f1 100644 (file)
@@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
        return counters >> vpe_shift();
 }
 
-static unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
-       return counters << vpe_shift();
-}
-
 #else /* !CONFIG_MIPS_MT_SMP */
 #define vpe_id()       0
 
index 293898391e674bd0979f87918a019b3a377d575d..9c51be5a163a9249efa3bd74f12be62b64eba923 100644 (file)
@@ -43,7 +43,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
@@ -51,18 +51,9 @@ LEAF(resume)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
-       /*
-        * check if we need to save FPU registers
-        */
-       lw      t3, TASK_THREAD_INFO(a0)
-       lw      t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
+       beqz    a3, 1f
 
-       and     t0, t0, t1
-       sw      t0, TI_FLAGS(t3)
+       PTR_L   t3, TASK_THREAD_INFO(a0)
 
        /*
         * clear saved user stack CU1 bit
index 9414f935446968069ec4f22daed1ae19b0799fb7..42d2a3938420df28e64d1f016459529b13991b7e 100644 (file)
@@ -41,7 +41,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti)
+ *                     struct thread_info *next_ti, int usedfpu)
  */
        .align  5
        LEAF(resume)
        /*
         * check if we need to save FPU registers
         */
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-       LONG_L  t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
 
-       and     t0, t0, t1
-       LONG_S  t0, TI_FLAGS(t3)
+       beqz    a3, 1f
 
+       PTR_L   t3, TASK_THREAD_INFO(a0)
        /*
         * clear saved user stack CU1 bit
         */
index 3046e2986006b448c884bbb7355a676cc63e6ad9..8e393b8443f7ae4aa8c4b8a99eb1f689e49957c1 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
@@ -197,13 +196,6 @@ static void bmips_init_secondary(void)
 
        write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
 #endif
-
-       /* make sure there won't be a timer interrupt for a little while */
-       write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
-
-       irq_enable_hazard();
-       set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
-       irq_enable_hazard();
 }
 
 /*
@@ -212,6 +204,13 @@ static void bmips_init_secondary(void)
 static void bmips_smp_finish(void)
 {
        pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+       /* make sure there won't be a timer interrupt for a little while */
+       write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+       irq_enable_hazard();
+       set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+       irq_enable_hazard();
 }
 
 /*
index 48650c8180401aeb1ce1c3f2713471d9c6e45536..1268392f1d2786bc3abb91fdb9a88ad84fa85ec9 100644 (file)
@@ -122,13 +122,21 @@ asmlinkage __cpuinit void start_secondary(void)
 
        notify_cpu_starting(cpu);
 
-       mp_ops->smp_finish();
+       set_cpu_online(cpu, true);
+
        set_cpu_sibling_map(cpu);
 
        cpu_set(cpu, cpu_callin_map);
 
        synchronise_count_slave();
 
+       /*
+        * irq will be enabled in ->smp_finish(), enabling it too early
+        * is dangerous.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
+       mp_ops->smp_finish();
+
        cpu_idle();
 }
 
@@ -196,8 +204,6 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);
 
-       set_cpu_online(cpu, true);
-
        return 0;
 }
 
index f5dd38f1d0152b49b13c971c59fde0cd218cb93e..15b5f3cfd20c48b9424dd5364d29cf1aeff77d4a 100644 (file)
@@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
 
 /*
  * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
+ * Make sure all CPUs are in a sensible state before we boot any of the
  * secondaries.
  *
  * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
@@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
        /*
         * TCContext gets an offset from the base of the IPIQ array
         * to be used in low-level code to detect the presence of
-        * an active IPI queue
+        * an active IPI queue.
         */
        write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
        /* Bind tc to vpe */
        write_tc_c0_tcbind(vpe);
-       /* In general, all TCs should have the same cpu_data indications */
+       /* In general, all TCs should have the same cpu_data indications. */
        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
        if (cpu_data[0].cputype == CPU_34K ||
@@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 }
 
 /*
- * Tweak to get Count registes in as close a sync as possible.
- * Value seems good for 34K-class cores.
+ * Tweak to get Count registes in as close a sync as possible.  The
+ * value seems good for 34K-class cores.
  */
 
 #define CP0_SKEW 8
@@ -615,7 +615,6 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-       local_irq_enable();
 }
 
 void smtc_smp_finish(void)
@@ -631,6 +630,8 @@ void smtc_smp_finish(void)
        if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
 
+       local_irq_enable();
+
        printk("TC %d going on-line as CPU %d\n",
                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
index 99f913c8d7a6eb3e4db9ac2f7b8def493a4e5e5e..842d55e411fd396479b611ba0a1c0c2af2d4cb92 100644 (file)
@@ -111,7 +111,6 @@ void __cpuinit synchronise_count_master(void)
 void __cpuinit synchronise_count_slave(void)
 {
        int i;
-       unsigned long flags;
        unsigned int initcount;
        int ncpus;
 
@@ -123,8 +122,6 @@ void __cpuinit synchronise_count_slave(void)
        return;
 #endif
 
-       local_irq_save(flags);
-
        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to say everyone is ready
@@ -154,7 +151,5 @@ void __cpuinit synchronise_count_slave(void)
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
-
-       local_irq_restore(flags);
 }
 #undef NR_LOOPS
index 2d0c2a277f525b5b89b89500a5153c9032ed4ce3..c3c29354370345ae74c25dce1e4e7e7da9263316 100644 (file)
@@ -132,6 +132,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;
 
+       if (!task)
+               task = current;
+
        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
@@ -1249,6 +1252,7 @@ static inline void parity_protection_init(void)
                break;
 
        case CPU_5KC:
+       case CPU_5KE:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1498,6 +1502,7 @@ extern void flush_tlb_handlers(void);
  * Timer interrupt
  */
 int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
 int cp0_compare_irq_shift;
 
 /*
@@ -1597,7 +1602,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
                        cp0_perfcount_irq = -1;
        } else {
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
-               cp0_compare_irq_shift = cp0_compare_irq;
+               cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
                cp0_perfcount_irq = -1;
        }
 
index 924da5eb7031498ea93dc050a7492aa78f5bb0ec..df243a64f4305305564ec1e1e5b7f68c4c6e55ad 100644 (file)
@@ -1,5 +1,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
@@ -72,7 +73,7 @@ SECTIONS
        .data : {       /* Data */
                . = . + DATAOFFSET;             /* for CONFIG_MAPPED_KERNEL */
 
-               INIT_TASK_DATA(PAGE_SIZE)
+               INIT_TASK_DATA(THREAD_SIZE)
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
                READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
index 4aa20280613ea3954db6e7925cc2fead924b0b78..fd6203f14f1fbfb8e608606235f77f3dd3113272 100644 (file)
@@ -3,8 +3,8 @@
 #
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
-                                  gup.o init.o mmap.o page.o tlbex.o \
-                                  tlbex-fault.o uasm.o
+                                  gup.o init.o mmap.o page.o page-funcs.o \
+                                  tlbex.o tlbex-fault.o uasm.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
index 5109be96d98d099ec8509dd1de6a9048f5c4b82d..f092c265dc6360a89403ac62c184b7f4ab417437 100644 (file)
@@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void)
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
-               c->icache.sets = 64 << ((config1 >> 22) & 7);
+               c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
 
                icache_size = c->icache.sets *
@@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void)
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz= lsize;
-               c->dcache.sets = 64 << ((config1 >> 13) & 7);
+               c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);
 
                dcache_size = c->dcache.sets *
@@ -1051,6 +1051,7 @@ static void __cpuinit probe_pcache(void)
        case CPU_R14000:
                break;
 
+       case CPU_M14KC:
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
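
The icache/dcache hunk above replaces "64 << field" with "32 << ((field + 1) & 7)" when decoding the Config1 sets-per-way fields (bits 24:22 and 15:13). Per the MIPS32 Config1 register description, field values 0-6 mean 64 << field sets per way while the value 7 means 32 sets, so the old formula misread 7 as 8192. A minimal standalone sketch of the decode, not part of the patch:

/*
 * Standalone sketch: decode the MIPS32 Config1 IS/DS sets-per-way
 * field.  Values 0..6 encode 64 << field sets; 7 encodes 32 sets,
 * which "32 << ((field + 1) & 7)" handles and "64 << field" does not.
 */
#include <stdio.h>

static unsigned int sets_old(unsigned int field) { return 64u << (field & 7); }
static unsigned int sets_new(unsigned int field) { return 32u << ((field + 1) & 7); }

int main(void)
{
	for (unsigned int f = 0; f < 8; f++)
		printf("field=%u old=%u new=%u\n", f, sets_old(f), sets_new(f));
	return 0;	/* new: 64 128 256 512 1024 2048 4096 32 */
}
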
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
new file mode 100644 (file)
index 0000000..48a6b38
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012  Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name   clear_page_cpu
+#define cpu_copy_page_function_name    copy_page_cpu
+#else
+#define cpu_clear_page_function_name   clear_page
+#define cpu_copy_page_function_name    copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x058 bytes
+ * R4600 v1.7:                         0x05c bytes
+ * R4600 v2.0:                         0x060 bytes
+ * With prefetching, 16 word strides   0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+1:     j       1b              /* Dummy, will be replaced. */
+       .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x11c bytes
+ * R4600 v1.7:                         0x080 bytes
+ * R4600 v2.0:                         0x07c bytes
+ * With prefetching, 16 word strides   0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+1:     j       1b              /* Dummy, will be replaced. */
+       .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
index cc0b626858b3d0b6e34e5e510004083ce7bfba24..98f530e182163146ee4bba570edc2c3013236f31 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2007  Maciej W. Rozycki
  * Copyright (C) 2008  Thiemo Seufer
+ * Copyright (C) 2012  MIPS Technologies, Inc.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5];
 #define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x058 bytes
- * R4600 v1.7:                         0x05c bytes
- * R4600 v2.0:                         0x060 bytes
- * With prefetching, 16 word strides   0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x11c bytes
- * R4600 v1.7:                         0x080 bytes
- * R4600 v2.0:                         0x07c bytes
- * With prefetching, 16 word strides   0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
 static int pref_bias_clear_store __cpuinitdata;
 static int pref_bias_copy_load __cpuinitdata;
 static int pref_bias_copy_store __cpuinitdata;
@@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
                }
 }
 
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
 void __cpuinit build_clear_page(void)
 {
        int off;
-       u32 *buf = (u32 *)&clear_page_array;
+       u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
@@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void)
        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
 
-       BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+       BUG_ON(buf > &__clear_page_end);
 
        uasm_resolve_relocs(relocs, labels);
 
        pr_debug("Synthesized clear page handler (%u instructions).\n",
-                (u32)(buf - clear_page_array));
+                (u32)(buf - &__clear_page_start));
 
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (buf - clear_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+       for (i = 0; i < (buf - &__clear_page_start); i++)
+               pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
 }
 
@@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 void __cpuinit build_copy_page(void)
 {
        int off;
-       u32 *buf = (u32 *)&copy_page_array;
+       u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
@@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void)
        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
 
-       BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+       BUG_ON(buf > &__copy_page_end);
 
        uasm_resolve_relocs(relocs, labels);
 
        pr_debug("Synthesized copy page handler (%u instructions).\n",
-                (u32)(buf - copy_page_array));
+                (u32)(buf - &__copy_page_start));
 
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (buf - copy_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+       for (i = 0; i < (buf - &__copy_page_start); i++)
+               pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
 }
 
 #ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
 
 /*
  * Pad descriptors to cacheline, since each is exclusively owned by a
index 0bc485b3cd606de49e62aff00c9fe611e3a90540..03eb0ef9158047b023ee87d8d5d0e45c67c1583f 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ * Copyright (C) 2011  MIPS Technologies, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
@@ -494,6 +495,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_R14000:
        case CPU_4KC:
        case CPU_4KEC:
+       case CPU_M14KC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
index bf80921f2f56c0b145e698fa2c673ff49c7c0636..284dea54faf5a9f629e9d2cab18121cc5db328c6 100644 (file)
@@ -241,8 +241,9 @@ void __init mips_pcibios_init(void)
                return;
        }
 
-       if (controller->io_resource->start < 0x00001000UL)      /* FIXME */
-               controller->io_resource->start = 0x00001000UL;
+       /* Change start address to avoid conflicts with ACPI and SMB devices */
+       if (controller->io_resource->start < 0x00002000UL)
+               controller->io_resource->start = 0x00002000UL;
 
        iomem_resource.end &= 0xfffffffffULL;                   /* 64 GB */
        ioport_resource.end = controller->io_resource->end;
@@ -253,7 +254,7 @@ void __init mips_pcibios_init(void)
 }
 
 /* Enable PCI 2.1 compatibility in PIIX4 */
-static void __init quirk_dlcsetup(struct pci_dev *dev)
+static void __devinit quirk_dlcsetup(struct pci_dev *dev)
 {
        u8 odlc, ndlc;
        (void) pci_read_config_byte(dev, 0x82, &odlc);
index b7f37d4982fab27b91213a83be7a178626b694cb..2e28f653f66d1b582b9d0ca89c389aaaa1b18ca8 100644 (file)
@@ -111,7 +111,7 @@ static void __init pci_clock_check(void)
        unsigned int __iomem *jmpr_p =
                (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
        int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
-       static const int pciclocks[] __initdata = {
+       static const int pciclocks[] __initconst = {
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
index acb677a1227cc043a364b8938e78d7bd911c64d3..b3df7c2aad1e144fe8be92584272b14aed53973f 100644 (file)
@@ -82,8 +82,10 @@ void __init prom_free_prom_memory(void)
 
 void xlp_mmu_init(void)
 {
+       /* enable extended TLB and Large Fixed TLB */
        write_c0_config6(read_c0_config6() | 0x24);
-       current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+       /* set page mask of Fixed TLB in config7 */
        write_c0_config7(PM_DEFAULT_MASK >>
                (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
 }
@@ -100,6 +102,10 @@ void __init prom_init(void)
        nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
 #ifdef CONFIG_SMP
        nlm_wakeup_secondary_cpus(0xffffffff);
+
+       /* update TLB size after waking up threads */
+       current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
        register_smp_ops(&nlm_smp_ops);
 #endif
 }
index d1f2d4c52d42d3a0e1e92f5605d81632eefc8aa6..b6e378211a2c940021fb2e7befb342b90b1b1777 100644 (file)
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
        switch (current_cpu_type()) {
        case CPU_5KC:
+       case CPU_M14KC:
        case CPU_20KC:
        case CPU_24K:
        case CPU_25KF:
index baba3bcaa3c28100067a39d9e1a1132a5a8d02b4..4d80a856048d19261e3af077ff1ff5344b3b26d2 100644 (file)
@@ -322,6 +322,10 @@ static int __init mipsxx_init(void)
 
        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
+       case CPU_M14KC:
+               op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+               break;
+
        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;
index d5d4c018fb04c03f4ef6b1d26d05fd263ad4835e..0857ab8c3919750feb164b3649f60df502290312 100644 (file)
@@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
@@ -60,7 +60,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
        pci_write_config_dword(pdev, 0xe4, 1 << 5);
 }
 
-static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char c;
 
@@ -135,7 +135,7 @@ static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
        printk(KERN_INFO"via686b fix: ISA bridge done\n");
 }
 
-static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev)
 {
        printk(KERN_INFO"via686b fix: IDE\n");
 
@@ -168,19 +168,19 @@ static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
        printk(KERN_INFO"via686b fix: IDE done\n");
 }
 
-static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev)
 {
        /* irq routing */
        pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
 }
 
-static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev)
 {
        /* irq routing */
        pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
 }
 
-static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
        unsigned char c;
index 4b9768d5d72948b200d7431b4754fb7c21a27bc9..a7b917dcf604bde1ee359b20fb203356c0e4a84e 100644 (file)
@@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 }
 
 /* CS5536 SPEC. fixup */
-static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev)
 {
        /* the uart1 and uart2 interrupt in PIC is enabled as default */
        pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
        pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
 }
 
-static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev)
 {
        /* setting the mutex pin as IDE function */
        pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
                               CS5536_IDE_FLASH_SIGNATURE);
 }
 
-static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev)
 {
        /* enable the AUDIO interrupt in PIC  */
        pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
@@ -118,14 +118,14 @@ static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
        pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
 }
 
-static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
 {
        /* enable the OHCI interrupt in PIC */
        /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
        pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
 }
 
-static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
 {
        u32 hi, lo;
 
@@ -137,7 +137,7 @@ static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
        pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
 }
 
-static void __init loongson_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson_nec_fixup(struct pci_dev *pdev)
 {
        unsigned int val;
 
index 0f48498bc231b074f1ec8b401e9660b29bf37e5c..70073c98ed320dc6f88f457028c8b9cd094e93ab 100644 (file)
@@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
-       static int piixirqmap[16] __initdata = {  /* PIIX PIRQC[A:D] irq mappings */
+       static int piixirqmap[16] __devinitdata = {  /* PIIX PIRQC[A:D] irq mappings */
                0,  0,  0,  3,
                4,  5,  6,  7,
                0,  9, 10, 11,
@@ -83,7 +83,7 @@ static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
         malta_piix_func0_fixup);
 
-static void __init malta_piix_func1_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
 
index e08f49cb6875abd65d50b7cb1cee964f816d96b7..8e4f8288eca2e2fcfc17bd615a26a5e0a6a152de 100644 (file)
 
 #include <asm/vr41xx/mpc30x.h>
 
-static const int internal_func_irqs[] __initdata = {
+static const int internal_func_irqs[] __initconst = {
        VRC4173_CASCADE_IRQ,
        VRC4173_AC97_IRQ,
        VRC4173_USB_IRQ,
 };
 
-static const int irq_tab_mpc30x[] __initdata = {
+static const int irq_tab_mpc30x[] __initconst = {
  [12] = VRC4173_PCMCIA1_IRQ,
  [13] = VRC4173_PCMCIA2_IRQ,
  [29] = MQ200_IRQ,
index f0bb9146e6c038424281cd45bb5b3ebc9dab0815..d02900a72916869dc39a03d0dd2075a5f8e85d82 100644 (file)
@@ -15,7 +15,7 @@
  * Set the BCM1250, etc. PCI host bridge's TRDY timeout
  * to the finite max.
  */
-static void __init quirk_sb1250_pci(struct pci_dev *dev)
+static void __devinit quirk_sb1250_pci(struct pci_dev *dev)
 {
        pci_write_config_byte(dev, 0x40, 0xff);
 }
@@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
 /*
  * The BCM1250, etc. PCI/HT bridge reports as a host bridge.
  */
-static void __init quirk_sb1250_ht(struct pci_dev *dev)
+static void __devinit quirk_sb1250_ht(struct pci_dev *dev)
 {
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
 }
@@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
 /*
  * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
  */
-static void __init quirk_sp1011(struct pci_dev *dev)
+static void __devinit quirk_sp1011(struct pci_dev *dev)
 {
        pci_write_config_byte(dev, 0x64, 0xff);
 }
index a1e7e6d80c8c718e9b532e5ccb8ff0e7bac9e164..bc13e29d2bb34d6f9a257322e222b943da533c1c 100644 (file)
@@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
 }
 
 #ifdef CONFIG_TOSHIBA_FPCIB0
-static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
 {
        struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
 
index 0fbe4c0c170a25f5af0bd0da083a14c47e82e5f7..fdc24440294c7028c6fdf7e2a655b896dbe60ef7 100644 (file)
@@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev)
        bridge->b_widget.w_tflush;      /* Flush */
 }
 
-static void __init pci_fixup_ioc3(struct pci_dev *d)
+static void __devinit pci_fixup_ioc3(struct pci_dev *d)
 {
        pci_disable_swapping(d);
 }
index ea453532a33c6dfc0eeb49659b2dc9036494713a..075d87acd12ac4158e090b7918250e958f0d8c05 100644 (file)
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
 
        /* setup reset gpio used by pci */
        reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
-       if (reset_gpio > 0)
+       if (gpio_is_valid(reset_gpio))
                devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
 
        /* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
 
        /* toggle reset pin */
-       if (reset_gpio > 0) {
+       if (gpio_is_valid(reset_gpio)) {
                __gpio_set_value(reset_gpio, 0);
                wmb();
                mdelay(1);
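
The reset-GPIO hunk above swaps a bare "reset_gpio > 0" test for gpio_is_valid(): of_get_named_gpio() returns a negative errno on failure and 0 is a legal GPIO number, so the old test wrongly rejected GPIO 0. A minimal standalone sketch of the check, mirroring the asm-generic definition; the ARCH_NR_GPIOS value here is an assumption for the sketch, not taken from the patch:

/*
 * Standalone sketch: gpio_is_valid() is a simple range check, so it
 * accepts GPIO 0 and rejects negative error codes, unlike "> 0".
 */
#include <stdio.h>
#include <stdbool.h>

#define ARCH_NR_GPIOS	256	/* assumed value for the sketch */

static bool gpio_is_valid(int number)
{
	return number >= 0 && number < ARCH_NR_GPIOS;
}

int main(void)
{
	int samples[] = { -2 /* e.g. -ENOENT */, 0, 42 };

	for (int i = 0; i < 3; i++)
		printf("gpio %d: old test %d, gpio_is_valid %d\n",
		       samples[i], samples[i] > 0, gpio_is_valid(samples[i]));
	return 0;
}
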
index 1644805a6730db188bf9f26245c947551e8a4b5b..172af1cd58672e137924e88dae7f37dcf82393be 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/irq.h>
 #include <linux/irqdesc.h>
 #include <linux/console.h>
+#include <linux/pci_regs.h>
 
 #include <asm/io.h>
 
@@ -156,35 +157,55 @@ struct pci_controller nlm_pci_controller = {
        .io_offset      = 0x00000000UL,
 };
 
+/*
+ * The top level PCIe links on the XLS PCIe controller appear as
+ * bridges. Given a device, this function finds which link it is
+ * on.
+ */
+static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
+{
+       struct pci_bus *bus, *p;
+
+       /* Find the bridge on bus 0 */
+       bus = dev->bus;
+       for (p = bus->parent; p && p->number != 0; p = p->parent)
+               bus = p;
+
+       return p ? bus->self : NULL;
+}
+
 static int get_irq_vector(const struct pci_dev *dev)
 {
+       struct pci_dev *lnk;
+
        if (!nlm_chip_is_xls())
-               return  PIC_PCIX_IRQ;   /* for XLR just one IRQ*/
+               return  PIC_PCIX_IRQ;   /* for XLR just one IRQ */
 
        /*
         * For XLS PCIe, there is an IRQ per Link, find out which
         * link the device is on to assign interrupts
-       */
-       if (dev->bus->self == NULL)
+        */
+       lnk = xls_get_pcie_link(dev);
+       if (lnk == NULL)
                return 0;
 
-       switch  (dev->bus->self->devfn) {
-       case 0x0:
+       switch  (PCI_SLOT(lnk->devfn)) {
+       case 0:
                return PIC_PCIE_LINK0_IRQ;
-       case 0x8:
+       case 1:
                return PIC_PCIE_LINK1_IRQ;
-       case 0x10:
+       case 2:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK2_IRQ;
                else
                        return PIC_PCIE_LINK2_IRQ;
-       case 0x18:
+       case 3:
                if (nlm_chip_is_xls_b())
                        return PIC_PCIE_XLSB0_LINK3_IRQ;
                else
                        return PIC_PCIE_LINK3_IRQ;
        }
-       WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
+       WARN(1, "Unexpected devfn %d\n", lnk->devfn);
        return 0;
 }
 
@@ -202,7 +223,27 @@ void arch_teardown_msi_irq(unsigned int irq)
 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
        struct msi_msg msg;
+       struct pci_dev *lnk;
        int irq, ret;
+       u16 val;
+
+       /* MSI not supported on XLR */
+       if (!nlm_chip_is_xls())
+               return 1;
+
+       /*
+        * Enable MSI on the XLS PCIe controller bridge which was disabled
+        * at enumeration, the bridge MSI capability is at 0x50
+        */
+       lnk = xls_get_pcie_link(dev);
+       if (lnk == NULL)
+               return 1;
+
+       pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
+       if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
+               val |= PCI_MSI_FLAGS_ENABLE;
+               pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
+       }
 
        irq = get_irq_vector(dev);
        if (irq <= 0)
@@ -327,7 +368,7 @@ static int __init pcibios_init(void)
                }
        } else {
                /* XLR PCI controller ACK */
-               irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
+               irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
        }
 
        return 0;
index b71fae231049dfa86d25f3e5fef2216d968faa17..5edab2bc6fc0c3314d5925d2197781a7f563b4df 100644 (file)
@@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  */
 static void __cpuinit yos_init_secondary(void)
 {
-       set_c0_status(ST0_CO | ST0_IE | ST0_IM);
 }
 
 static void __cpuinit yos_smp_finish(void)
 {
+       set_c0_status(ST0_CO | ST0_IM | ST0_IE);
 }
 
 /* Hook for after all CPUs are online */
index 0a170e0ffeaae4ea84e0d5bfe1480203e69524be..7773f3d956b0cdc914ab82cb7c2fd79664c558f8 100644 (file)
@@ -28,7 +28,7 @@
 
 #define CALLIOPE_ADDR(x)       (CALLIOPE_IO_BASE + (x))
 
-const struct register_map calliope_register_map __initdata = {
+const struct register_map calliope_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
        .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
        .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
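
For context on the __initdata -> __initconst swaps in these PowerTV hunks: a const object annotated __initdata asks for a read-only symbol in the writable .init.data section, which gcc can reject with a section type conflict; __initconst places it in .init.rodata instead. A minimal sketch, with the table name invented:

#include <linux/init.h>

/* Problematic form: const data directed at the writable .init.data section.
 *   static const int example_map[] __initdata = { 1, 2, 3 };
 * Preferred: const init data belongs in .init.rodata via __initconst. */
static const int example_map[] __initconst = { 1, 2, 3 };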
index bbc0c122be5ee34c3ece4cb2fdee9ddf8efef384..da076db7b7ed2fd572d6429231b370e381a10e69 100644 (file)
@@ -28,7 +28,7 @@
 
 #define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
 
-const struct register_map cronus_register_map __initdata = {
+const struct register_map cronus_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
        .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
        .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
index 91dda682752ce25ece90b800a9103bb33cd80c86..47683b370e748da98a21860ce04424e8f8459b36 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/init.h>
 #include <asm/mach-powertv/asic.h>
 
-const struct register_map gaia_register_map __initdata = {
+const struct register_map gaia_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
        .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
        .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
index 4a05bb096476a1ef7751f528c16d9ea9015827a9..6ff4b10f09dab4e4c3ba35242db83654315df0ab 100644 (file)
@@ -28,7 +28,7 @@
 
 #define ZEUS_ADDR(x)   (ZEUS_IO_BASE + (x))
 
-const struct register_map zeus_register_map __initdata = {
+const struct register_map zeus_register_map __initconst = {
        .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
        .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
        .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
index 682efb0c108d22576992c9f554653e9329916d3b..64eb71b1528032ea4af4f084fbbe9cf6c8d91216 100644 (file)
@@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq)
        return err;
 }
 
-static void __init quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
 {
        int irq;        /* PCI/ISA Bridge interrupt */
        u8 reg_64;
index 55b79ef10028cbc3253685acbf0979edd6784d08..44251b974f1d96d4931e4704d9433163eb070962 100644 (file)
@@ -81,9 +81,6 @@ struct pt_regs {
 #define PTRACE_GETFPREGS          14
 #define PTRACE_SETFPREGS          15
 
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD     0x00000001
-
 #ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
index 08251d6f6b11015b5d8d265cba4fe88fc70e0f1c..ac519bbd42ffe32693a02d3f88ac5dbd21d426ff 100644 (file)
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index bd4e90dfe6c26d37c366abec8275a1d0d1e7b774..f8e66425cbf826f2f62aef4756869442a9ed5fdc 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_TIMEX_H
 #define _ASM_TIMEX_H
 
-#include <asm/hardirq.h>
 #include <unit/timex.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
 extern int init_clockevents(void);
 extern int init_clocksource(void);
 
-static inline void setup_jiffies_interrupt(int irq,
-                                          struct irqaction *action)
-{
-       u16 tmp;
-       setup_irq(irq, action);
-       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
-       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       tmp = GxICR(irq);
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
index 69cae0260786207d0c3b62fde6255f6b2f442995..ccce35e3e179150c87f5f94af2a2463333236a68 100644 (file)
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
 {
 }
 
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);
+}
+
 int __init init_clockevents(void)
 {
        struct clock_event_device *cd;
index a5ac755dd69f4acbc8c6d213c47c285b8af41098..2df440105a80f78e111f4f863261cbb7abab10ae 100644 (file)
@@ -9,6 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/irqreturn.h>
+
 struct clocksource;
 struct clock_event_device;
 
index 2381df83bd0064287110896a2cfb0a2678a8a046..35932a8de8b8d299fd7aaebcf31d56eb7c6db6d3 100644 (file)
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
        case SC1TXIRQ:
 #ifdef CONFIG_MN10300_TTYSM1_TIMER12
        case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
        case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        case TM3IRQ:
 #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
 #endif /* CONFIG_MN10300_TTYSM1 */
index 6ab0bee2a54fd38efeccbaa683b4437f26406a34..4d584ae29ae1c1c2c53746ee3a6d1d9b78e7042a 100644 (file)
@@ -459,10 +459,11 @@ static int handle_signal(int sig,
        else
                ret = setup_frame(sig, ka, oldset, regs);
        if (ret)
-               return;
+               return ret;
 
        signal_delivered(sig, info, ka, regs,
-                                test_thread_flag(TIF_SINGLESTEP));
+                        test_thread_flag(TIF_SINGLESTEP));
+       return 0;
 }
 
 /*
index 94a9c6d53e1b890ea98d987628fad089d5fd012d..b900e5afa0aefae7969666fbb1cc9cf2e77d5994 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/bug.h>
 #include <linux/irq.h>
+#include <linux/export.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
index 159acb02cfd4c16dcd1e91f9268b8d9dc6e74463..e244ebe637e15436f643a63e28217d9714321645 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 static unsigned long pci_sram_allocated = 0xbc000000;
index cc18fe7d8b90e2abc9061b430b7072386aeed0d8..c37f9832cf17d0a7cb5b8795766aa4c5feebd88e 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index 43c246439413b8c5f70d9b50d79b733443e42882..53677694b16554af24b8c8262191673426ae193c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/timex.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index 758af30d1a16aad5b74820431e5abda8cc4ed1df..4cefc224f448d98843fe44c7e67631e98fa37466 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index e1becd6b757132bd5664e82c5a676250ad660c3a..bc4adfaf815c1c2276c2f0f18bd84924b10c12d7 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
index ddb7ed0107065e19132dc242cd5b42b361c80f21..42f32db75087cc36b2f089ee6cd7c1bd3629e35b 100644 (file)
 #ifndef _ASM_UNIT_TIMEX_H
 #define _ASM_UNIT_TIMEX_H
 
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
 #include <asm/param.h>
index dbc3850b1d0dd3ea3a32d94aee31e6f1af2496ec..5707f1a62341d799a75136337ebb13fbfd1cde63 100644 (file)
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
 
 NM             = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
+LIBGCC         = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 MACHINE                := $(shell uname -m)
 ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y                      := mm/ kernel/ math-emu/
 kernel-$(CONFIG_HPUX)          += hpux/
 
 core-y += $(addprefix arch/parisc/, $(kernel-y))
-libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+libs-y += arch/parisc/lib/ $(LIBGCC)
 
 drivers-$(CONFIG_OPROFILE)             += arch/parisc/oprofile/
 
index 19a434f5505974e63f28a00d697dfd42b57f9345..4383707d9801eb7bfe2b1bb34be6bbea447be139 100644 (file)
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += pdc.h
+generic-y += word-at-a-time.h
index 72cfdb0cfdd157485e2acd4a27de5c7788b483c1..62a33338549c18d6bb32d0a08c7e0728808767fe 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _PARISC_BUG_H
 #define _PARISC_BUG_H
 
+#include <linux/kernel.h>      /* for BUGFLAG_TAINT */
+
 /*
  * Tell the user there is some problem.
  * The offending file and line are encoded in the __bug_table section.
index c9aac24b02e267ab9d171af7d1087863e0ef0dc8..0554ab062bdc555bdd1d28fb655519b8a39fcbe5 100644 (file)
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
 }
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory")
 #else
 #define __hard_irq_enable()    __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
 #define __hard_irq_disable()   __mtmsrd(local_paca->kernel_msr, 1)
@@ -100,6 +100,14 @@ static inline void hard_irq_disable(void)
        get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
 }
 
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable       hard_irq_disable
+
+static inline bool lazy_irq_pending(void)
+{
+       return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
@@ -117,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
        return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)  mtmsr(x)
index ed1718feb9d9c5e8ee2c55241663d31d99717e15..5971c85df1369780decc5dbc6feb0b10f9d23a05 100644 (file)
@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
        mtmsrd  r10,1             /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
-       li      r0,_TIF_NEED_RESCHED    /* bits to check */
        ld      r3,_MSR(r1)
        ld      r4,TI_FLAGS(r9)
-       /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-       rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-       and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
-       bne     do_work
-
-#else /* !CONFIG_PREEMPT */
-       ld      r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r3,r3,MSR_PR
-       beq     restore         /* if not, just restore regs and return */
+       beq     resume_kernel
 
        /* Check current_thread_info()->flags */
+       andi.   r0,r4,_TIF_USER_WORK_MASK
+       beq     restore
+
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq     1f
+       bl      .restore_interrupts
+       bl      .schedule
+       b       .ret_from_except_lite
+
+1:     bl      .save_nvgprs
+       bl      .restore_interrupts
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .do_notify_resume
+       b       .ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+       /* Check if we need to preempt */
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       beq+    restore
+       /* Check that preempt_count() == 0 and interrupts are enabled */
+       lwz     r8,TI_PREEMPT(r9)
+       cmpwi   cr1,r8,0
+       ld      r0,SOFTE(r1)
+       cmpdi   r0,0
+       crandc  eq,cr1*4+eq,eq
+       bne     restore
+
+       /*
+        * Here we are preempting the current task. We want to make
+        * sure we are soft-disabled first
+        */
+       SOFT_DISABLE_INTS(r3,r4)
+1:     bl      .preempt_schedule_irq
+
+       /* Re-test flags and eventually loop */
        clrrdi  r9,r1,THREAD_SHIFT
        ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_USER_WORK_MASK
-       bne     do_work
-#endif /* !CONFIG_PREEMPT */
+       andi.   r0,r4,_TIF_NEED_RESCHED
+       bne     1b
+#endif /* CONFIG_PREEMPT */
 
        .globl  fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:     b       .ret_from_except /* What else to do here ? */
  
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-       andi.   r0,r3,MSR_PR    /* Returning to user mode? */
-       bne     user_work
-       /* Check that preempt_count() == 0 and interrupts are enabled */
-       lwz     r8,TI_PREEMPT(r9)
-       cmpwi   cr1,r8,0
-       ld      r0,SOFTE(r1)
-       cmpdi   r0,0
-       crandc  eq,cr1*4+eq,eq
-       bne     restore
-
-       /*
-        * Here we are preempting the current task. We want to make
-        * sure we are soft-disabled first
-        */
-       SOFT_DISABLE_INTS(r3,r4)
-1:     bl      .preempt_schedule_irq
-
-       /* Re-test flags and eventually loop */
-       clrrdi  r9,r1,THREAD_SHIFT
-       ld      r4,TI_FLAGS(r9)
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       bne     1b
-       b       restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-       andi.   r0,r4,_TIF_NEED_RESCHED
-       beq     1f
-       bl      .restore_interrupts
-       bl      .schedule
-       b       .ret_from_except_lite
-
-1:     bl      .save_nvgprs
-       bl      .restore_interrupts
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .do_notify_resume
-       b       .ret_from_except
-
 unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
index 7835a5e1ea5fed3a33bbc7878b0c9e6c26796489..1f017bb7a7cebfc3278fecfd7abe8f3dfaada107 100644 (file)
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -286,6 +286,52 @@ void restore_interrupts(void)
                __hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to enter an idle low-power
+ * state that has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). It returns
+ * true when it is safe to enter the low power state and false
+ * when the caller should simply return without entering it.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+       /*
+        * First we need to hard disable to ensure no interrupt
+        * occurs before we effectively enter the low power state
+        */
+       hard_irq_disable();
+
+       /*
+        * If anything happened while we were soft-disabled,
+        * we return now and do not enter the low power state.
+        */
+       if (lazy_irq_pending())
+               return false;
+
+       /* Tell lockdep we are about to re-enable */
+       trace_hardirqs_on();
+
+       /*
+        * Mark interrupts as soft-enabled and clear the
+        * PACA_IRQ_HARD_DIS from the pending mask since we
+        * are about to hard enable as well as a side effect
+        * of entering the low power state.
+        */
+       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+       local_paca->soft_enabled = 1;
+
+       /* Tell the caller to enter the low power state */
+       return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
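
The comment block above spells out the contract; for orientation, here is a hedged sketch of how a ppc_md.power_save hook is expected to use prep_irq_for_idle(). Function names are invented; the real call sites are the cbe_power_save() and check_and_cede_processor() hunks later in this merge:

/* Hypothetical platform idle hook, names invented for illustration. */
static void example_power_save(void)
{
	/* We arrive here soft-disabled (ppc_md.power_save convention). */
	if (!prep_irq_for_idle())
		return;	/* an interrupt is already pending, skip the nap */

	enter_low_power_state();	/* placeholder: H_CEDE, clearing CTRL[RUN], ... */

	/*
	 * Interrupts were hard-enabled as a side effect of the low power
	 * state; prep_irq_for_idle() already marked us soft-enabled.
	 */
}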
index 0b6d79617d7b08adb56c05286a167931cca6a3a9..2e3200ca485f7bab8aba23f4d02948f74eacee69 100644 (file)
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-       if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-           && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+       if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+           && entry->jump[1] == 0x398c0000 + (val & 0xffff))
                return 1;
        return 0;
 }
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
                entry++;
        }
 
-       /* Stolen from Paul Mackerras as well... */
-       entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
-       entry->jump[1] = 0x396b0000 + (val&0xffff);     /* addi r11,r11,sym@l*/
-       entry->jump[2] = 0x7d6903a6;                    /* mtctr r11 */
+       entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+       entry->jump[1] = 0x398c0000 + (val&0xffff);     /* addi r12,r12,sym@l*/
+       entry->jump[2] = 0x7d8903a6;                    /* mtctr r12 */
        entry->jump[3] = 0x4e800420;                    /* bctr */
 
        DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
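
The magic constants are just the PowerPC D-form encodings of lis/addi re-emitted for r12 instead of r11 (mtctr and bctr change only in the register field). A standalone check of the arithmetic in plain C, with an arbitrary symbol value:

#include <stdio.h>
#include <stdint.h>

/* D-form instruction layout: opcode(6) | RT(5) | RA(5) | SI(16) */
static uint32_t d_form(unsigned op, unsigned rt, unsigned ra, uint16_t si)
{
	return (op << 26) | (rt << 21) | (ra << 16) | si;
}

int main(void)
{
	uint32_t val = 0x12345678;	/* arbitrary symbol address */
	uint32_t lis  = d_form(15, 12, 0,  (val + 0x8000) >> 16); /* lis r12,sym@ha */
	uint32_t addi = d_form(14, 12, 12, val & 0xffff);          /* addi r12,r12,sym@l */

	/* prints 0x3d801234 0x398c5678, matching the 0x3d800000/0x398c0000 bases */
	printf("0x%08x 0x%08x\n", lis, addi);
	return 0;
}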
index 1b488e5305c5f2d1cdfa889f3dfafead154b258c..0794a3017b1b53e65e4d1aa325b79711a9fb04b1 100644 (file)
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
        long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
        prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
        struct opal_secondary_data *data = &RELOC(opal_secondary_data);
        struct opal_takeover_args *args = &data->args;
index 99a995c2a3f2496da4e2e48e807b7354488ff2c7..be171ee73bf8cd3bef83630fbc031ad4f29410e7 100644 (file)
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
        struct pt_regs *old_regs;
        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
        struct clock_event_device *evt = &__get_cpu_var(decrementers);
+       u64 now;
 
        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
                irq_work_run();
        }
 
-       *next_tb = ~(u64)0;
-       if (evt->event_handler)
-               evt->event_handler(evt);
+       now = get_tb_or_rtc();
+       if (now >= *next_tb) {
+               *next_tb = ~(u64)0;
+               if (evt->event_handler)
+                       evt->event_handler(evt);
+       } else {
+               now = *next_tb - now;
+               if (now <= DECREMENTER_MAX)
+                       set_dec((int)now);
+       }
 
 #ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
index c6af1d6238395947725a2e53ff0fbd6d6614b2e7..3abe1b86e58344060361f3efe933b8e119c415e3 100644 (file)
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
        return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+       struct kvm *kvm = vcpu->kvm;
        void *va;
        unsigned long nb;
+       unsigned long gpa;
 
-       vpap->update_pending = 0;
-       va = NULL;
-       if (vpap->next_gpa) {
-               va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-               if (nb < vpap->len) {
-                       /*
-                        * If it's now too short, it must be that userspace
-                        * has changed the mappings underlying guest memory,
-                        * so unregister the region.
-                        */
+       /*
+        * We need to pin the page pointed to by vpap->next_gpa,
+        * but we can't call kvmppc_pin_guest_page under the lock
+        * as it does get_user_pages() and down_read().  So we
+        * have to drop the lock, pin the page, then get the lock
+        * again and check that a new area didn't get registered
+        * in the meantime.
+        */
+       for (;;) {
+               gpa = vpap->next_gpa;
+               spin_unlock(&vcpu->arch.vpa_update_lock);
+               va = NULL;
+               nb = 0;
+               if (gpa)
+                       va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+               spin_lock(&vcpu->arch.vpa_update_lock);
+               if (gpa == vpap->next_gpa)
+                       break;
+               /* sigh... unpin that one and try again */
+               if (va)
                        kvmppc_unpin_guest_page(kvm, va);
-                       va = NULL;
-               }
+       }
+
+       vpap->update_pending = 0;
+       if (va && nb < vpap->len) {
+               /*
+                * If it's now too short, it must be that userspace
+                * has changed the mappings underlying guest memory,
+                * so unregister the region.
+                */
+               kvmppc_unpin_guest_page(kvm, va);
+               va = NULL;
        }
        if (vpap->pinned_addr)
                kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vcpu->kvm;
-
        spin_lock(&vcpu->arch.vpa_update_lock);
        if (vcpu->arch.vpa.update_pending) {
-               kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
                init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
        }
        if (vcpu->arch.dtl.update_pending) {
-               kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
                vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
                vcpu->arch.dtl_index = 0;
        }
        if (vcpu->arch.slb_shadow.update_pending)
-               kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+               kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
        spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
        struct kvm_vcpu *vcpu, *vcpu0, *vnext;
        long ret;
        u64 now;
-       int ptid, i;
+       int ptid, i, need_vpa_update;
 
        /* don't start if any threads have a signal pending */
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+       need_vpa_update = 0;
+       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                if (signal_pending(vcpu->arch.run_task))
                        return 0;
+               need_vpa_update |= vcpu->arch.vpa.update_pending |
+                       vcpu->arch.slb_shadow.update_pending |
+                       vcpu->arch.dtl.update_pending;
+       }
+
+       /*
+        * Initialize *vc, in particular vc->vcore_state, so we can
+        * drop the vcore lock if necessary.
+        */
+       vc->n_woken = 0;
+       vc->nap_count = 0;
+       vc->entry_exit_count = 0;
+       vc->vcore_state = VCORE_RUNNING;
+       vc->in_guest = 0;
+       vc->napping_threads = 0;
+
+       /*
+        * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+        * which can't be called with any spinlocks held.
+        */
+       if (need_vpa_update) {
+               spin_unlock(&vc->lock);
+               list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+                       kvmppc_update_vpas(vcpu);
+               spin_lock(&vc->lock);
+       }
 
        /*
         * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
                if (vcpu->arch.ceded)
                        vcpu->arch.ptid = ptid++;
 
-       vc->n_woken = 0;
-       vc->nap_count = 0;
-       vc->entry_exit_count = 0;
-       vc->vcore_state = VCORE_RUNNING;
        vc->stolen_tb += mftb() - vc->preempt_tb;
-       vc->in_guest = 0;
        vc->pcpu = smp_processor_id();
-       vc->napping_threads = 0;
        list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                kvmppc_start_thread(vcpu);
-               if (vcpu->arch.vpa.update_pending ||
-                   vcpu->arch.slb_shadow.update_pending ||
-                   vcpu->arch.dtl.update_pending)
-                       kvmppc_update_vpas(vcpu);
                kvmppc_create_dtl_entry(vcpu, vc);
        }
        /* Grab any remaining hw threads so they can't go into the kernel */
index a84aafce2a129e311a0943be0975db3823b88402..a1044f43becd380cdc7216e082fbf267b97a3fae 100644 (file)
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
index 3ff9013d6e7914e59f2919f5ee5bf685ad7d7797..ee02b30878ed4bec733af6cbd9aa152eefe22d0f 100644 (file)
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_PUT_TCE:
                return kvmppc_h_pr_put_tce(vcpu);
        case H_CEDE:
+               vcpu->arch.shared->msr |= MSR_EE;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                vcpu->stat.halt_wakeup++;
index b6edbb3b4a54f96aa46a58e603d927c874806449..1e95556dc692e3702f2df518dd4e08e7770ae52e 100644 (file)
@@ -635,11 +635,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-       const u32 *dm, *usm;
+       const u32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
-       struct assoc_arrays aa;
+       struct assoc_arrays aa = { .arrays = NULL };
 
        n = of_get_drconf_memory(memory, &dm);
        if (!n)
index 55ba3855a97f58093ec06f343cde1df2dffbb17b..7d3a3b5619a2b8f2528b61dc7077b4dd84c594d2 100644 (file)
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
        mr      r4, r_addr;                                     \
        li      r6, SIZE;                                       \
        bl      skb_copy_bits;                                  \
+       nop;                                                    \
        /* R3 = 0 on success */                                 \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        ld      r0, 16(r1);                                     \
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
        mr      r4, r_addr;                                     \
        li      r5, SIZE;                                       \
        bl      bpf_internal_load_pointer_neg_helper;           \
+       nop;                                                    \
        /* R3 != 0 on success */                                \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        ld      r0, 16(r1);                                     \
index efdacc829576582a0b4da52384f80ad62082cd4e..d17e98bc0c10d3ff0469cfa8af873f08d57abded 100644 (file)
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
        unsigned long ctrl, thread_switch_control;
 
-       /*
-        * We need to hard disable interrupts, the local_irq_enable() done by
-        * our caller upon return will hard re-enable.
-        */
-       hard_irq_disable();
+       /* Ensure our interrupt state is properly tracked */
+       if (!prep_irq_for_idle())
+               return;
 
        ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
         */
        ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
        mtspr(SPRN_CTRLT, ctrl);
+
+       /* Re-enable interrupts in MSR */
+       __hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
index 0915b1ad66ce0dbb9e893ddd54a51753029e8a67..2d311c0caf8ef4499a3a54ebdb24f0c4e41c8735 100644 (file)
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
                tcep++;
        }
 
-       if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+       if (tbl->it_type & TCE_PCI_SWINV_CREATE)
                tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
        return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
        while (npages--)
                *(tcep++) = 0;
 
-       if (tbl->it_type == TCE_PCI_SWINV_FREE)
+       if (tbl->it_type & TCE_PCI_SWINV_FREE)
                tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 
index 36f957f31842f60688a7485f297ed6181dc926c9..8733a86ad52ed6dbb2b0fc9030b08852a3ca2b62 100644 (file)
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
 };
 
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason,
-               const char *old_msgs, unsigned long old_len,
-               const char *new_msgs, unsigned long new_len);
+                         enum kmsg_dump_reason reason);
 
 static struct kmsg_dumper nvram_kmsg_dumper = {
        .dump = oops_to_nvram
@@ -503,28 +501,6 @@ int __init pSeries_nvram_init(void)
        return 0;
 }
 
-/*
- * Try to capture the last capture_len bytes of the printk buffer.  Return
- * the amount actually captured.
- */
-static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
-                               const char *new_msgs, size_t new_len,
-                               char *captured, size_t capture_len)
-{
-       if (new_len >= capture_len) {
-               memcpy(captured, new_msgs + (new_len - capture_len),
-                                                               capture_len);
-               return capture_len;
-       } else {
-               /* Grab the end of old_msgs. */
-               size_t old_tail_len = min(old_len, capture_len - new_len);
-               memcpy(captured, old_msgs + (old_len - old_tail_len),
-                                                               old_tail_len);
-               memcpy(captured + old_tail_len, new_msgs, new_len);
-               return old_tail_len + new_len;
-       }
-}
-
 /*
  * Are we using the ibm,rtas-log for oops/panic reports?  And if so,
  * would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
                                                NVRAM_RTAS_READ_TIMEOUT);
 }
 
-/* Squeeze out each line's <n> severity prefix. */
-static size_t elide_severities(char *buf, size_t len)
-{
-       char *in, *out, *buf_end = buf + len;
-       /* Assume a <n> at the very beginning marks the start of a line. */
-       int newline = 1;
-
-       in = out = buf;
-       while (in < buf_end) {
-               if (newline && in+3 <= buf_end &&
-                               *in == '<' && isdigit(in[1]) && in[2] == '>') {
-                       in += 3;
-                       newline = 0;
-               } else {
-                       newline = (*in == '\n');
-                       *out++ = *in++;
-               }
-       }
-       return out - buf;
-}
-
 /* Derived from logfs_compress() */
 static int nvram_compress(const void *in, void *out, size_t inlen,
                                                        size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
  * partition.  If that's too much, go back and capture uncompressed text.
  */
 static void oops_to_nvram(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason,
-               const char *old_msgs, unsigned long old_len,
-               const char *new_msgs, unsigned long new_len)
+                         enum kmsg_dump_reason reason)
 {
        static unsigned int oops_count = 0;
        static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
                return;
 
        if (big_oops_buf) {
-               text_len = capture_last_msgs(old_msgs, old_len,
-                       new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
-               text_len = elide_severities(big_oops_buf, text_len);
+               kmsg_dump_get_buffer(dumper, false,
+                                    big_oops_buf, big_oops_buf_sz, &text_len);
                rc = zip_oops(text_len);
        }
        if (rc != 0) {
-               text_len = capture_last_msgs(old_msgs, old_len,
-                               new_msgs, new_len, oops_data, oops_data_sz);
+               kmsg_dump_rewind(dumper);
+               kmsg_dump_get_buffer(dumper, true,
+                                    oops_data, oops_data_sz, &text_len);
                err_type = ERR_TYPE_KERNEL_PANIC;
                *oops_len = (u16) text_len;
        }
index 41a34bc4a9a2903e3d9bb747176cfde009f16435..c71be66bd5dc2ff3bc6e0b3309ad969859cb4b84 100644 (file)
@@ -99,15 +99,18 @@ out:
 static void check_and_cede_processor(void)
 {
        /*
-        * Interrupts are soft-disabled at this point,
-        * but not hard disabled. So an interrupt might have
-        * occurred before entering NAP, and would be potentially
-        * lost (edge events, decrementer events, etc...) unless
-        * we first hard disable then check.
+        * Ensure our interrupt state is properly tracked;
+        * this also checks that no interrupt occurred while
+        * we were soft-disabled
         */
-       hard_irq_disable();
-       if (get_paca()->irq_happened == 0)
+       if (prep_irq_for_idle()) {
                cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+               /* Ensure that H_CEDE returns with IRQs on */
+               if (WARN_ON(!(mfmsr() & MSR_EE)))
+                       __hard_irq_enable();
+#endif
+       }
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
index 0f3ab06d222260c63d5866e6d6b7ab0ac643e761..eab3492a45c5c5244eca42d58354cf6d8a092836 100644 (file)
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index 99bcd0ee838d6d1fe743dfd26ee31606fb05d23d..31d9db7913e4352a10680f76f2aaa8a5e800eef5 100644 (file)
@@ -32,6 +32,8 @@ config SUPERH
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_CLOCKEVENTS
        select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
        help
          The SuperH is a RISC processor targeted for use in embedded systems
          and consumer electronics; it was also used in the Sega Dreamcast
index 46edf070da1c377574d8ce17e52a74416f9c896d..aed701c7b11bb0208ffc3cf50efe2fb0ba0673f6 100644 (file)
@@ -9,6 +9,12 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
+ifneq ($(SUBARCH),$(ARCH))
+  ifeq ($(CROSS_COMPILE),)
+    CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux-  $(UTS_MACHINE)-linux-gnu-  $(UTS_MACHINE)-unknown-linux-gnu-)
+  endif
+endif
+
 isa-y                                  := any
 isa-$(CONFIG_SH_DSP)                   := sh
 isa-$(CONFIG_CPU_SH2)                  := sh2
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux           += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
 KBUILD_DEFCONFIG       := cayman_defconfig
 endif
 
-ifneq ($(SUBARCH),$(ARCH))
-  ifeq ($(CROSS_COMPILE),)
-    CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux-  $(UTS_MACHINE)-linux-gnu-  $(UTS_MACHINE)-unknown-linux-gnu-)
-  endif
-endif
-
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 ld-bfd                 := elf32-$(UTS_MACHINE)-linux
-LDFLAGS_vmlinux                += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd)
+LDFLAGS_vmlinux                += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
 LDFLAGS                        += -EL
 else
 ld-bfd                 := elf32-$(UTS_MACHINE)big-linux
-LDFLAGS_vmlinux                += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd)
+LDFLAGS_vmlinux                += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
 LDFLAGS                        += -EB
 endif
 
index 158c9176e42adc79fadeb78300ec14051a2a3801..43a179ce9afcb0bdcabc8f116901e27b7d4c02eb 100644 (file)
@@ -201,8 +201,8 @@ static struct resource kfr2r09_usb0_gadget_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = evtirq(0xa20),
-               .end    = evtirq(0xa20),
+               .start  = evt2irq(0xa20),
+               .end    = evt2irq(0xa20),
                .flags  = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
        },
 };
index c045142f73385978cd8e740ebb235ca40c40ad02..9e702f2f80452a222e08f490f9095c948b1143e4 100644 (file)
@@ -239,7 +239,7 @@ static int __init pcie_clk_init(struct sh7786_pcie_port *port)
        clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
        clk->enable_bit = BITS_CKE;
 
-       ret = sh_clk_mstp32_register(clk, 1);
+       ret = sh_clk_mstp_register(clk, 1);
        if (unlikely(ret < 0))
                goto err_phy;
 
index 7beb42322f60059368590c80439617a6be614460..7b673ddcd5551cc887cbd393c331056c6c1cc20b 100644 (file)
@@ -1,5 +1,39 @@
 include include/asm-generic/Kbuild.asm
 
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += ucontext.h
+generic-y += xor.h
+
 header-y += cachectl.h
 header-y += cpu-features.h
 header-y += hw_breakpoint.h
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h
deleted file mode 100644 (file)
index 6dc0bb0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h
deleted file mode 100644 (file)
index 6ca395d..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SH_CPUTIME_H
-#define __SH_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SH_CPUTIME_H */
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h
deleted file mode 100644 (file)
index 4c51401..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
deleted file mode 100644 (file)
index 9670e12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h
deleted file mode 100644 (file)
index 6cd978c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h
deleted file mode 100644 (file)
index 51cf6f9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_ERRNO_H
-#define __ASM_SH_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* __ASM_SH_ERRNO_H */
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h
deleted file mode 100644 (file)
index 46ab12d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
index e136d28d1d2ee5ad97f75866f9e9f3e4d118eba9..4d48f1436a63b72a34f8201ad8cb88416bc1a37d 100644 (file)
@@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
        return -1;
 }
 
-#define outb(x, y)     BUG()
-#define outw(x, y)     BUG()
-#define outl(x, y)     BUG()
+static inline void outb(unsigned char x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outw(unsigned short x, unsigned long port)
+{
+       BUG();
+}
+
+static inline void outl(unsigned int x, unsigned long port)
+{
+       BUG();
+}
 
 #define inb_p(addr)    inb(addr)
 #define inw_p(addr)    inw(addr)
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h
deleted file mode 100644 (file)
index b279fe0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h
deleted file mode 100644 (file)
index 84c7e51..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
deleted file mode 100644 (file)
index 14fab8f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h
deleted file mode 100644 (file)
index 9ed9b9c..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_LOCAL_H
-#define __ASM_SH_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_SH_LOCAL_H */
-
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h
deleted file mode 100644 (file)
index 8eebf89..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h
deleted file mode 100644 (file)
index 809134c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h
deleted file mode 100644 (file)
index 965d454..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h
deleted file mode 100644 (file)
index cf252af..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h
deleted file mode 100644 (file)
index 4db4b39..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_SH_PERCPU
-#define __ARCH_SH_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_SH_PERCPU */
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h
deleted file mode 100644 (file)
index c98509d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h
deleted file mode 100644 (file)
index 9c2499a..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_RESOURCE_H
-#define __ASM_SH_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* __ASM_SH_RESOURCE_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
deleted file mode 100644 (file)
index 98dfc35..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SCATTERLIST_H
-#define __ASM_SH_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SH_SCATTERLIST_H */
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h
deleted file mode 100644 (file)
index 7673b83..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h
deleted file mode 100644 (file)
index a0cb0ca..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h
deleted file mode 100644 (file)
index 83c05fc..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h
deleted file mode 100644 (file)
index 813040e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SIGINFO_H
-#define __ASM_SH_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* __ASM_SH_SIGINFO_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
deleted file mode 100644 (file)
index dd248c2..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sizes.h>
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h
deleted file mode 100644 (file)
index 6b71384..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h
deleted file mode 100644 (file)
index 9202a02..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_STATFS_H
-#define __ASM_SH_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* __ASM_SH_STATFS_H */
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h
deleted file mode 100644 (file)
index 3935b10..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h
deleted file mode 100644 (file)
index 280d78a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
index 050f221fa898219b40e2faca0bbb9cebae93dd7e..8698a80ed00c1fcbebed2ec993887f104179d384 100644 (file)
@@ -25,6 +25,8 @@
        (__chk_user_ptr(addr),          \
         __access_ok((unsigned long __force)(addr), (size)))
 
+#define user_addr_max()        (current_thread_info()->addr_limit.seg)
+
 /*
  * Uh, these should become the main single-value transfer routines ...
  * They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
 # include "uaccess_64.h"
 #endif
 
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 /* Generic arbitrary sized copy.  */
 /* Return the number of bytes NOT copied */
 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
        __cl_size;                                                      \
 })
 
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count)                              \
-({                                                                     \
-       unsigned long __sfu_src = (unsigned long)(src);                 \
-       int __sfu_count = (int)(count);                                 \
-       long __sfu_res = -EFAULT;                                       \
-                                                                       \
-       if (__access_ok(__sfu_src, __sfu_count))                        \
-               __sfu_res = __strncpy_from_user((unsigned long)(dest),  \
-                               __sfu_src, __sfu_count);                \
-                                                                       \
-       __sfu_res;                                                      \
-})
-
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return __copy_size;
 }
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static inline long strnlen_user(const char __user *s, long n)
-{
-       if (!__addr_ok(s))
-               return 0;
-       else
-               return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only.  This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str)       strnlen_user(str, ~0UL >> 1)
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
index ae0d24f6653f8ee1979d4d369b5e8fac9480beb1..c0de7ee35ab7aa5ba80c393f4a2588102d01e2a1 100644 (file)
@@ -170,79 +170,4 @@ __asm__ __volatile__( \
 
 extern void __put_user_unknown(void);
 
-static inline int
-__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
-{
-       __kernel_size_t res;
-       unsigned long __dummy, _d, _s, _c;
-
-       __asm__ __volatile__(
-               "9:\n"
-               "mov.b  @%2+, %1\n\t"
-               "cmp/eq #0, %1\n\t"
-               "bt/s   2f\n"
-               "1:\n"
-               "mov.b  %1, @%3\n\t"
-               "dt     %4\n\t"
-               "bf/s   9b\n\t"
-               " add   #1, %3\n\t"
-               "2:\n\t"
-               "sub    %4, %0\n"
-               "3:\n"
-               ".section .fixup,\"ax\"\n"
-               "4:\n\t"
-               "mov.l  5f, %1\n\t"
-               "jmp    @%1\n\t"
-               " mov   %9, %0\n\t"
-               ".balign 4\n"
-               "5:     .long 3b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .balign 4\n"
-               "       .long 9b,4b\n"
-               ".previous"
-               : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
-               : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
-                 "i" (-EFAULT)
-               : "memory", "t");
-
-       return res;
-}
-
-/*
- * Return the size of a string (including the ending 0 even when we have
- * exceeded the maximum string length).
- */
-static inline long __strnlen_user(const char __user *__s, long __n)
-{
-       unsigned long res;
-       unsigned long __dummy;
-
-       __asm__ __volatile__(
-               "1:\t"
-               "mov.b  @(%0,%3), %1\n\t"
-               "cmp/eq %4, %0\n\t"
-               "bt/s   2f\n\t"
-               " add   #1, %0\n\t"
-               "tst    %1, %1\n\t"
-               "bf     1b\n\t"
-               "2:\n"
-               ".section .fixup,\"ax\"\n"
-               "3:\n\t"
-               "mov.l  4f, %1\n\t"
-               "jmp    @%1\n\t"
-               " mov   #0, %0\n"
-               ".balign 4\n"
-               "4:     .long 2b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               "       .balign 4\n"
-               "       .long 1b,3b\n"
-               ".previous"
-               : "=z" (res), "=&r" (__dummy)
-               : "0" (0), "r" (__s), "r" (__n)
-               : "t");
-       return res;
-}
-
 #endif /* __ASM_SH_UACCESS_32_H */
index 56fd20b8cdcc16db8fbe6da757da62dec0d1d4d9..2e07e0f40c6af3038ca6b69fecfefefe528dc2b8 100644 (file)
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
 extern long __put_user_asm_q(void *, long);
 extern void __put_user_unknown(void);
 
-extern long __strnlen_user(const char *__s, long __n);
-extern int __strncpy_from_user(unsigned long __dest,
-              unsigned long __user __src, int __count);
-
 #endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h
deleted file mode 100644 (file)
index 9bc07b9..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
new file mode 100644 (file)
index 0000000..6e38953
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_WORD_AT_A_TIME_H
+#define __ASM_SH_WORD_AT_A_TIME_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+# include <asm-generic/word-at-a-time.h>
+#else
+/*
+ * Little-endian version cribbed from x86.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+#endif
+
+#endif
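
These helpers are what the generic strncpy_from_user()/strnlen_user() (selected for SH elsewhere in this merge) use to scan a word at a time. A standalone little-endian demo of the zero-byte detection, using 32-bit constants to match count_masked_bytes() above:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ONES	0x01010101u	/* REPEAT_BYTE(0x01) for a 32-bit word */
#define HIGHS	0x80808080u	/* REPEAT_BYTE(0x80) */

int main(void)
{
	uint32_t word;

	memcpy(&word, "ab\0d", 4);	/* little-endian: word == 0x64006261 */

	/* has_zero(): zero bytes raise their high bit (bits above the first
	 * zero byte may be noise, which the next step discards). */
	uint32_t mask = (word - ONES) & ~word & HIGHS;

	/* create_zero_mask() + count_masked_bytes(): index of the first NUL. */
	uint32_t bytemask = ((mask - 1) & ~mask) >> 7;
	long idx = ((0x0ff0001 + bytemask) >> 23) & bytemask;

	printf("first NUL at byte %ld\n", idx);	/* prints 2 */
	return 0;
}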
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h
deleted file mode 100644 (file)
index c82eb12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
deleted file mode 100644 (file)
index 1192e1c..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * SH-2A UBC definitions
- *
- * Copyright (C) 2008 Kieran Bingham
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ASM_CPU_SH2A_UBC_H
-#define __ASM_CPU_SH2A_UBC_H
-
-#define UBC_BARA                0xfffc0400
-#define UBC_BAMRA               0xfffc0404
-#define UBC_BBRA                0xfffc04a0     /* 16 bit access */
-#define UBC_BDRA                0xfffc0408
-#define UBC_BDMRA               0xfffc040c
-
-#define UBC_BARB                0xfffc0410
-#define UBC_BAMRB               0xfffc0414
-#define UBC_BBRB                0xfffc04b0     /* 16 bit access */
-#define UBC_BDRB                0xfffc0418
-#define UBC_BDMRB               0xfffc041c
-
-#define UBC_BRCR                0xfffc04c0
-
-#endif /* __ASM_CPU_SH2A_UBC_H */
index 8832c526cdf92d68cb8b59a48d2a05d8942ab181..c4a0336660dd102d6542b3f3a5721214439dce08 100644 (file)
@@ -2,7 +2,7 @@
 #include <linux/serial_core.h>
 #include <linux/io.h>
 #include <cpu/serial.h>
-#include <asm/gpio.h>
+#include <cpu/gpio.h>
 
 static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
 {
index ea01a72f1b94f16892734df4a336e4b1f756f7c1..53638e231cd02f91bcb7678a9e1990de6338b8fd 100644 (file)
@@ -283,7 +283,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 7ac07b4f75de3c01982c5a4c2df50bb67cae8a86..22e485d1990b598c698c1f34344589f01a2b7683 100644 (file)
@@ -276,7 +276,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 8e1f97010c0d7ac9c792c23a5b09197508caeed5..c4cb740e4d10180d3034526d18cb0457b81ca301 100644 (file)
@@ -261,7 +261,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 35f75cf0c7e57b5dfe02f1fb7dc93e1156634016..37c41c7747a3b73a2a70400a79dab3e8a07a4b7c 100644 (file)
@@ -311,7 +311,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 2a87901673febed3df329d47d8bf21acdabb4530..c87e78f73234f453a7fd758dd14b053e844c2408 100644 (file)
@@ -375,7 +375,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, HWBLK_NR);
+               ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
 
        return ret;
 }
index 1697642c1f738c4887e4ed2577dd49a9b2b8621e..deb683abacf0f6055dcbb3e0ef65b47071e933de 100644 (file)
@@ -260,7 +260,7 @@ int __init arch_clk_init(void)
                        &div4_table);
 
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 04ab5aeaf9206527d0eae807a317ecd342a233fd..e84a43229b9c5af1ec9c7935c8cf8150c698cdac 100644 (file)
@@ -148,7 +148,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index ab1c58f2d101172b6f769d48278795d6e26adbda..1c83788db76a1f0c0055a1b8dd088073380cc579 100644 (file)
@@ -175,7 +175,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 491709483e109bc75213963b508b6608c55bc9ad..8bba6f15902350d4b7429e960e0a49627b60ef59 100644 (file)
@@ -194,7 +194,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index 0f11b392bf466ef2334c221b02cd64de37134dac..a9422dab0ce747115bca41176e47d63f63d78ec4 100644 (file)
@@ -149,7 +149,7 @@ int __init arch_clk_init(void)
                ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
                                           &div4_table);
        if (!ret)
-               ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+               ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
 
        return ret;
 }
index ff1f0e6e9becd18f111d3a3d83730fd4860dfe02..b7cf6a547f117c5eeae4b69f6b1ccbcdb41b37c8 100644 (file)
@@ -1568,86 +1568,6 @@ ___clear_user_exit:
 
 #endif /* CONFIG_MMU */
 
-/*
- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
- *                        int __count)
- *
- * Inputs:
- * (r2)  target address
- * (r3)  source address
- * (r4)  maximum size in bytes
- *
- * Ouputs:
- * (*r2) copied data
- * (r2)  -EFAULT (in case of faulting)
- *       copied data (otherwise)
- */
-       .global __strncpy_from_user
-__strncpy_from_user:
-       pta     ___strncpy_from_user1, tr0
-       pta     ___strncpy_from_user_done, tr1
-       or      r4, ZERO, r5            /* r5 = original count */
-       beq/u   r4, r63, tr1            /* early exit if r4==0 */
-       movi    -(EFAULT), r6           /* r6 = reply, no real fixup */
-       or      ZERO, ZERO, r7          /* r7 = data, clear top byte of data */
-
-___strncpy_from_user1:
-       ld.b    r3, 0, r7               /* Fault address: only in reading */
-       st.b    r2, 0, r7
-       addi    r2, 1, r2
-       addi    r3, 1, r3
-       beq/u   ZERO, r7, tr1
-       addi    r4, -1, r4              /* return real number of copied bytes */
-       bne/l   ZERO, r4, tr0
-
-___strncpy_from_user_done:
-       sub     r5, r4, r6              /* If done, return copied */
-
-___strncpy_from_user_exit:
-       or      r6, ZERO, r2
-       ptabs   LINK, tr0
-       blink   tr0, ZERO
-
-/*
- * extern long __strnlen_user(const char *__s, long __n)
- *
- * Inputs:
- * (r2)  source address
- * (r3)  source size in bytes
- *
- * Ouputs:
- * (r2)  -EFAULT (in case of faulting)
- *       string length (otherwise)
- */
-       .global __strnlen_user
-__strnlen_user:
-       pta     ___strnlen_user_set_reply, tr0
-       pta     ___strnlen_user1, tr1
-       or      ZERO, ZERO, r5          /* r5 = counter */
-       movi    -(EFAULT), r6           /* r6 = reply, no real fixup */
-       or      ZERO, ZERO, r7          /* r7 = data, clear top byte of data */
-       beq     r3, ZERO, tr0
-
-___strnlen_user1:
-       ldx.b   r2, r5, r7              /* Fault address: only in reading */
-       addi    r3, -1, r3              /* No real fixup */
-       addi    r5, 1, r5
-       beq     r3, ZERO, tr0
-       bne     r7, ZERO, tr1
-! The line below used to be active.  This meant led to a junk byte lying between each pair
-! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-!      addi    r5, 1, r5               /* Include '\0' */
-
-___strnlen_user_set_reply:
-       or      r5, ZERO, r6            /* If done, return counter */
-
-___strnlen_user_exit:
-       or      r6, ZERO, r2
-       ptabs   LINK, tr0
-       blink   tr0, ZERO
-
 /*
  * extern long __get_user_asm_?(void *val, long addr)
  *
@@ -1982,8 +1902,6 @@ asm_uaccess_start:
        .long   ___copy_user2, ___copy_user_exit
        .long   ___clear_user1, ___clear_user_exit
 #endif
-       .long   ___strncpy_from_user1, ___strncpy_from_user_exit
-       .long   ___strnlen_user1, ___strnlen_user_exit
        .long   ___get_user_asm_b1, ___get_user_asm_b_exit
        .long   ___get_user_asm_w1, ___get_user_asm_w_exit
        .long   ___get_user_asm_l1, ___get_user_asm_l_exit
index 9b7a459a4613d8573a7c84ac3bf970402431c31e..055d91b70305f3f2a45bf176507d484a4e49e619 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/stackprotector.h>
+#include <asm/fpu.h>
 
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
index 4264583eabac52a292a4a3c79281b858020cd62d..602545b12a8678857c8752407be691a1bd511baa 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/switch_to.h>
 
 struct task_struct *last_task_used_math = NULL;
+struct pt_regs fake_swapper_regs = { 0, };
 
 void show_regs(struct pt_regs *regs)
 {
index 45afa5c51f6751585e2d8a8beba5cc11e837b5c8..26a0774f5272af46839c1faaf924ae9a4e29af13 100644 (file)
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
 EXPORT_SYMBOL(__get_user_asm_w);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_q);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h
deleted file mode 100644 (file)
index 870db59..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _SPARC64_CMT_H
-#define _SPARC64_CMT_H
-
-/* cmt.h: Chip Multi-Threading register definitions
- *
- * Copyright (C) 2004 David S. Miller (davem@redhat.com)
- */
-
-/* ASI_CORE_ID - private */
-#define LP_ID          0x0000000000000010UL
-#define  LP_ID_MAX     0x00000000003f0000UL
-#define  LP_ID_ID      0x000000000000003fUL
-
-/* ASI_INTR_ID - private */
-#define LP_INTR_ID     0x0000000000000000UL
-#define  LP_INTR_ID_ID 0x00000000000003ffUL
-
-/* ASI_CESR_ID - private */
-#define CESR_ID                0x0000000000000040UL
-#define  CESR_ID_ID    0x00000000000000ffUL
-
-/* ASI_CORE_AVAILABLE - shared */
-#define LP_AVAIL       0x0000000000000000UL
-#define  LP_AVAIL_1    0x0000000000000002UL
-#define  LP_AVAIL_0    0x0000000000000001UL
-
-/* ASI_CORE_ENABLE_STATUS - shared */
-#define LP_ENAB_STAT   0x0000000000000010UL
-#define  LP_ENAB_STAT_1        0x0000000000000002UL
-#define  LP_ENAB_STAT_0        0x0000000000000001UL
-
-/* ASI_CORE_ENABLE - shared */
-#define LP_ENAB                0x0000000000000020UL
-#define  LP_ENAB_1     0x0000000000000002UL
-#define  LP_ENAB_0     0x0000000000000001UL
-
-/* ASI_CORE_RUNNING - shared */
-#define LP_RUNNING_RW  0x0000000000000050UL
-#define LP_RUNNING_W1S 0x0000000000000060UL
-#define LP_RUNNING_W1C 0x0000000000000068UL
-#define  LP_RUNNING_1  0x0000000000000002UL
-#define  LP_RUNNING_0  0x0000000000000001UL
-
-/* ASI_CORE_RUNNING_STAT - shared */
-#define LP_RUN_STAT    0x0000000000000058UL
-#define  LP_RUN_STAT_1 0x0000000000000002UL
-#define  LP_RUN_STAT_0 0x0000000000000001UL
-
-/* ASI_XIR_STEERING - shared */
-#define LP_XIR_STEER   0x0000000000000030UL
-#define  LP_XIR_STEER_1        0x0000000000000002UL
-#define  LP_XIR_STEER_0        0x0000000000000001UL
-
-/* ASI_CMT_ERROR_STEERING - shared */
-#define CMT_ER_STEER   0x0000000000000040UL
-#define  CMT_ER_STEER_1        0x0000000000000002UL
-#define  CMT_ER_STEER_0        0x0000000000000001UL
-
-#endif /* _SPARC64_CMT_H */
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h
deleted file mode 100644 (file)
index f842303..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * mpmbox.h:  Interface and defines for the OpenProm mailbox
- *               facilities for MP machines under Linux.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_MPMBOX_H
-#define _SPARC_MPMBOX_H
-
-/* The prom allocates, for each CPU on the machine an unsigned
- * byte in physical ram.  You probe the device tree prom nodes
- * for these values.  The purpose of this byte is to be able to
- * pass messages from one cpu to another.
- */
-
-/* These are the main message types we have to look for in our
- * Cpu mailboxes, based upon these values we decide what course
- * of action to take.
- */
-
-/* The CPU is executing code in the kernel. */
-#define MAILBOX_ISRUNNING     0xf0
-
-/* Another CPU called romvec->pv_exit(), you should call
- * prom_stopcpu() when you see this in your mailbox.
- */
-#define MAILBOX_EXIT          0xfb
-
-/* Another CPU called romvec->pv_enter(), you should call
- * prom_cpuidle() when this is seen.
- */
-#define MAILBOX_GOSPIN        0xfc
-
-/* Another CPU has hit a breakpoint either into kadb or the prom
- * itself.  Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
- * at this point.
- */
-#define MAILBOX_BPT_SPIN      0xfd
-
-/* Oh geese, some other nitwit got a damn watchdog reset.  The party's
- * over so go call prom_stopcpu().
- */
-#define MAILBOX_WDOG_STOP     0xfe
-
-#ifndef __ASSEMBLY__
-
-/* Handy macro's to determine a cpu's state. */
-
-/* Is the cpu still in Power On Self Test? */
-#define MBOX_POST_P(letter)  ((letter) >= 0x00 && (letter) <= 0x7f)
-
-/* Is the cpu at the 'ok' prompt of the PROM? */
-#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
-
-/* Is the cpu spinning in the PROM? */
-#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
-
-/* Sanity check... This is junk mail, throw it out. */
-#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
-
-/* Is the cpu actively running an application/kernel-code? */
-#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_MPMBOX_H) */
index 5cffdc55f075a7c9229e40e6abfd97c44d211ed2..3e244f31e56b93c7f0e9487056d6b75d13951618 100644 (file)
@@ -443,7 +443,7 @@ static int __init vio_init(void)
        root_vdev = vio_create_one(hp, root, NULL);
        err = -ENODEV;
        if (!root_vdev) {
-               printk(KERN_ERR "VIO: Coult not create root device.\n");
+               printk(KERN_ERR "VIO: Could not create root device.\n");
                goto out_release;
        }
 
index 7e1fef36bde6651f2f199115919c887e494448f4..e9c670d7a7fe2e13c1edd65c410f840ba75f5e32 100644 (file)
@@ -91,11 +91,6 @@ extern void smp_nap(void);
 /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
 extern void _cpu_idle(void);
 
-/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
-extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
-                                 unsigned long new_sp,
-                                 unsigned long new_ss10);
-
 #else /* __ASSEMBLY__ */
 
 /*
index c3dd275f25e2f0304d4719358d42d692f046175b..9ab078a4605dd9750384b15b72b34c523b038fb5 100644 (file)
@@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #ifdef __tilegx__
 #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
 #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
-#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
 #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
 #else
 #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
index 9092ce8aa6b472b302ec31d514bb2da71fac3bfe..f8b74ca83b9257a245adaa6a03d05cdb75431c97 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <asm/byteorder.h>
 #include <asm/backtrace.h>
 #include <asm/tile-desc.h>
 #include <arch/abi.h>
@@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }
 
-               /* Decode the next bundle. */
-               bundle.bits = prefetched_bundles[next_bundle++];
+               /*
+                * Decode the next bundle.
+                * TILE always stores instruction bundles in little-endian
+                * mode, even when the chip is running in big-endian mode.
+                */
+               bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);
index 133c4b56a99ec7196c59f9dca60742953826bf75..c31637baff283698f776daf5a83c480dd00b26d6 100644 (file)
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
        jrp lr   /* keep backtracer happy */
        STD_ENDPROC(KBacktraceIterator_init_current)
 
-/*
- * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
- * free the old stack (passed in r0) and re-invoke cpu_idle().
- * We update sp and ksp0 simultaneously to avoid backtracer warnings.
- */
-STD_ENTRY(cpu_idle_on_new_stack)
-       {
-        move sp, r1
-        mtspr SPR_SYSTEM_SAVE_K_0, r2
-       }
-       jal free_thread_info
-       j cpu_idle
-       STD_ENDPROC(cpu_idle_on_new_stack)
-
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
        nap
index 6098ccc59be2484a22a5f11c862569d9850c4248..dd87f342039064a0e545382acdf65321d0815ac1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/smp.h>
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
+#include <linux/start_kernel.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
index 88e466b159dcac92a13d81b5919b0a5487d63830..43b39d61b538698229a23f654b7fb44a335187b3 100644 (file)
@@ -705,7 +705,6 @@ static void stack_proc(void *arg)
        struct task_struct *from = current, *to = arg;
 
        to->thread.saved_task = from;
-       rcu_switch_from(from);
        switch_to(from, to, from);
 }
 
index 8bbea6aa40d9705b68e1dd9531b7c9c74d96a106..efe5acfc79c30c8ab2df32586618fa6f7c2fc88b 100644 (file)
@@ -94,10 +94,10 @@ bs_die:
 
        .section ".bsdata", "a"
 bugger_off_msg:
-       .ascii  "Direct booting from floppy is no longer supported.\r\n"
-       .ascii  "Please use a boot loader program instead.\r\n"
+       .ascii  "Direct floppy boot is not supported. "
+       .ascii  "Use a boot loader program instead.\r\n"
        .ascii  "\n"
-       .ascii  "Remove disk and press any key to reboot . . .\r\n"
+       .ascii  "Remove disk and press any key to reboot ...\r\n"
        .byte   0
 
 #ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@ coff_header:
 #else
        .word   0x8664                          # x86-64
 #endif
-       .word   2                               # nr_sections
+       .word   3                               # nr_sections
        .long   0                               # TimeDateStamp
        .long   0                               # PointerToSymbolTable
        .long   1                               # NumberOfSymbols
@@ -158,8 +158,8 @@ extra_header_fields:
 #else
        .quad   0                               # ImageBase
 #endif
-       .long   0x1000                          # SectionAlignment
-       .long   0x200                           # FileAlignment
+       .long   0x20                            # SectionAlignment
+       .long   0x20                            # FileAlignment
        .word   0                               # MajorOperatingSystemVersion
        .word   0                               # MinorOperatingSystemVersion
        .word   0                               # MajorImageVersion
@@ -200,8 +200,10 @@ extra_header_fields:
 
        # Section table
 section_table:
-       .ascii  ".text"
-       .byte   0
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".setup"
        .byte   0
        .byte   0
        .long   0
@@ -217,9 +219,8 @@ section_table:
 
        #
        # The EFI application loader requires a relocation section
-       # because EFI applications must be relocatable. But since
-       # we don't need the loader to fixup any relocs for us, we
-       # just create an empty (zero-length) .reloc section header.
+       # because EFI applications must be relocatable. The .reloc
+       # offset & size fields are filled in by build.c.
        #
        .ascii  ".reloc"
        .byte   0
@@ -233,6 +234,25 @@ section_table:
        .word   0                               # NumberOfRelocations
        .word   0                               # NumberOfLineNumbers
        .long   0x42100040                      # Characteristics (section flags)
+
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".text"
+       .byte   0
+       .byte   0
+       .byte   0
+       .long   0
+       .long   0x0                             # startup_{32,64}
+       .long   0                               # Size of initialized data
+                                               # on disk
+       .long   0x0                             # startup_{32,64}
+       .long   0                               # PointerToRelocations
+       .long   0                               # PointerToLineNumbers
+       .word   0                               # NumberOfRelocations
+       .word   0                               # NumberOfLineNumbers
+       .long   0x60500020                      # Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
        # Kernel attributes; used by setup.  This is part 1 of the
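
For orientation, a sketch of the resulting image layout under the new three-section scheme (offsets follow from the section table above and the values build.c fills in below; setup_sectors is the padded size of the setup code in 512-byte sectors):

	0x000                      boot sector, MS-DOS/PE headers and section table
	0x200                      .setup  - remainder of the real-mode setup code
	setup_sectors*512 - 0x20   .reloc  - 0x20 bytes reserved and filled by build.c
	setup_sectors*512          .text   - startup_32/startup_64 and the compressed kernel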
index 3f61f6e2b46f3ece150f20b409c1e652f3dd301b..4b8e165ee5723643dcd89619a341c7559eafe2bd 100644 (file)
@@ -50,6 +50,8 @@ typedef unsigned int   u32;
 u8 buf[SETUP_SECT_MAX*512];
 int is_big_kernel;
 
+#define PECOFF_RELOC_RESERVE 0x20
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -133,11 +135,103 @@ static void usage(void)
        die("Usage: build setup system [> image]");
 }
 
-int main(int argc, char ** argv)
-{
 #ifdef CONFIG_EFI_STUB
-       unsigned int file_sz, pe_header;
+
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+       unsigned int pe_header;
+       unsigned short num_sections;
+       u8 *section;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+       num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+
+#ifdef CONFIG_X86_32
+       section = &buf[pe_header + 0xa8];
+#else
+       section = &buf[pe_header + 0xb8];
 #endif
+
+       while (num_sections > 0) {
+               if (strncmp((char*)section, section_name, 8) == 0) {
+                       /* section header size field */
+                       put_unaligned_le32(size, section + 0x8);
+
+                       /* section header vma field */
+                       put_unaligned_le32(offset, section + 0xc);
+
+                       /* section header 'size of initialised data' field */
+                       put_unaligned_le32(size, section + 0x10);
+
+                       /* section header 'file offset' field */
+                       put_unaligned_le32(offset, section + 0x14);
+
+                       break;
+               }
+               section += 0x28;
+               num_sections--;
+       }
+}
+
+static void update_pecoff_setup_and_reloc(unsigned int size)
+{
+       u32 setup_offset = 0x200;
+       u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
+       u32 setup_size = reloc_offset - setup_offset;
+
+       update_pecoff_section_header(".setup", setup_offset, setup_size);
+       update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+
+       /*
+        * Modify .reloc section contents with a single entry. The
+        * relocation is applied to offset 10 of the relocation section.
+        */
+       put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+       put_unaligned_le32(10, &buf[reloc_offset + 4]);
+}
+
+static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+{
+       unsigned int pe_header;
+       unsigned int text_sz = file_sz - text_start;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+
+       /* Size of image */
+       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+       /*
+        * Size of code: Subtract the size of the first sector (512 bytes)
+        * which includes the header.
+        */
+       put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
+
+#ifdef CONFIG_X86_32
+       /*
+        * Address of entry point.
+        *
+        * The EFI stub entry point is +16 bytes from the start of
+        * the .text section.
+        */
+       put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
+#else
+       /*
+        * Address of entry point. startup_32 is at the beginning and
+        * the 64-bit entry point (startup_64) is always 512 bytes
+        * after. The EFI stub entry point is 16 bytes after that, as
+        * the first instruction allows legacy loaders to jump over
+        * the EFI stub initialisation
+        */
+       put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
+#endif /* CONFIG_X86_32 */
+
+       update_pecoff_section_header(".text", text_start, text_sz);
+}
+
+#endif /* CONFIG_EFI_STUB */
+
+int main(int argc, char ** argv)
+{
        unsigned int i, sz, setup_sectors;
        int c;
        u32 sys_size;
@@ -163,6 +257,12 @@ int main(int argc, char ** argv)
                die("Boot block hasn't got boot flag (0xAA55)");
        fclose(file);
 
+#ifdef CONFIG_EFI_STUB
+       /* Reserve 0x20 bytes for .reloc section */
+       memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+       c += PECOFF_RELOC_RESERVE;
+#endif
+
        /* Pad unused space with zeros */
        setup_sectors = (c + 511) / 512;
        if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@ int main(int argc, char ** argv)
        i = setup_sectors*512;
        memset(buf+c, 0, i-c);
 
+#ifdef CONFIG_EFI_STUB
+       update_pecoff_setup_and_reloc(i);
+#endif
+
        /* Set the default root device */
        put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
 
@@ -194,66 +298,8 @@ int main(int argc, char ** argv)
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
 #ifdef CONFIG_EFI_STUB
-       file_sz = sz + i + ((sys_size * 16) - sz);
-
-       pe_header = get_unaligned_le32(&buf[0x3c]);
-
-       /* Size of image */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
-       /*
-        * Subtract the size of the first section (512 bytes) which
-        * includes the header and .reloc section. The remaining size
-        * is that of the .text section.
-        */
-       file_sz -= 512;
-
-       /* Size of code */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
-
-#ifdef CONFIG_X86_32
-       /*
-        * Address of entry point.
-        *
-        * The EFI stub entry point is +16 bytes from the start of
-        * the .text section.
-        */
-       put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
-
-       /* .text size */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
-
-       /* .text vma */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
-
-       /* .text size of initialised data */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
-
-       /* .text file offset */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
-#else
-       /*
-        * Address of entry point. startup_32 is at the beginning and
-        * the 64-bit entry point (startup_64) is always 512 bytes
-        * after. The EFI stub entry point is 16 bytes after that, as
-        * the first instruction allows legacy loaders to jump over
-        * the EFI stub initialisation
-        */
-       put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
-
-       /* .text size */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
-
-       /* .text vma */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
-
-       /* .text size of initialised data */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
-
-       /* .text file offset */
-       put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
-#endif /* CONFIG_X86_32 */
-#endif /* CONFIG_EFI_STUB */
+       update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+#endif
 
        crc = partial_crc32(buf, i, crc);
        if (fwrite(buf, 1, i, stdout) != i)
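
The raw offsets patched by update_pecoff_section_header() follow the standard COFF section header layout; the struct below is only an illustrative sketch of that layout (build.c deliberately keeps working on raw bytes, and the field names are taken from the PE specification):

	struct pe_section_header {			/* 0x28 bytes: the stride used above   */
		char     name[8];			/* 0x00  ".setup" / ".reloc" / ".text" */
		uint32_t virtual_size;			/* 0x08  patched with the section size */
		uint32_t virtual_address;		/* 0x0c  patched with the file offset  */
		uint32_t size_of_raw_data;		/* 0x10  patched with the section size */
		uint32_t pointer_to_raw_data;		/* 0x14  patched with the file offset  */
		uint32_t pointer_to_relocations;	/* 0x18 */
		uint32_t pointer_to_line_numbers;	/* 0x1c */
		uint16_t number_of_relocations;		/* 0x20 */
		uint16_t number_of_line_numbers;	/* 0x22 */
		uint32_t characteristics;		/* 0x24 */
	};

buf[0x3c] is the DOS header's e_lfanew field, i.e. the offset of the PE signature; the section count sits 6 bytes into the PE header, and with the optional header emitted by header.S the first section header lands at pe_header + 0xa8 (PE32) or pe_header + 0xb8 (PE32+), which is where those constants come from.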
index be6d9e365a800a8569fb4915fccbfc50b0793a09..3470624d7835fa646b7c9f8dafa6acd3d9ed5f2f 100644 (file)
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
        pxor IN3, STATE4
        movaps IN4, IV
 #else
-       pxor (INP), STATE2
-       pxor 0x10(INP), STATE3
        pxor IN1, STATE4
        movaps IN2, IV
+       movups (INP), IN1
+       pxor IN1, STATE2
+       movups 0x10(INP), IN2
+       pxor IN2, STATE3
 #endif
        movups STATE1, (OUTP)
        movups STATE2, 0x10(OUTP)
index daeca56211e39b13babd4504cb19e9797f6e7810..673ac9b63d6bf51ca36e2a0f18c4ca0d125d4fde 100644 (file)
@@ -38,7 +38,7 @@
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
        int err = 0;
-       bool ia32 = is_ia32_task();
+       bool ia32 = test_thread_flag(TIF_IA32);
 
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
index 340ee49961a61061862f892526d70a2f183b314c..f91e80f4f180bdba4e123835a99d4f3a5fc93973 100644 (file)
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM     (7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE  (7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */
index 0e3793b821ef4027c88f60cd2f7407277d034325..dc580c42851c223a1f21142bbb3a0b8983e18975 100644 (file)
@@ -54,6 +54,20 @@ struct nmiaction {
        __register_nmi_handler((t), &fn##_na);  \
 })
 
+/*
+ * For special handlers that register/unregister in the
+ * init section only.  This should be considered rare.
+ */
+#define register_nmi_handler_initonly(t, fn, fg, n)            \
+({                                                     \
+       static struct nmiaction fn##_na __initdata = {          \
+               .handler = (fn),                        \
+               .name = (n),                            \
+               .flags = (fg),                          \
+       };                                              \
+       __register_nmi_handler((t), &fn##_na);  \
+})
+
 int __register_nmi_handler(unsigned int, struct nmiaction *);
 
 void unregister_nmi_handler(unsigned int, const char *);
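
A minimal usage sketch for the new helper (handler and names below are hypothetical); it mirrors register_nmi_handler() but is intended for handlers registered and unregistered entirely from __init code, since the backing nmiaction lives in .init.data:

	static int __init my_selftest_nmi(unsigned int type, struct pt_regs *regs)
	{
		return NMI_HANDLED;	/* claim the NMI */
	}

	static void __init my_selftest_setup(void)
	{
		register_nmi_handler_initonly(NMI_LOCAL, my_selftest_nmi, 0,
					      "my_selftest");
	}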
index 43876f16caf1ca8981288089d57b362e017bafbf..cb00ccc7d571f2b5245f935bde849c5c5e289635 100644 (file)
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
  * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
  * operations.
  *
- * Without THP if the mmap_sem is hold for reading, the
- * pmd can only transition from null to not null while pmd_read_atomic runs.
- * So there's no need of literally reading it atomically.
+ * Without THP if the mmap_sem is hold for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
  *
  * With THP if the mmap_sem is hold for reading, the pmd can become
- * THP or null or point to a pte (and in turn become "stable") at any
- * time under pmd_read_atomic, so it's mandatory to read it atomically
- * with cmpxchg8b.
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with a atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
  */
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 {
        pmdval_t ret;
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 
        return (pmd_t) { ret };
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
-       return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
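
To make the comment above concrete, a sketch of how such a read can be structured under PAE without cmpxchg8b (an illustration of the ordering the comment describes, not necessarily the exact final function body):

	static inline pmd_t pmd_read_atomic_sketch(pmd_t *pmdp)
	{
		pmdval_t ret;
		u32 *tmp = (u32 *)pmdp;

		/* read the low word first; if it is none, never touch the high word */
		ret = (pmdval_t)*tmp;
		if (ret) {
			/* order the high-word read after the low-word read */
			smp_rmb();
			ret |= ((pmdval_t)*(tmp + 1)) << 32;
		}
		return (pmd_t) { ret };
	}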
index 04cd6882308e5d06b685bea5ef76ff1f863d67da..e1f3a17034fce61ac64e2ff54640fcc31ec0f582 100644 (file)
@@ -33,9 +33,8 @@
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
 #define user_addr_max() (current_thread_info()->addr_limit.seg)
-#define __addr_ok(addr)                                        \
-       ((unsigned long __force)(addr) <                \
-        (current_thread_info()->addr_limit.seg))
+#define __addr_ok(addr)        \
+       ((unsigned long __force)(addr) < user_addr_max())
 
 /*
  * Test whether a block of memory is a valid user space address.
  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
  */
 
-#define __range_not_ok(addr, size)                                     \
+#define __range_not_ok(addr, size, limit)                              \
 ({                                                                     \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"             \
            : "=&r" (flag), "=r" (roksum)                               \
            : "1" (addr), "g" ((long)(size)),                           \
-             "rm" (current_thread_info()->addr_limit.seg));            \
+             "rm" (limit));                                            \
        flag;                                                           \
 })
 
@@ -77,7 +76,8 @@
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+       (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
 
 /*
  * The exception table consists of pairs of addresses relative to the
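
A rough C equivalent of the add/sbb sequence above (a sketch for readability only; the real macro stays in inline asm so the carry out of the addition is captured directly):

	static inline unsigned long range_not_ok_sketch(unsigned long addr,
							unsigned long size,
							unsigned long limit)
	{
		unsigned long sum = addr + size;

		/* a carry out of the addition means the range wraps around */
		if (sum < addr)
			return 1;
		/* otherwise the end of the range must not pass the limit */
		return sum > limit;
	}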
index becf47b81735ef6731e0271d0de7c0d939a389a9..6149b476d9dffe06bcd1e3e3136bc335fd3dbf98 100644 (file)
 /* 4 bits of software ack period */
 #define UV2_ACK_MASK                   0x7UL
 #define UV2_ACK_UNITS_SHFT             3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
 #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
 
 /*
index 8afb693198155c705c3e4bb42e1ba12a69427159..b2297e58c6ed27c418fe9d5e9f00bf1e0923c833 100644 (file)
@@ -422,12 +422,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                return 0;
        }
 
-       if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+       if (intsrc->source_irq == 0) {
                if (acpi_skip_timer_override) {
-                       printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                       printk(PREFIX "BIOS IRQ0 override ignored.\n");
                        return 0;
                }
-               if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+               if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+                       && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
                        printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
                }
@@ -1334,17 +1336,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-       /*
-        * The ati_ixp4x0_rev() early PCI quirk should have set
-        * the acpi_skip_timer_override flag already:
-        */
        if (!acpi_skip_timer_override) {
-               WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
-               pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+               pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
                        d->ident);
                acpi_skip_timer_override = 1;
        }
@@ -1438,7 +1435,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
         * is enabled.  This input is incorrectly designated the
         * ISA IRQ 0 via an interrupt source override even though
         * it is wired to the output of the master 8259A and INTIN0
-        * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+        * is not connected at all.  Force ignoring BIOS IRQ0
         * override in that cases.
         */
        {
@@ -1473,6 +1470,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
                     },
         },
+       {
+        .callback = dmi_ignore_irq0_timer_override,
+        .ident = "FUJITSU SIEMENS",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+                    },
+        },
        {}
 };
 
index 6e76c191a83572c07bf86c9bbc7c129c4bbc912c..d5fd66f0d4cd01ea1420180af7a1ac0be2313045 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
-#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
                return 0;
        }
        memblock_reserve(addr, aper_size);
-       /*
-        * Kmemleak should not scan this block as it may not be mapped via the
-        * kernel direct mapping.
-        */
-       kmemleak_ignore(phys_to_virt(addr));
        printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
                        aper_size >> 10, addr);
        insert_aperture_resource((u32)addr, aper_size);
index ac96561d1a99ef499d8791ee736bdd85221ba4a9..5f0ff597437c17add5334629458f8c2b35ffa148 100644 (file)
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
        BUG_ON(!cfg->vector);
 
        vector = cfg->vector;
-       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+       for_each_cpu(cpu, cfg->domain)
                per_cpu(vector_irq, cpu)[vector] = -1;
 
        cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 
        if (likely(!cfg->move_in_progress))
                return;
-       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+       for_each_cpu(cpu, cfg->old_domain) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                                                                vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
index 0a687fd185e6c97b496578d1dc6acb4001c2833d..da27c5d2168a74c29e3d1da16d108ef7017113c8 100644 (file)
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data)
         */
        iv = __this_cpu_read(mce_next_interval);
        if (mce_notify_irq())
-               iv = max(iv, (unsigned long) HZ/100);
+               iv = max(iv / 2, (unsigned long) HZ/100);
        else
                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
        __this_cpu_write(mce_next_interval, iv);
@@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 static void __mcheck_cpu_init_timer(void)
 {
        struct timer_list *t = &__get_cpu_var(mce_timer);
-       unsigned long iv = __this_cpu_read(mce_next_interval);
+       unsigned long iv = check_interval * HZ;
 
        setup_timer(t, mce_timer_fn, smp_processor_id());
 
index dfea390e16085225ffe82ffbdca3cf641d79ce90..c7b3fe2d72e0f71b94577fde23622a8797eca941 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
 #
 # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
 #
@@ -11,22 +11,35 @@ open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
 print OUT "#include <asm/cpufeature.h>\n\n";
 print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
 
+%features = ();
+$err = 0;
+
 while (defined($line = <IN>)) {
        if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
                $macro = $1;
-               $feature = $2;
+               $feature = "\L$2";
                $tail = $3;
                if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
-                       $feature = $1;
+                       $feature = "\L$1";
                }
 
-               if ($feature ne '') {
-                       printf OUT "\t%-32s = \"%s\",\n",
-                               "[$macro]", "\L$feature";
+               next if ($feature eq '');
+
+               if ($features{$feature}++) {
+                       print STDERR "$in: duplicate feature name: $feature\n";
+                       $err++;
                }
+               printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
        }
 }
 print OUT "};\n";
 
 close(IN);
 close(OUT);
+
+if ($err) {
+       unlink($out);
+       exit(1);
+}
+
+exit(0);
index e049d6da01832cfc91b5e2a45b922f592c7bb7ae..c4706cf9c011d8fd068d205ed0b669142c112400 100644 (file)
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
                if (!cpuc->shared_regs)
                        goto error;
        }
+       cpuc->is_fake = 1;
        return cpuc;
 error:
        free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
        dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+       return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if (bytes != sizeof(frame))
                        break;
 
-               if (fp < compat_ptr(regs->sp))
+               if (!valid_user_frame(fp, sizeof(frame)))
                        break;
 
                perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                if (bytes != sizeof(frame))
                        break;
 
-               if ((unsigned long)fp < regs->sp)
+               if (!valid_user_frame(fp, sizeof(frame)))
                        break;
 
                perf_callchain_store(entry, frame.return_address);
index 6638aaf5449302c2ea2d5d03073f11bbcaf5b6ba..7241e2fc3c17c87b766c35f8d412b8692436b825 100644 (file)
@@ -117,6 +117,7 @@ struct cpu_hw_events {
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 
        unsigned int            group_flag;
+       int                     is_fake;
 
        /*
         * Intel DebugStore bits
@@ -364,6 +365,7 @@ struct x86_pmu {
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
+       void            (*pebs_aliases)(struct perf_event *event);
 
        /*
         * Intel LBR
index 166546ec6aefe523a20fc5b4206d0ef9a87e4679..187c294bc6583424e8613df62a883740ab8fca1a 100644 (file)
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
        return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
 {
        if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
-               return false;
+               return idx;
 
-       if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
-               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-               event->hw.config |= 0x01bb;
-               event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
-               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
-       } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+       if (idx == EXTRA_REG_RSP_0)
+               return EXTRA_REG_RSP_1;
+
+       if (idx == EXTRA_REG_RSP_1)
+               return EXTRA_REG_RSP_0;
+
+       return idx;
+}
+
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+       event->hw.extra_reg.idx = idx;
+
+       if (idx == EXTRA_REG_RSP_0) {
                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
                event->hw.config |= 0x01b7;
-               event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+       } else if (idx == EXTRA_REG_RSP_1) {
+               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+               event->hw.config |= 0x01bb;
+               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
        }
-
-       if (event->hw.extra_reg.idx == orig_idx)
-               return false;
-
-       return true;
 }
 
 /*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
        struct event_constraint *c = &emptyconstraint;
        struct er_account *era;
        unsigned long flags;
-       int orig_idx = reg->idx;
+       int idx = reg->idx;
 
-       /* already allocated shared msr */
-       if (reg->alloc)
+       /*
+        * reg->alloc can be set due to existing state, so for fake cpuc we
+        * need to ignore this, otherwise we might fail to allocate proper fake
+        * state for this extra reg constraint. Also see the comment below.
+        */
+       if (reg->alloc && !cpuc->is_fake)
                return NULL; /* call x86_get_event_constraint() */
 
 again:
-       era = &cpuc->shared_regs->regs[reg->idx];
+       era = &cpuc->shared_regs->regs[idx];
        /*
         * we use spin_lock_irqsave() to avoid lockdep issues when
         * passing a fake cpuc
@@ -1173,6 +1183,29 @@ again:
 
        if (!atomic_read(&era->ref) || era->config == reg->config) {
 
+               /*
+                * If its a fake cpuc -- as per validate_{group,event}() we
+                * shouldn't touch event state and we can avoid doing so
+                * since both will only call get_event_constraints() once
+                * on each event, this avoids the need for reg->alloc.
+                *
+                * Not doing the ER fixup will only result in era->reg being
+                * wrong, but since we won't actually try and program hardware
+                * this isn't a problem either.
+                */
+               if (!cpuc->is_fake) {
+                       if (idx != reg->idx)
+                               intel_fixup_er(event, idx);
+
+                       /*
+                        * x86_schedule_events() can call get_event_constraints()
+                        * multiple times on events in the case of incremental
+                        * scheduling(). reg->alloc ensures we only do the ER
+                        * allocation once.
+                        */
+                       reg->alloc = 1;
+               }
+
                /* lock in msr value */
                era->config = reg->config;
                era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ again:
                /* one more user */
                atomic_inc(&era->ref);
 
-               /* no need to reallocate during incremental event scheduling */
-               reg->alloc = 1;
-
                /*
                 * need to call x86_get_event_constraint()
                 * to check if associated event has constraints
                 */
                c = NULL;
-       } else if (intel_try_alt_er(event, orig_idx)) {
-               raw_spin_unlock_irqrestore(&era->lock, flags);
-               goto again;
+       } else {
+               idx = intel_alt_er(idx);
+               if (idx != reg->idx) {
+                       raw_spin_unlock_irqrestore(&era->lock, flags);
+                       goto again;
+               }
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);
 
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
        struct er_account *era;
 
        /*
-        * only put constraint if extra reg was actually
-        * allocated. Also takes care of event which do
-        * not use an extra shared reg
+        * Only put constraint if extra reg was actually allocated. Also takes
+        * care of events which do not use an extra shared reg.
+        *
+        * Also, if this is a fake cpuc we shouldn't touch any event state
+        * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+        * either since it'll be thrown out.
         */
-       if (!reg->alloc)
+       if (!reg->alloc || cpuc->is_fake)
                return;
 
        era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
        intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
 {
-       int ret = x86_pmu_hw_config(event);
-
-       if (ret)
-               return ret;
-
-       if (event->attr.precise_ip &&
-           (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+       if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
                /*
                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
                 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
                 */
                u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 
+               alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+               event->hw.config = alt_config;
+       }
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+       if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+               /*
+                * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+                * (0x003c) so that we can use it with PEBS.
+                *
+                * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+                * PEBS capable. However we can use UOPS_RETIRED.ALL
+                * (0x01c2), which is a PEBS capable event, to get the same
+                * count.
+                *
+                * UOPS_RETIRED.ALL counts the number of cycles that retires
+                * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+                * larger than the maximum number of micro-ops that can be
+                * retired per cycle (4) and then inverting the condition, we
+                * count all cycles that retire 16 or less micro-ops, which
+                * is every cycle.
+                *
+                * Thereby we gain a PEBS capable cycle counter.
+                */
+               u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
 
                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
                event->hw.config = alt_config;
        }
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+       int ret = x86_pmu_hw_config(event);
+
+       if (ret)
+               return ret;
+
+       if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+               x86_pmu.pebs_aliases(event);
 
        if (intel_pmu_needs_lbr_smpl(event)) {
                ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
        .put_event_constraints  = intel_put_event_constraints,
+       .pebs_aliases           = intel_pebs_aliases_core2,
 
        .format_attrs           = intel_arch3_formats_attr,
 
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void)
                break;
 
        case 42: /* SandyBridge */
-               x86_add_quirk(intel_sandybridge_quirk);
        case 45: /* SandyBridge, "Romely-EP" */
+               x86_add_quirk(intel_sandybridge_quirk);
+       case 58: /* IvyBridge */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void)
 
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+               x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
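
For readers decoding the raw PEBS-alias values, the X86_CONFIG() encodings above map onto the architectural PERFEVTSEL layout (event select in bits 0-7, unit mask in bits 8-15, INV in bit 23, CMASK in bits 24-31); the arithmetic below is only a sketch of that mapping:

	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
		= 0xc0 | (1 << 23) | (16 << 24)                = 0x108000c0
	X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16)
		= 0xc2 | (0x01 << 8) | (1 << 23) | (16 << 24)  = 0x108001c2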
index 5a3edc27f6e5754f758e5ff166f89b34d6d99a01..35e2192df9f4ae078ae8ecff742f72f30881816b 100644 (file)
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
-       INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
-       INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
-       INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
-       INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
-       INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
index addf9e82a7f23bf063d4b6205307ad5f0dc5e7e7..ee8e9abc859f8a20a695c69c9d834743ff933036 100644 (file)
@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
+               { X86_FEATURE_DTHERM,           CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
index 8bfb6146f7530634d30fa9f25d1718111a214649..3f61904365cff214d26efa4a378830559e39a7f9 100644 (file)
@@ -444,12 +444,12 @@ void kgdb_roundup_cpus(unsigned long flags)
 
 /**
  *     kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- *     @vector: The error vector of the exception that happened.
+ *     @e_vector: The error vector of the exception that happened.
  *     @signo: The signal number of the exception that happened.
  *     @err_code: The error code of the exception that happened.
- *     @remcom_in_buffer: The buffer of the packet we have read.
- *     @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- *     @regs: The &struct pt_regs of the current process.
+ *     @remcomInBuffer: The buffer of the packet we have read.
+ *     @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ *     @linux_regs: The &struct pt_regs of the current process.
  *
  *     This function MUST handle the 'c' and 's' command packets,
  *     as well packets to set / remove a hardware breakpoint, if used.
index 086eb58c6e801134296372acd7d9efb36de6d12b..f1b42b3a186c7ec7594203b0e58c31e424181908 100644 (file)
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
        bool ret = false;
        struct pvclock_vcpu_time_info *src;
 
-       /*
-        * per_cpu() is safe here because this function is only called from
-        * timer functions where preemption is already disabled.
-        */
-       WARN_ON(!in_atomic());
        src = &__get_cpu_var(hv_clock);
        if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
                __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
index e31bf8d5c4d2e410126f397cdb764c281b1693fc..149b8d9c6ad45a199cfa1f55b17df3562afe4127 100644 (file)
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
 static void __init init_nmi_testsuite(void)
 {
        /* trap all the unknown NMIs we may generate */
-       register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+       register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
 {
        unsigned long timeout;
 
-       if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+       if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
                                 NMI_FLAG_FIRST, "nmi_selftest")) {
                nmi_fail = FAILURE;
                return;
index 62c9457ccd2f1c4ccad62a757aebdcd520498cc3..c0f420f76cd3b7a65d65536735a1bee1820f89b3 100644 (file)
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 struct dma_attrs *attrs)
 {
        unsigned long dma_mask;
-       struct page *page = NULL;
+       struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;
 
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
        flag |= __GFP_ZERO;
 again:
+       page = NULL;
        if (!(flag & GFP_ATOMIC))
                page = dma_alloc_from_contiguous(dev, count, get_order(size));
        if (!page)
index 79c45af81604c7e191116c23b3625c80bb81476d..5de92f1abd76fc4d055a42af0c690284c65b7f2d 100644 (file)
@@ -451,6 +451,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
                },
        },
+       {       /* Handle problems with rebooting on the Precision M6600. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Precision M6600",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+               },
+       },
        { }
 };
 
@@ -639,9 +647,11 @@ void native_machine_shutdown(void)
        set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
        /*
-        * O.K Now that I'm on the appropriate processor,
-        * stop all of the others.
+        * O.K. Now that I'm on the appropriate processor, stop all of the
+        * others. Also disable the local irq so we don't receive the per-cpu
+        * timer interrupt, which may trigger the scheduler's load balancing.
         */
+       local_irq_disable();
        stop_other_cpus();
 #endif
 
index f56f96da77f57e011b64e3e69cbabdc76ed3d442..7bd8a0823654115cdb476b1693cb3eaeb6affba3 100644 (file)
@@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-       if (c->phys_proc_id == o->phys_proc_id)
-               return topology_sane(c, o, "mc");
+       if (c->phys_proc_id == o->phys_proc_id) {
+               if (cpu_has(c, X86_FEATURE_AMD_DCM))
+                       return true;
 
+               return topology_sane(c, o, "mc");
+       }
        return false;
 }
 
@@ -382,6 +385,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                if ((i == cpu) || (has_mc && match_llc(c, o)))
                        link_mask(llc_shared, cpu, i);
 
+       }
+
+       /*
+        * This needs a separate iteration over the cpus because we rely on all
+        * cpu_sibling_mask links to be set up.
+        */
+       for_each_cpu(i, cpu_sibling_setup_mask) {
+               o = &cpu_data(i);
+
                if ((i == cpu) || (has_mc && match_mc(c, o))) {
                        link_mask(core, cpu, i);
 
@@ -410,15 +422,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-       /*
-        * For perf, we return last level cache shared map.
-        * And for power savings, we return cpu_core_map
-        */
-       if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
-               return cpu_core_mask(cpu);
-       else
-               return cpu_llc_shared_mask(cpu);
+       return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
index 7515cf0e1805eae308e1dd0650b1dd33a8d8564d..5db36caf4289cdab3fe04bf899f4826504ced389 100644 (file)
@@ -139,6 +139,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
        return nr;
 }
 
+#ifdef CONFIG_SECCOMP
+static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
+{
+       if (!seccomp_mode(&tsk->seccomp))
+               return 0;
+       task_pt_regs(tsk)->orig_ax = syscall_nr;
+       task_pt_regs(tsk)->ax = syscall_nr;
+       return __secure_computing(syscall_nr);
+}
+#else
+#define vsyscall_seccomp(_tsk, _nr) 0
+#endif
+
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
        /*
@@ -174,6 +187,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        int vsyscall_nr;
        int prev_sig_on_uaccess_error;
        long ret;
+       int skip;
 
        /*
         * No point in checking CS -- the only way to get here is a user mode
@@ -205,9 +219,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        }
 
        tsk = current;
-       if (seccomp_mode(&tsk->seccomp))
-               do_exit(SIGKILL);
-
        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
@@ -222,8 +233,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
         * address 0".
         */
        ret = -EFAULT;
+       skip = 0;
        switch (vsyscall_nr) {
        case 0:
+               skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
                        break;
@@ -234,6 +250,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 1:
+               skip = vsyscall_seccomp(tsk, __NR_time);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(time_t)))
                        break;
 
@@ -241,6 +261,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
 
        case 2:
+               skip = vsyscall_seccomp(tsk, __NR_getcpu);
+               if (skip)
+                       break;
+
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned)))
                        break;
@@ -253,6 +277,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
+       if (skip) {
+               if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+                       goto do_ret;
+               goto done; /* seccomp trace/trap */
+       }
+
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
@@ -271,10 +301,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        regs->ax = ret;
 
+do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
-
+done:
        return true;
 
 sigsegv:
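
Taken together, the vsyscall hunks above gate each emulated call (gettimeofday, time, getcpu) through seccomp before doing any work, and use the new do_ret/done labels so that a seccomp errno result still gets the emulated 'ret' while a trace/trap result leaves the registers untouched. A compressed user-space sketch of that control flow, with every kernel interface replaced by a stub (seccomp_check(), do_gettimeofday() and the syscall number are invented for the example):

/* Control-flow sketch of the patched emulate_vsyscall() path.
 * Everything here is a stub; only the skip/do_ret/done shape matters. */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct regs { long ax, ip, sp; };

/* Stub: 0 means "run the call", non-zero means seccomp intercepted it. */
static int seccomp_check(int nr)
{
        (void)nr;
        return 0;
}

static long do_gettimeofday(void)       /* stub emulation */
{
        return 0;
}

static bool emulate_one(struct regs *regs, long caller)
{
        long ret = -EFAULT;
        int skip;

        skip = seccomp_check(96);       /* 96 ~ __NR_gettimeofday, illustrative */
        if (!skip)
                ret = do_gettimeofday();

        if (skip) {
                if (regs->ax <= 0)      /* seccomp errno emulation */
                        goto do_ret;
                goto done;              /* seccomp trace/trap: leave regs alone */
        }

        regs->ax = ret;
do_ret:
        /* Emulate the 'ret' instruction. */
        regs->ip = caller;
        regs->sp += 8;
done:
        return true;
}

int main(void)
{
        struct regs r = { 0, 0, 0 };
        bool handled = emulate_one(&r, 0x400123);

        printf("handled=%d ip=%ld ax=%ld\n", handled, r.ip, r.ax);
        return 0;
}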
index be3cea4407ffad63824c068332079baf16336012..57e168e27b5b865e187d5ee3d34413189a1c5905 100644 (file)
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 {
        struct kvm_mmu_page *page;
 
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               return;
+
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
index 459b58a8a15cecd12c52ca0b48d19c0b57e4a4f0..25b7ae8d058ad8b756987ed7dc8d22382a956519 100644 (file)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
  * @src: source address
  * @dst: destination address
  * @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
  *
  * Returns an 32bit unfolded checksum of the buffer.
  */
index f61ee67ec00f0dc6d61b6165e087ba248f81df2c..4f74d94c8d9727f013b3ebed6fe5da6e3975ac93 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 
 #include <asm/word-at-a-time.h>
+#include <linux/sched.h>
 
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        void *map;
        int ret;
 
+       if (__range_not_ok(from, n, TASK_SIZE))
+               return len;
+
        do {
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
index 819137904428a4f82444cf5a9d5055bb59dc6d80..5d7e51f3fd2812ed4176c696d9b175d4ec52fe46 100644 (file)
@@ -28,7 +28,7 @@
 #  - (66): the last prefix is 0x66
 #  - (F3): the last prefix is 0xF3
 #  - (F2): the last prefix is 0xF2
-#
+#  - (!F3): the last prefix is not 0xF3 (including non-last prefix case)
 
 Table: one byte opcode
 Referrer:
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp
 b5: LGS Gv,Mp
 b6: MOVZX Gv,Eb
 b7: MOVZX Gv,Ew
-b8: JMPE | POPCNT Gv,Ev (F3)
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
 b9: Grp10 (1A)
 ba: Grp8 Ev,Ib (1A)
 bb: BTC Ev,Gv
-bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
 be: MOVSX Gv,Eb
 bf: MOVSX Gv,Ew
 # 0x0f 0xc0-0xcf
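
The (!F3) annotations added above make the opcode table explicit about mnemonics that share an escape opcode and differ only in the last legacy prefix (BSF vs TZCNT, BSR vs LZCNT, JMPE vs POPCNT). A minimal C sketch of that selection rule; the table below is abbreviated and is not the generated inat table:

/* Pick a mnemonic for 0F B8/BC/BD based on the last legacy prefix,
 * mirroring the BSF(!F3)/TZCNT(F3) and BSR(!F3)/LZCNT(F3) rows above. */
#include <stdio.h>
#include <stdbool.h>

static const char *decode_0f(unsigned char opcode, bool last_prefix_f3)
{
        switch (opcode) {
        case 0xbc:
                return last_prefix_f3 ? "TZCNT" : "BSF";
        case 0xbd:
                return last_prefix_f3 ? "LZCNT" : "BSR";
        case 0xb8:
                return last_prefix_f3 ? "POPCNT" : "JMPE";
        default:
                return "(unhandled)";
        }
}

int main(void)
{
        printf("f3 0f bc -> %s\n", decode_0f(0xbc, true));
        printf("   0f bc -> %s\n", decode_0f(0xbc, false));
        return 0;
}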
index 97141c26a13ac8400cfce07c36d55cca50962b26..bc4e9d84157fc0c9aa85e71837998da166c1fa57 100644 (file)
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
                extra += PMD_SIZE;
 #endif
                /* The first 2/4M doesn't use large pages. */
-               extra += mr->end - mr->start;
+               if (mr->start < PMD_SIZE)
+                       extra += mr->end - mr->start;
 
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
index be1ef574ce9a7a933c22133be78f6a24bac6a80c..78fe3f1ac49f6d278687337f1887e360787ac3a2 100644 (file)
@@ -180,7 +180,7 @@ err_free_memtype:
 
 /**
  * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
+ * @phys_addr:    bus address of the memory
  * @size:      size of the resource to map
  *
  * ioremap_nocache performs a platform specific sequence of operations to
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache);
 
 /**
  * ioremap_wc  -       map memory into CPU space write combined
- * @offset:    bus address of the memory
+ * @phys_addr: bus address of the memory
  * @size:      size of the resource to map
  *
  * This version of ioremap ensures that the memory is marked write combining.
index e1ebde3152104840961fe6e953b28d628f4bab82..a718e0d23503fdc4bb3149d4ad5c7046458f2a57 100644 (file)
@@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 
 /**
  * clflush_cache_range - flush a cache range with clflush
- * @addr:      virtual start address
+ * @vaddr:     virtual start address
  * @size:      number of bytes to flush
  *
  * clflush is an unordered instruction which needs fencing with mfence
index 732af3a9618375da189f3da4651d75a7fbd4693b..4599c3e8bcb63f39fe618f0075ba9e52e0fbb353 100644 (file)
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                return;
        }
 
+       node_set(node, numa_nodes_parsed);
+
        printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
               node, pxm,
               (unsigned long long) start, (unsigned long long) end - 1);
index 3c6e328483c7dddd2e91011eec806b78b798d542..028454f0c3a51ac86a7105d24d433e197621a95d 100644 (file)
@@ -110,19 +110,16 @@ static struct kmsg_dumper dw_dumper;
 static int dumper_registered;
 
 static void dw_kmsg_dump(struct kmsg_dumper *dumper,
-                       enum kmsg_dump_reason reason,
-                       const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2)
+                        enum kmsg_dump_reason reason)
 {
-       int i;
+       static char line[1024];
+       size_t len;
 
        /* When we get here, we had better re-init the HW */
        mrst_early_console_init();
 
-       for (i = 0; i < l1; i++)
-               early_mrst_console.write(&early_mrst_console, s1 + i, 1);
-       for (i = 0; i < l2; i++)
-               early_mrst_console.write(&early_mrst_console, s2 + i, 1);
+       while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+               early_mrst_console.write(&early_mrst_console, line, len);
 }
 
 /* Set the ratio rate to 115200, 8n1, IRQ disabled */
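
The rewritten dumper above no longer receives two raw log buffers; it pulls fully rendered lines one at a time into a local buffer and writes each to the early console. A user-space sketch of that pull-style loop, with kmsg_dump_get_line() replaced by a stub iterator over a fixed message list (all names invented for the example):

/* Pull-style log dumping: fetch one rendered line at a time and write it. */
#include <stdio.h>
#include <stdbool.h>

static const char *messages[] = {
        "panic: something went wrong",
        "CPU: 0 PID: 1 ...",
};

/* Stub standing in for kmsg_dump_get_line(): returns false once the
 * log has been exhausted. */
static bool get_next_line(size_t *cursor, char *line, size_t size, size_t *len)
{
        int n;

        if (*cursor >= sizeof(messages) / sizeof(messages[0]))
                return false;
        n = snprintf(line, size, "%s\n", messages[(*cursor)++]);
        *len = (n < 0) ? 0 : ((size_t)n >= size ? size - 1 : (size_t)n);
        return true;
}

static void dump(void)
{
        static char line[1024];
        size_t cursor = 0, len;

        while (get_next_line(&cursor, line, sizeof(line), &len))
                fwrite(line, 1, len, stdout);   /* console write stand-in */
}

int main(void)
{
        dump();
        return 0;
}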
index e31bcd8f2eeef2af6d2db13b824864b74867767d..fd41a9262d657ee672be6700b0dcc1fb8c147127 100644 (file)
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
 EXPORT_SYMBOL_GPL(intel_scu_notifier);
 
 /* Called by IPC driver */
-void intel_scu_devices_create(void)
+void __devinit intel_scu_devices_create(void)
 {
        int i;
 
index 3ae0e61abd23acf7b1de580b8c234873b1870209..59880afa851fc37d6995e3fd665fca1f1d3d91b3 100644 (file)
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void)
                 */
                mmr_image |= (1L << SOFTACK_MSHIFT);
                if (is_uv2_hub()) {
-                       mmr_image &= ~(1L << UV2_LEG_SHFT);
                        mmr_image |= (1L << UV2_EXT_SHFT);
                }
                write_mmr_misc_control(pnode, mmr_image);
index 5f6a5b6c3a159842b4d3efc9aa3f45abb0a605d4..ddcf39b1a18d3f8ad034237abc9adf293d46b24c 100644 (file)
@@ -66,9 +66,10 @@ BEGIN {
        rex_expr = "^REX(\\.[XRWB]+)*"
        fpu_expr = "^ESC" # TODO
 
-       lprefix1_expr = "\\(66\\)"
+       lprefix1_expr = "\\((66|!F3)\\)"
        lprefix2_expr = "\\(F3\\)"
-       lprefix3_expr = "\\(F2\\)"
+       lprefix3_expr = "\\((F2|!F3)\\)"
+       lprefix_expr = "\\((66|F2|F3)\\)"
        max_lprefix = 4
 
        # All opcodes starting with lower-case 'v' or with (v1) superscript
@@ -333,13 +334,16 @@ function convert_operands(count,opnd,       i,j,imm,mod)
                if (match(ext, lprefix1_expr)) {
                        lptable1[idx] = add_flags(lptable1[idx],flags)
                        variant = "INAT_VARIANT"
-               } else if (match(ext, lprefix2_expr)) {
+               }
+               if (match(ext, lprefix2_expr)) {
                        lptable2[idx] = add_flags(lptable2[idx],flags)
                        variant = "INAT_VARIANT"
-               } else if (match(ext, lprefix3_expr)) {
+               }
+               if (match(ext, lprefix3_expr)) {
                        lptable3[idx] = add_flags(lptable3[idx],flags)
                        variant = "INAT_VARIANT"
-               } else {
+               }
+               if (!match(ext, lprefix_expr)) {
                        table[idx] = add_flags(table[idx],flags)
                }
        }
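
The awk change above turns the else-if chain into independent checks because a row tagged (!F3) now has to be recorded in both the 0x66 and 0xF2 last-prefix tables as well as in the plain table, which a mutually exclusive chain cannot do. A toy C model of that table-population logic (table names and flag bits invented for the example):

/* Model of the lprefix table population after the change: a single entry
 * may land in several tables, e.g. a (!F3) mnemonic goes into both the
 * 0x66 and 0xF2 tables and into the plain table. */
#include <stdio.h>

enum { ANN_66 = 1, ANN_F3 = 2, ANN_F2 = 4, ANN_NOT_F3 = 8 };

static const char *lptable1[256];  /* last prefix 0x66 */
static const char *lptable2[256];  /* last prefix 0xF3 */
static const char *lptable3[256];  /* last prefix 0xF2 */
static const char *table[256];     /* no last prefix   */

static void add_entry(unsigned idx, const char *mnem, unsigned ann)
{
        if (ann & (ANN_66 | ANN_NOT_F3))
                lptable1[idx] = mnem;          /* was an "else if" before */
        if (ann & ANN_F3)
                lptable2[idx] = mnem;
        if (ann & (ANN_F2 | ANN_NOT_F3))
                lptable3[idx] = mnem;
        if (!(ann & (ANN_66 | ANN_F3 | ANN_F2)))
                table[idx] = mnem;             /* the new lprefix_expr check */
}

int main(void)
{
        add_entry(0xbc, "BSF",   ANN_NOT_F3);
        add_entry(0xbc, "TZCNT", ANN_F3);

        printf("0xbc: plain=%s 66=%s f3=%s f2=%s\n",
               table[0xbc], lptable1[0xbc], lptable2[0xbc], lptable3[0xbc]);
        return 0;
}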
index 416bd40c0eba51f46863888f05637fc8794389ba..68d1dc91b37badeaeff9fe517a06df2be6484f8a 100644 (file)
@@ -39,9 +39,9 @@
 #undef __SYSCALL_I386
 #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
 
-typedef void (*sys_call_ptr_t)(void);
+typedef asmlinkage void (*sys_call_ptr_t)(void);
 
-extern void sys_ni_syscall(void);
+extern asmlinkage void sys_ni_syscall(void);
 
 const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
        /*
index e74df9548a025c2d66b3052f8ac0002695e59498..ff962d4b821e5162415fa06ddf75e8c57d498b51 100644 (file)
@@ -209,6 +209,9 @@ static void __init xen_banner(void)
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
+#define CPUID_THERM_POWER_LEAF 6
+#define APERFMPERF_PRESENT 0
+
 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
 
@@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
                *dx = cpuid_leaf5_edx_val;
                return;
 
+       case CPUID_THERM_POWER_LEAF:
+               /* Disabling APERFMPERF for kernel usage */
+               maskecx = ~(1 << APERFMPERF_PRESENT);
+               break;
+
        case 0xb:
                /* Suppress extended topology stuff */
                maskebx = 0;
index ffd08c414e91a7cc9f4a507b4ca3bbad9b823523..64effdc6da9400c09515af384d3d4b94c8efcb50 100644 (file)
@@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
+       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
        spin_unlock_irqrestore(&m2p_override_lock, flags);
 
+       /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
+        * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
+        * pfn so that the following mfn_to_pfn(mfn) calls will return the
+        * pfn from the m2p_override (the backend pfn) instead.
+        * We need to do this because the pages shared by the frontend
+        * (xen-blkfront) can already be locked (lock_page, called by
+        * do_read_cache_page); when the userspace backend tries to use them
+        * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
+        * do_blockdev_direct_IO is going to try to lock the same pages
+        * again resulting in a deadlock.
+        * As a side effect, get_user_pages_fast might not be safe on the
+        * frontend pages while they are being shared with the backend,
+        * because mfn_to_pfn (that ends up being called by GUPF) will
+        * return the backend pfn rather than the frontend pfn. */
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+               set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
+       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
        } else
                set_phys_to_machine(pfn, page->index);
 
+       /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+        * somewhere in this domain, even before being added to the
+        * m2p_override (see comment above in m2p_add_override).
+        * If there are no other entries in the m2p_override corresponding
+        * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+        * the original pfn (the one shared by the frontend): the backend
+        * cannot do any IO on this page anymore because it has been
+        * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+        * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+        * pfn again. */
+       mfn &= ~FOREIGN_FRAME_BIT;
+       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+                       m2p_find_override(mfn) == NULL)
+               set_phys_to_machine(pfn, mfn);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_remove_override);
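
The comments added to m2p_add_override()/m2p_remove_override() above describe an invariant between the p2m and m2p maps: while a foreign mfn is also mapped locally, the local pfn's p2m entry carries FOREIGN_FRAME so that mfn_to_pfn() resolves to the backend pfn, and the flag is cleared again once the last override goes away. A toy model of that bookkeeping with plain arrays; nothing here is the real Xen interface, and the per-mfn counter stands in for m2p_find_override():

/* Toy model of the p2m/m2p override bookkeeping described above. */
#include <stdio.h>

#define NFRAMES 16
#define FOREIGN_FRAME_BIT (1ul << 31)
#define FOREIGN_FRAME(m)  ((m) | FOREIGN_FRAME_BIT)

static unsigned long p2m[NFRAMES];      /* pfn -> mfn */
static unsigned long m2p[NFRAMES];      /* mfn -> pfn */
static int override_count[NFRAMES];     /* overrides referencing each mfn */

static void add_override(unsigned long mfn)
{
        unsigned long pfn = m2p[mfn];

        override_count[mfn]++;
        /* mfn already mapped in this domain: tag the original pfn so
         * lookups prefer the override (the backend pfn). */
        if (p2m[pfn] == mfn)
                p2m[pfn] = FOREIGN_FRAME(mfn);
}

static void remove_override(unsigned long mfn)
{
        unsigned long pfn = m2p[mfn];

        override_count[mfn]--;
        /* Last override gone: restore the frontend pfn's plain entry. */
        if (p2m[pfn] == FOREIGN_FRAME(mfn) && override_count[mfn] == 0)
                p2m[pfn] = mfn;
}

int main(void)
{
        unsigned long mfn = 5, pfn = 3;

        p2m[pfn] = mfn;
        m2p[mfn] = pfn;

        add_override(mfn);
        printf("after add:    p2m[%lu] = %#lx\n", pfn, p2m[pfn]);
        remove_override(mfn);
        printf("after remove: p2m[%lu] = %#lx\n", pfn, p2m[pfn]);
        return 0;
}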
index 3ebba0753d3876b887875aaf53fdd461f544441c..a4790bf22c592326b4b53a80f615e2f2ee658b83 100644 (file)
@@ -371,7 +371,8 @@ char * __init xen_memory_setup(void)
        populated = xen_populate_chunk(map, memmap.nr_entries,
                        max_pfn, &last_pfn, xen_released_pages);
 
-       extra_pages += (xen_released_pages - populated);
+       xen_released_pages -= populated;
+       extra_pages += xen_released_pages;
 
        if (last_pfn > max_pfn) {
                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
index 7608559de93aa647acfb6c8c877fb03208512b83..f973754ddf90414873b2c63bfdd08b4202f2dcb7 100644 (file)
@@ -68,8 +68,8 @@ endif
 
 # Only build variant and/or platform if it includes a Makefile
 
-buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
-buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
 
 # Find libgcc.a
 
index 0b9f2e13c78128b8dc1440305d9db04810a66476..c1dacca312f3906374614ae4eb0a486ab8070e8a 100644 (file)
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
        struct timespec __user *tsp, const sigset_t __user *sigmask,
        size_t sigsetsize);
-
-
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
+               size_t sigsetsize);
index 9b306e550e3f06ddfa342c041c7793543702c4b3..2c8d6a3d250afd2d18fa8b41b03dc26b0ad97ca2 100644 (file)
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
 
        /* Don't leak any random bits. */
 
-       memset(elfregs, 0, sizeof (elfregs));
+       memset(elfregs, 0, sizeof(*elfregs));
 
        /* Note:  PS.EXCM is not set while user task is running; its
         * being set in regs->ps is for exception handling convenience.
index b9f8e5850d3a4e1c9bc1dbe3ff6ee0b7dbc47cc9..efe4e854b3cdd06808eec1bcd3fe8fa70b07ed96 100644 (file)
@@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs)
                if (ret)
                        return;
 
-               signal_delivered(signr, info, ka, regs, 0);
+               signal_delivered(signr, &info, &ka, regs, 0);
                if (current->ptrace & PT_SINGLESTEP)
                        task_pt_regs(current)->icountlevel = 1;
 
index 88ecea3facb4e8c81f507a667ed55feda6cfcb1a..ee2e2089483d08a532d795743b9a532cf2311f93 100644 (file)
@@ -83,7 +83,6 @@ SECTIONS
 
   _text = .;
   _stext = .;
-  _ftext = .;
 
   .text :
   {
@@ -112,7 +111,7 @@ SECTIONS
   EXCEPTION_TABLE(16)
   /* Data section */
 
-  _fdata = .;
+  _sdata = .;
   RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
   _edata = .;
 
index ba150e5de2ebe6583c2b708525a26e33b8acbfb2..db955179da2d5de8adac6a4ab66d4be17ec943fc 100644 (file)
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
-
-/* References to section boundaries */
-
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
 
 /*
  * mem_reserve(start, end, must_exist)
@@ -197,9 +193,9 @@ void __init mem_init(void)
                        reservedpages++;
        }
 
-       codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
-       datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
-       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+       codesize =  (unsigned long) _etext - (unsigned long) _stext;
+       datasize =  (unsigned long) _edata - (unsigned long) _sdata;
+       initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
 
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
               "%ldk data, %ldk init %ldk highmem)\n",
@@ -237,7 +233,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void free_initmem(void)
 {
-       free_reserved_mem(&__init_begin, &__init_end);
-       printk("Freeing unused kernel memory: %dk freed\n",
-              (&__init_end - &__init_begin) >> 10);
+       free_reserved_mem(__init_begin, __init_end);
+       printk("Freeing unused kernel memory: %zuk freed\n",
+              (__init_end - __init_begin) >> 10);
 }
index 02cf6335e9bdc5bb940ec89fcdbfe63746f9b5df..e7dee617358e810aec57ff25162ff95604fabb3c 100644 (file)
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
-       }
-
-       /* invoke per-policy init */
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
 
+               /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-       struct request_queue *q = blkg->q;
        struct blkcg *blkcg = blkg->blkcg;
 
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
index 3c923a7aeb56f1658142b091868c3c29ebffd3c5..93eb3e4f88ce78affc79c5ca6d8b7c7b0759be5a 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+       int i;
+
        while (true) {
                bool drain = false;
-               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        break;
                msleep(10);
        }
+
+       /*
+        * With the queue marked dead, any waiter that wakes up will fail
+        * the allocation path, so the wakeup chaining is lost and we are
+        * left with hung waiters. We need to wake up those waiters.
+        */
+       if (q->request_fn) {
+               spin_lock_irq(q->queue_lock);
+               for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+                       wake_up_all(&q->rq.wait[i]);
+               spin_unlock_irq(q->queue_lock);
+       }
 }
 
 /**
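
The wake-up loop added to blk_drain_queue() exists because, once the queue is marked dead, allocation sleepers are never chained awake again; broadcasting on every rq.wait list lets them observe the dead flag and fail instead of hanging. The same pattern in a stand-alone pthread sketch (shutdown flag plus broadcast; all names are invented for the example):

/* Shutdown-time broadcast so sleepers re-check a "dead" flag and bail
 * out instead of waiting forever.  Illustrative only. */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
static bool queue_dead;
static bool resource_free;              /* stand-in for a free request */

static int alloc_request(void)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        while (!resource_free && !queue_dead)
                pthread_cond_wait(&wait_q, &lock);
        if (queue_dead)
                ret = -1;               /* allocation fails on a dead queue */
        else
                resource_free = false;
        pthread_mutex_unlock(&lock);
        return ret;
}

static void *waiter(void *arg)
{
        (void)arg;
        printf("alloc_request() = %d\n", alloc_request());
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);

        /* Mark the queue dead, then wake *all* sleepers so none is left
         * hanging on a wakeup chain that will never continue. */
        pthread_mutex_lock(&lock);
        queue_dead = true;
        pthread_cond_broadcast(&wait_q);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}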
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
        spin_lock_irq(lock);
 
        /*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-       if (q->queue_lock != &q->__queue_lock)
-               q->queue_lock = &q->__queue_lock;
-
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
+       spin_lock_irq(lock);
+       if (q->queue_lock != &q->__queue_lock)
+               q->queue_lock = &q->__queue_lock;
+       spin_unlock_irq(lock);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
index 780354888958cd7ea03aef2c1654b325bc9fb24e..6e4744cbfb56b4ca0d99062a0d9b0437c894016d 100644 (file)
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
                mod_timer(&q->timeout, expiry);
 }
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:     pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-       unsigned long flags;
-       struct request *rq, *tmp;
-       LIST_HEAD(list);
-
-       /*
-        * Not a request based block device, nothing to abort
-        */
-       if (!q->request_fn)
-               return;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       elv_abort_queue(q);
-
-       /*
-        * Splice entries to local list, to avoid deadlocking if entries
-        * get readded to the timeout list by error handling
-        */
-       list_splice_init(&q->timeout_list, &list);
-
-       list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-               blk_abort_request(rq);
-
-       /*
-        * Occasionally, blk_abort_request() will return without
-        * deleting the element from the list. Make sure we add those back
-        * instead of leaving them on the local stack list.
-        */
-       list_splice(&list, &q->timeout_list);
-
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
index 673c977cc2bfa238e0fe0efa6dddac5193fbccd8..fb52df9744f5fe411908162fbb02257ca0bc097a 100644 (file)
@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"
 
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
        return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }
 
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
        return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+       return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
        kfree(cfqd->root_group);
 #endif
-       blkcg_deactivate_policy(q, &blkcg_policy_cfq);
        kfree(cfqd);
 }
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
        if (!cfq_group_idle)
                cfq_group_idle = 1;
-#else
-               cfq_group_idle = 0;
-#endif
 
        ret = blkcg_policy_register(&blkcg_policy_cfq);
        if (ret)
                return ret;
+#else
+       cfq_group_idle = 0;
+#endif
 
+       ret = -ENOMEM;
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
        kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
        blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
        elv_unregister(&iosched_cfq);
        kmem_cache_destroy(cfq_pool);
 }
index 260fa80ef5750f80f4ebc1415d06abc992e18b8a..9a87daa6f4fbd10202ea9d47ae1a549c806a6fb4 100644 (file)
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+       return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index 47768ff87343d27d2cc5228aa6e02f6cfdff20a0..80998958cf45381f4e1ef6f585b9467b31d1e4b3 100644 (file)
@@ -208,7 +208,7 @@ config ACPI_IPMI
 
 config ACPI_HOTPLUG_CPU
        bool
-       depends on ACPI_PROCESSOR && HOTPLUG_CPU
+       depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
        select ACPI_CONTAINER
        default y
 
index 6512b20aeccdce0184a6a5c524af85c6f7f6db91..d1fcbc0f6cbc6d93e3523f913822d68fda0b8ad2 100644 (file)
@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       power_supply_register(&ac->device->dev, &ac->charger);
+       result = power_supply_register(&ac->device->dev, &ac->charger);
+       if (result)
+               goto end;
 
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
               acpi_device_name(device), acpi_device_bid(device),
index a43fa1a57d57f45616c9d70c362df15d3b71c8e5..af4aad6ee2eb9eb8d40acfa6151c60c7911b51d1 100644 (file)
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
 
 static unsigned long power_saving_mwait_eax;
 
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return;
 
-       mutex_lock(&isolated_cpus_lock);
+       mutex_lock(&round_robin_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (cpumask_empty(tmp))
                cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
        if (cpumask_empty(tmp)) {
-               mutex_unlock(&isolated_cpus_lock);
+               mutex_unlock(&round_robin_lock);
                return;
        }
        for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        tsk_in_cpu[tsk_index] = preferred_cpu;
        cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
        cpu_weight[preferred_cpu]++;
-       mutex_unlock(&isolated_cpus_lock);
+       mutex_unlock(&round_robin_lock);
 
        set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
@@ -144,7 +145,7 @@ static void exit_round_robin(unsigned int tsk_index)
 }
 
 static unsigned int idle_pct = 5; /* percentage */
-static unsigned int round_robin_time = 10; /* second */
+static unsigned int round_robin_time = 1; /* second */
 static int power_saving_thread(void *data)
 {
        struct sched_param param = {.sched_priority = 1};
@@ -234,7 +235,7 @@ static int create_power_saving_task(void)
 
        ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
                (void *)(unsigned long)ps_tsk_num,
-               "power_saving/%d", ps_tsk_num);
+               "acpi_pad/%d", ps_tsk_num);
        rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
        if (!rc)
                ps_tsk_num++;
index 793b8cc8e256c87a743ae7011d731f25393b36be..0a1b3435f920af3be09813330191aa3b179de2e2 100644 (file)
@@ -134,12 +134,14 @@ acpi-y +=         \
        tbinstal.o      \
        tbutils.o       \
        tbxface.o       \
+       tbxfload.o      \
        tbxfroot.o
 
 acpi-y +=              \
        utaddress.o     \
        utalloc.o       \
        utcopy.o        \
+       utexcep.o       \
        utdebug.o       \
        utdecode.o      \
        utdelete.o      \
index d700f63e4701462a159af8c3b1c3a9a5112aeada..c0a43b38c6a3a138c6445b76d6b88e482ec68636 100644 (file)
@@ -237,7 +237,7 @@ u32 acpi_ev_install_sci_handler(void);
 
 acpi_status acpi_ev_remove_sci_handler(void);
 
-u32 acpi_ev_initialize_sCI(u32 program_sCI);
+u32 acpi_ev_initialize_SCI(u32 program_SCI);
 
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
 #endif                         /* __ACEVENTS_H__  */
index 4f7d3f57d05cdd24f047ca2af28209abd631e7ae..ce79100fb5ebdaab6c296f15dcd9c937062784ae 100644 (file)
@@ -278,8 +278,7 @@ ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
 
 /* Global handlers */
 
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
+ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
 ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
 ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
 ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
@@ -327,14 +326,6 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
 
 #endif
 
-/* Exception codes */
-
-extern char const *acpi_gbl_exception_names_env[];
-extern char const *acpi_gbl_exception_names_pgm[];
-extern char const *acpi_gbl_exception_names_tbl[];
-extern char const *acpi_gbl_exception_names_aml[];
-extern char const *acpi_gbl_exception_names_ctrl[];
-
 /*****************************************************************************
  *
  * Namespace globals
@@ -463,4 +454,12 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
 
 #endif                         /* ACPI_DEBUGGER */
 
+/*****************************************************************************
+ *
+ * Info/help support
+ *
+ ****************************************************************************/
+
+extern const struct ah_predefined_name asl_predefined_info[];
+
 #endif                         /* __ACGLOBAL_H__ */
index e3922ca20e7f26e7eafa98ed4c2d23f2edf0435a..cc80fe10e8ea916cf03a24bf4fce2a1f8e838f3b 100644 (file)
@@ -299,7 +299,7 @@ acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
  * Information structure for ACPI predefined names.
  * Each entry in the table contains the following items:
  *
- * Name                 - The ACPI reserved name
+ * name                 - The ACPI reserved name
  * param_count          - Number of arguments to the method
  * expected_return_btypes - Allowed type(s) for the return value
  */
@@ -404,6 +404,13 @@ struct acpi_gpe_handler_info {
        u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
+/* Notify info for implicit notify, multiple device objects */
+
+struct acpi_gpe_notify_info {
+       struct acpi_namespace_node *device_node;        /* Device to be notified */
+       struct acpi_gpe_notify_info *next;
+};
+
 struct acpi_gpe_notify_object {
        struct acpi_namespace_node *node;
        struct acpi_gpe_notify_object *next;
@@ -412,7 +419,7 @@ struct acpi_gpe_notify_object {
 union acpi_gpe_dispatch_info {
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level */
        struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
-       struct acpi_gpe_notify_object device;   /* List of _PRW devices for implicit notify */
+       struct acpi_gpe_notify_info *notify_list;       /* List of _PRW devices for implicit notifies */
 };
 
 /*
@@ -420,7 +427,7 @@ union acpi_gpe_dispatch_info {
  * NOTE: Important to keep this struct as small as possible.
  */
 struct acpi_gpe_event_info {
-       union acpi_gpe_dispatch_info dispatch;  /* Either Method or Handler */
+       union acpi_gpe_dispatch_info dispatch;  /* Either Method, Handler, or notify_list */
        struct acpi_gpe_register_info *register_info;   /* Backpointer to register info */
        u8 flags;               /* Misc info about this GPE */
        u8 gpe_number;          /* This GPE */
@@ -600,13 +607,22 @@ acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
 
 typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
 
+/* Global handlers for AML Notifies */
+
+struct acpi_global_notify_handler {
+       acpi_notify_handler handler;
+       void *context;
+};
+
 /*
  * Notify info - used to pass info to the deferred notify
  * handler/dispatcher.
  */
 struct acpi_notify_info {
-       ACPI_STATE_COMMON struct acpi_namespace_node *node;
-       union acpi_operand_object *handler_obj;
+       ACPI_STATE_COMMON u8 handler_list_id;
+       struct acpi_namespace_node *node;
+       union acpi_operand_object *handler_list_head;
+       struct acpi_global_notify_handler *global;
 };
 
 /* Generic state is union of structs above */
@@ -718,7 +734,7 @@ struct acpi_parse_obj_named {
        u32 name;               /* 4-byte name or zero if no name */
 };
 
-/* This version is used by the i_aSL compiler only */
+/* This version is used by the iASL compiler only */
 
 #define ACPI_MAX_PARSEOP_NAME   20
 
@@ -787,6 +803,7 @@ struct acpi_parse_state {
 #define ACPI_PARSEOP_IGNORE             0x01
 #define ACPI_PARSEOP_PARAMLIST          0x02
 #define ACPI_PARSEOP_EMPTY_TERMLIST     0x04
+#define ACPI_PARSEOP_PREDEF_CHECKED     0x08
 #define ACPI_PARSEOP_SPECIAL            0x10
 
 /*****************************************************************************
@@ -1075,4 +1092,18 @@ struct acpi_debug_mem_block {
 #define ACPI_MEM_LIST_MAX               1
 #define ACPI_NUM_MEM_LISTS              2
 
+/*****************************************************************************
+ *
+ * Info/help support
+ *
+ ****************************************************************************/
+
+struct ah_predefined_name {
+       char *name;
+       char *description;
+#ifndef ACPI_ASL_COMPILER
+       char *action;
+#endif
+};
+
 #endif                         /* __ACLOCAL_H__ */
index f119f473f71af88f61afc53122e5144050468718..832b6198652e0540a606047c782f5a74b528b447 100644 (file)
@@ -62,7 +62,7 @@
  * printf() format helpers
  */
 
-/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
+/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
 
 #define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
 
 #define ACPI_INSERT_BITS(target, mask, source)          target = ((target & (~(mask))) | (source & mask))
 
 /*
- * A struct acpi_namespace_node can appear in some contexts
- * where a pointer to a union acpi_operand_object can also
+ * An object of type struct acpi_namespace_node can appear in some contexts
+ * where a pointer to an object of type union acpi_operand_object can also
  * appear. This macro is used to distinguish them.
  *
  * The "Descriptor" field is the first field in both structures.
index c065078ca83bbbb0f3a09a22a609121d0d7785d7..364a1303fb8f2fe7e0f78feb6243c0f8e797ca31 100644 (file)
@@ -113,8 +113,8 @@ struct acpi_object_integer {
 };
 
 /*
- * Note: The String and Buffer object must be identical through the Pointer
- * and length elements.  There is code that depends on this.
+ * Note: The String and Buffer object must be identical through the
+ * pointer and length elements. There is code that depends on this.
  *
  * Fields common to both Strings and Buffers
  */
@@ -206,8 +206,7 @@ struct acpi_object_method {
  * Common fields for objects that support ASL notifications
  */
 #define ACPI_COMMON_NOTIFY_INFO \
-       union acpi_operand_object       *system_notify;     /* Handler for system notifies */\
-       union acpi_operand_object       *device_notify;     /* Handler for driver notifies */\
+       union acpi_operand_object       *notify_list[2];    /* Handlers for system/device notifies */\
        union acpi_operand_object       *handler;       /* Handler for Address space */
 
 struct acpi_object_notify_common {     /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
@@ -296,10 +295,10 @@ struct acpi_object_buffer_field {
 
 struct acpi_object_notify_handler {
        ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node;     /* Parent device */
-       u32 handler_type;
-       acpi_notify_handler handler;
+       u32 handler_type;       /* Type: Device/System/Both */
+       acpi_notify_handler handler;    /* Handler address */
        void *context;
-       struct acpi_object_notify_handler *next;
+       union acpi_operand_object *next[2];     /* Device and System handler lists */
 };
 
 struct acpi_object_addr_handler {
@@ -382,7 +381,7 @@ struct acpi_object_cache_list {
 
 /******************************************************************************
  *
- * union acpi_operand_object Descriptor - a giant union of all of the above
+ * union acpi_operand_object descriptor - a giant union of all of the above
  *
  *****************************************************************************/
 
index bbb34c9be4e84de95f0d0922fe72ba5558f1b3e0..3080c017f5bad501427d9a7729ac0ead5240dff7 100644 (file)
@@ -140,7 +140,7 @@ enum acpi_return_package_types {
  *
  * The main entries in the table each contain the following items:
  *
- * Name                 - The ACPI reserved name
+ * name                 - The ACPI reserved name
  * param_count          - Number of arguments to the method
  * expected_btypes      - Allowed type(s) for the return value.
  *                        0 means that no return value is expected.
@@ -511,14 +511,14 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_TMP", 0, ACPI_RTYPE_INTEGER}},
        {{"_TPC", 0, ACPI_RTYPE_INTEGER}},
        {{"_TPT", 1, 0}},
-       {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2_ref/6_int */
+       {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */
                          {{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}},
 
-       {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5_int with count */
+       {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int with count */
                          {{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
 
        {{"_TSP", 0, ACPI_RTYPE_INTEGER}},
-       {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5_int */
+       {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int */
                          {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
 
        {{"_TST", 0, ACPI_RTYPE_INTEGER}},
index 0404df605bc187940b0a82c51b5905bb83a4acc9..f196e2c9a71f65505773885cf3f3531f2fd25814 100644 (file)
@@ -68,7 +68,7 @@
 #define ACPI_WALK_METHOD            0x01
 #define ACPI_WALK_METHOD_RESTART    0x02
 
-/* Flags for i_aSL compiler only */
+/* Flags for iASL compiler only */
 
 #define ACPI_WALK_CONST_REQUIRED    0x10
 #define ACPI_WALK_CONST_OPTIONAL    0x20
index 925ccf22101b45adcb66817c736195adbcbf975f..5035327ebccc38b26b96789bd96c808422c13723 100644 (file)
@@ -460,6 +460,8 @@ acpi_ut_short_divide(u64 in_dividend,
 /*
  * utmisc
  */
+void ut_convert_backslashes(char *pathname);
+
 const char *acpi_ut_validate_exception(acpi_status status);
 
 u8 acpi_ut_is_pci_root_bridge(char *id);
index 905280fec0fa0a132049318fa36953b347a0c324..c26f8ff6c3b995922e71c15369c30e1d5de1be55 100644 (file)
 
 /*
  * Combination opcodes (actually two one-byte opcodes)
- * Used by the disassembler and i_aSL compiler
+ * Used by the disassembler and iASL compiler
  */
 #define AML_LGREATEREQUAL_OP        (u16) 0x9295
 #define AML_LLESSEQUAL_OP           (u16) 0x9294
 
 /* Multiple/complex types */
 
-#define ARGI_DATAOBJECT             0x12       /* Buffer, String, package or reference to a Node - Used only by size_of operator */
+#define ARGI_DATAOBJECT             0x12       /* Buffer, String, package or reference to a node - Used only by size_of operator */
 #define ARGI_COMPLEXOBJ             0x13       /* Buffer, String, or package (Used by INDEX op only) */
 #define ARGI_REF_OR_STRING          0x14       /* Reference or String (Used by DEREFOF op only) */
 #define ARGI_REGION_OR_BUFFER       0x15       /* Used by LOAD op only */
index 7b2128f274e71a10f2331ca8d42abd78af166677..af4947956ec21495c0ffaa0a50f1cfcf94f087af 100644 (file)
@@ -98,7 +98,7 @@
 #define ACPI_RESTAG_TRANSLATION                 "_TRA"
 #define ACPI_RESTAG_TRANSTYPE                   "_TRS" /* Sparse(1), Dense(0) */
 #define ACPI_RESTAG_TYPE                        "_TTP" /* Translation(1), Static (0) */
-#define ACPI_RESTAG_XFERTYPE                    "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_XFERTYPE                    "_SIZ" /* 8(0), 8And16(1), 16(2) */
 #define ACPI_RESTAG_VENDORDATA                  "_VEN"
 
 /* Default sizes for "small" resource descriptors */
@@ -235,7 +235,7 @@ AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_ADDRESS_COMMON};
 
 struct aml_resource_extended_address64 {
        AML_RESOURCE_LARGE_HEADER_COMMON
-           AML_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+           AML_RESOURCE_ADDRESS_COMMON u8 revision_ID;
        u8 reserved;
        u64 granularity;
        u64 minimum;
index 80eb1900297f549f8191b8d3deb4b90ad80863b8..c8b5e2565b987164a2f42138b521b7bfb90798fc 100644 (file)
@@ -62,7 +62,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_ds_execute_arguments
  *
- * PARAMETERS:  Node                - Object NS node
+ * PARAMETERS:  node                - Object NS node
  *              scope_node          - Parent NS node
  *              aml_length          - Length of executable AML
  *              aml_start           - Pointer to the AML
index effe4ca1133fd7cd93ebb1ab397d8be0ffe68e72..465f02134b896d253a54b5c3de6211dc15cacc8e 100644 (file)
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("dscontrol")
  * FUNCTION:    acpi_ds_exec_begin_control_op
  *
  * PARAMETERS:  walk_list       - The list that owns the walk stack
- *              Op              - The control Op
+ *              op              - The control Op
  *
  * RETURN:      Status
  *
@@ -153,7 +153,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_exec_end_control_op
  *
  * PARAMETERS:  walk_list       - The list that owns the walk stack
- *              Op              - The control Op
+ *              op              - The control Op
  *
  * RETURN:      Status
  *
index cd243cf2cab2373f25c40f076b2d211b8f5641a2..3da6fd8530c5814aaeb18de5c3d28299017bd20a 100644 (file)
 ACPI_MODULE_NAME("dsfield")
 
 /* Local prototypes */
+#ifdef ACPI_ASL_COMPILER
+#include "acdisasm.h"
+static acpi_status
+acpi_ds_create_external_region(acpi_status lookup_status,
+                              union acpi_parse_object *op,
+                              char *path,
+                              struct acpi_walk_state *walk_state,
+                              struct acpi_namespace_node **node);
+#endif
+
 static acpi_status
 acpi_ds_get_field_names(struct acpi_create_field_info *info,
                        struct acpi_walk_state *walk_state,
                        union acpi_parse_object *arg);
 
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_external_region (iASL Disassembler only)
+ *
+ * PARAMETERS:  lookup_status   - Status from ns_lookup operation
+ *              op              - Op containing the Field definition and args
+ *              path            - Pathname of the region
+ *              walk_state      - Current method state
+ *              node            - Where the new region node is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add region to the external list if NOT_FOUND. Create a new
+ *              region node/object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_external_region(acpi_status lookup_status,
+                              union acpi_parse_object *op,
+                              char *path,
+                              struct acpi_walk_state *walk_state,
+                              struct acpi_namespace_node **node)
+{
+       acpi_status status;
+       union acpi_operand_object *obj_desc;
+
+       if (lookup_status != AE_NOT_FOUND) {
+               return (lookup_status);
+       }
+
+       /*
+        * Table disassembly:
+        * operation_region not found. Generate an External for it, and
+        * insert the name into the namespace.
+        */
+       acpi_dm_add_to_external_list(op, path, ACPI_TYPE_REGION, 0);
+       status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
+                               ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
+                               walk_state, node);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       /* Must create and install a region object for the new node */
+
+       obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
+       if (!obj_desc) {
+               return (AE_NO_MEMORY);
+       }
+
+       obj_desc->region.node = *node;
+       status = acpi_ns_attach_object(*node, obj_desc, ACPI_TYPE_REGION);
+       return (status);
+}
+#endif
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ds_create_buffer_field
  *
- * PARAMETERS:  Op                  - Current parse op (create_xXField)
+ * PARAMETERS:  op                  - Current parse op (create_XXField)
  *              walk_state          - Current state
  *
  * RETURN:      Status
@@ -99,7 +167,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 
                arg = acpi_ps_get_arg(op, 3);
        } else {
-               /* For all other create_xXXField operators, name is the 3rd argument */
+               /* For all other create_XXXField operators, name is the 3rd argument */
 
                arg = acpi_ps_get_arg(op, 2);
        }
@@ -203,9 +271,9 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
  *
  * FUNCTION:    acpi_ds_get_field_names
  *
- * PARAMETERS:  Info            - create_field info structure
+ * PARAMETERS:  info            - create_field info structure
 *              walk_state      - Current method state
- *              Arg             - First parser arg for the field name list
+ *              arg             - First parser arg for the field name list
  *
  * RETURN:      Status
  *
@@ -234,10 +302,10 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
        while (arg) {
                /*
                 * Four types of field elements are handled:
-                * 1) Name - Enters a new named field into the namespace
-                * 2) Offset - specifies a bit offset
+                * 1) name - Enters a new named field into the namespace
+                * 2) offset - specifies a bit offset
                 * 3) access_as - changes the access mode/attributes
-                * 4) Connection - Associate a resource template with the field
+                * 4) connection - Associate a resource template with the field
                 */
                switch (arg->common.aml_opcode) {
                case AML_INT_RESERVEDFIELD_OP:
@@ -389,7 +457,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
  *
  * FUNCTION:    acpi_ds_create_field
  *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
  *              region_node     - Object for the containing Operation Region
 *              walk_state      - Current method state
  *
@@ -413,12 +481,19 @@ acpi_ds_create_field(union acpi_parse_object *op,
        /* First arg is the name of the parent op_region (must already exist) */
 
        arg = op->common.value.arg;
+
        if (!region_node) {
                status =
                    acpi_ns_lookup(walk_state->scope_info,
                                   arg->common.value.name, ACPI_TYPE_REGION,
                                   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
                                   walk_state, &region_node);
+#ifdef ACPI_ASL_COMPILER
+               status = acpi_ds_create_external_region(status, arg,
+                                                       arg->common.value.name,
+                                                       walk_state,
+                                                       &region_node);
+#endif
                if (ACPI_FAILURE(status)) {
                        ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
                        return_ACPI_STATUS(status);
@@ -446,7 +521,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
  *
  * FUNCTION:    acpi_ds_init_field_objects
  *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
  *              walk_state      - Current method state
  *
  * RETURN:      Status
@@ -561,7 +636,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
  *
  * FUNCTION:    acpi_ds_create_bank_field
  *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
  *              region_node     - Object for the containing Operation Region
  *              walk_state      - Current method state
  *
@@ -591,6 +666,12 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
                                   arg->common.value.name, ACPI_TYPE_REGION,
                                   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
                                   walk_state, &region_node);
+#ifdef ACPI_ASL_COMPILER
+               status = acpi_ds_create_external_region(status, arg,
+                                                       arg->common.value.name,
+                                                       walk_state,
+                                                       &region_node);
+#endif
                if (ACPI_FAILURE(status)) {
                        ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
                        return_ACPI_STATUS(status);
@@ -645,7 +726,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
  *
  * FUNCTION:    acpi_ds_create_index_field
  *
- * PARAMETERS:  Op              - Op containing the Field definition and args
+ * PARAMETERS:  op              - Op containing the Field definition and args
  *              region_node     - Object for the containing Operation Region
  *              walk_state      - Current method state
  *
index 9e5ac7f780a7e1f006bd7bd1dd256a9367859e32..87eff701ecfae63c066a88c0adb944133a9079af 100644 (file)
@@ -60,8 +60,8 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
  * FUNCTION:    acpi_ds_init_one_object
  *
  * PARAMETERS:  obj_handle      - Node for the object
- *              Level           - Current nesting level
- *              Context         - Points to a init info struct
+ *              level           - Current nesting level
+ *              context         - Points to a init info struct
  *              return_value    - Not used
  *
  * RETURN:      Status
index 00f5dab5bcc0e7414cb4ea910daf37a6536bbf5c..aa9a5d4e4052b85f037efa12345306ed422bd0d1 100644 (file)
@@ -61,7 +61,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
  *
  * FUNCTION:    acpi_ds_method_error
  *
- * PARAMETERS:  Status          - Execution status
+ * PARAMETERS:  status          - Execution status
  *              walk_state      - Current state
  *
  * RETURN:      Status
@@ -306,9 +306,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
  *
  * FUNCTION:    acpi_ds_call_control_method
  *
- * PARAMETERS:  Thread              - Info for this thread
+ * PARAMETERS:  thread              - Info for this thread
  *              this_walk_state     - Current walk state
- *              Op                  - Current Op to be walked
+ *              op                  - Current Op to be walked
  *
  * RETURN:      Status
  *
index b40bd507be5dc33993e96667b5e2c50ad6395cff..8d55cebaa656d54f73f2ead58fa9e7e764ad4d76 100644 (file)
@@ -177,7 +177,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
  *
  * FUNCTION:    acpi_ds_method_data_init_args
  *
- * PARAMETERS:  *Params         - Pointer to a parameter list for the method
+ * PARAMETERS:  *params         - Pointer to a parameter list for the method
  *              max_param_count - The arg count for this method
  *              walk_state      - Current walk state object
  *
@@ -232,11 +232,11 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
  *
  * FUNCTION:    acpi_ds_method_data_get_node
  *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type                - Either ACPI_REFCLASS_LOCAL or
  *                                    ACPI_REFCLASS_ARG
- *              Index               - Which Local or Arg whose type to get
+ *              index               - Which Local or Arg whose type to get
  *              walk_state          - Current walk state object
- *              Node                - Where the node is returned.
+ *              node                - Where the node is returned.
  *
  * RETURN:      Status and node
  *
@@ -296,10 +296,10 @@ acpi_ds_method_data_get_node(u8 type,
  *
  * FUNCTION:    acpi_ds_method_data_set_value
  *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type                - Either ACPI_REFCLASS_LOCAL or
  *                                    ACPI_REFCLASS_ARG
- *              Index               - Which Local or Arg to get
- *              Object              - Object to be inserted into the stack entry
+ *              index               - Which Local or Arg to get
+ *              object              - Object to be inserted into the stack entry
  *              walk_state          - Current walk state object
  *
  * RETURN:      Status
@@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u8 type,
         * Increment ref count so object can't be deleted while installed.
         * NOTE: We do not copy the object in order to preserve the call by
         * reference semantics of ACPI Control Method invocation.
-        * (See ACPI Specification 2.0_c)
+        * (See ACPI Specification 2.0C)
         */
        acpi_ut_add_reference(object);
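
The comment above explains why a store into an Arg or Local bumps the reference count instead of copying: the caller and the Local must keep seeing the same object, preserving call-by-reference semantics. A minimal sketch of that idea follows, using a simplified reference-counted object rather than the real union acpi_operand_object.

#include <assert.h>

struct ref_object {
	int ref_count;
};

static void object_add_reference(struct ref_object *obj)
{
	obj->ref_count++;
}

static void store_to_local(struct ref_object **local_slot,
			   struct ref_object *obj)
{
	/* No copy is made; the Local slot shares the caller's object */
	object_add_reference(obj);
	*local_slot = obj;
}

int main(void)
{
	struct ref_object obj = { .ref_count = 1 };
	struct ref_object *local0 = NULL;

	store_to_local(&local0, &obj);
	assert(local0 == &obj && obj.ref_count == 2);
	return 0;
}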
 
@@ -350,9 +350,9 @@ acpi_ds_method_data_set_value(u8 type,
  *
  * FUNCTION:    acpi_ds_method_data_get_value
  *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type                - Either ACPI_REFCLASS_LOCAL or
  *                                    ACPI_REFCLASS_ARG
- *              Index               - Which local_var or argument to get
+ *              index               - Which localVar or argument to get
  *              walk_state          - Current walk state object
  *              dest_desc           - Where Arg or Local value is returned
  *
@@ -458,9 +458,9 @@ acpi_ds_method_data_get_value(u8 type,
  *
  * FUNCTION:    acpi_ds_method_data_delete_value
  *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type                - Either ACPI_REFCLASS_LOCAL or
  *                                    ACPI_REFCLASS_ARG
- *              Index               - Which local_var or argument to delete
+ *              index               - Which localVar or argument to delete
  *              walk_state          - Current walk state object
  *
  * RETURN:      None
@@ -515,9 +515,9 @@ acpi_ds_method_data_delete_value(u8 type,
  *
  * FUNCTION:    acpi_ds_store_object_to_local
  *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS:  type                - Either ACPI_REFCLASS_LOCAL or
  *                                    ACPI_REFCLASS_ARG
- *              Index               - Which Local or Arg to set
+ *              index               - Which Local or Arg to set
  *              obj_desc            - Value to be stored
  *              walk_state          - Current walk state
  *
@@ -670,8 +670,8 @@ acpi_ds_store_object_to_local(u8 type,
  *
  * FUNCTION:    acpi_ds_method_data_get_type
  *
- * PARAMETERS:  Opcode              - Either AML_LOCAL_OP or AML_ARG_OP
- *              Index               - Which Local or Arg whose type to get
+ * PARAMETERS:  opcode              - Either AML_LOCAL_OP or AML_ARG_OP
+ *              index               - Which Local or Arg whose type to get
  *              walk_state          - Current walk state object
  *
  * RETURN:      Data type of current value of the selected Arg or Local
index d7045ca3e32a59269e036bac0e05893e0a5d4795..68592dd349602f09bc6982f741b8fd3a9bef31b8 100644 (file)
@@ -64,7 +64,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_build_internal_object
  *
  * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
+ *              op              - Parser object to be translated
  *              obj_desc_ptr    - Where the ACPI internal object is returned
  *
  * RETURN:      Status
@@ -250,7 +250,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_build_internal_buffer_obj
  *
  * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
+ *              op              - Parser object to be translated
  *              buffer_length   - Length of the buffer
  *              obj_desc_ptr    - Where the ACPI internal object is returned
  *
@@ -354,7 +354,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_build_internal_package_obj
  *
  * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
+ *              op              - Parser object to be translated
  *              element_count   - Number of elements in the package - this is
  *                                the num_elements argument to Package()
  *              obj_desc_ptr    - Where the ACPI internal object is returned
@@ -547,8 +547,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_create_node
  *
  * PARAMETERS:  walk_state      - Current walk state
- *              Node            - NS Node to be initialized
- *              Op              - Parser object to be translated
+ *              node            - NS Node to be initialized
+ *              op              - Parser object to be translated
  *
  * RETURN:      Status
  *
@@ -611,8 +611,8 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_init_object_from_op
  *
  * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser op used to init the internal object
- *              Opcode          - AML opcode associated with the object
+ *              op              - Parser op used to init the internal object
+ *              opcode          - AML opcode associated with the object
  *              ret_obj_desc    - Namespace object to be initialized
  *
  * RETURN:      Status
index e5eff758510266c0c68a1b772c8004598d1fc2a1..aa34d8984d34476084702f0bc9cb9077952a0edc 100644 (file)
@@ -286,7 +286,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
  * FUNCTION:    acpi_ds_eval_buffer_field_operands
  *
  * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid buffer_field Op object
+ *              op              - A valid buffer_field Op object
  *
  * RETURN:      Status
  *
@@ -370,7 +370,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_eval_region_operands
  *
  * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid region Op object
+ *              op              - A valid region Op object
  *
  * RETURN:      Status
  *
@@ -397,7 +397,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
         */
        node = op->common.node;
 
-       /* next_op points to the op that holds the space_iD */
+       /* next_op points to the op that holds the space_ID */
 
        next_op = op->common.value.arg;
 
@@ -461,7 +461,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_eval_table_region_operands
  *
  * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid region Op object
+ *              op              - A valid region Op object
  *
  * RETURN:      Status
  *
@@ -560,7 +560,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_eval_data_object_operands
  *
  * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid data_object Op object
+ *              op              - A valid data_object Op object
  *              obj_desc        - data_object
  *
  * RETURN:      Status
@@ -662,7 +662,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ds_eval_bank_field_operands
  *
  * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid bank_field Op object
+ *              op              - A valid bank_field Op object
  *
  * RETURN:      Status
  *
index 1abcda31037f20e6988fc073cdc00764f86d7357..73a5447475f5569ef140fc873957035df8bd57b1 100644 (file)
@@ -157,7 +157,7 @@ acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
  *
  * FUNCTION:    acpi_ds_is_result_used
  *
- * PARAMETERS:  Op                  - Current Op
+ * PARAMETERS:  op                  - Current Op
  *              walk_state          - Current State
  *
  * RETURN:      TRUE if result is used, FALSE otherwise
@@ -323,7 +323,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
  *
  * FUNCTION:    acpi_ds_delete_result_if_not_used
  *
- * PARAMETERS:  Op              - Current parse Op
+ * PARAMETERS:  op              - Current parse Op
  *              result_obj      - Result of the operation
  *              walk_state      - Current state
  *
@@ -445,7 +445,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
  * FUNCTION:    acpi_ds_create_operand
  *
  * PARAMETERS:  walk_state      - Current walk state
- *              Arg             - Parse object for the argument
+ *              arg             - Parse object for the argument
  *              arg_index       - Which argument (zero based)
  *
  * RETURN:      Status
index 9e9490a9cbf0e22ab16bfa9c32972afbc1971962..f6c4295470ae9117226d0031c763c619248fe4eb 100644 (file)
@@ -85,8 +85,8 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
  *
  * FUNCTION:    acpi_ds_scope_stack_push
  *
- * PARAMETERS:  Node            - Name to be made current
- *              Type            - Type of frame being pushed
+ * PARAMETERS:  node            - Name to be made current
+ *              type            - Type of frame being pushed
  *              walk_state      - Current state
  *
  * RETURN:      Status
index c9c2ac13e7cc926d977ad8cb3052af532416badc..d0e6555061e4c5544e487074e64f2768cd8b8296 100644 (file)
@@ -58,7 +58,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
  *
  * FUNCTION:    acpi_ds_result_pop
  *
- * PARAMETERS:  Object              - Where to return the popped object
+ * PARAMETERS:  object              - Where to return the popped object
  *              walk_state          - Current Walk state
  *
  * RETURN:      Status
@@ -132,7 +132,7 @@ acpi_ds_result_pop(union acpi_operand_object **object,
  *
  * FUNCTION:    acpi_ds_result_push
  *
- * PARAMETERS:  Object              - Where to return the popped object
+ * PARAMETERS:  object              - Where to return the popped object
  *              walk_state          - Current Walk state
  *
  * RETURN:      Status
@@ -296,7 +296,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
  *
  * FUNCTION:    acpi_ds_obj_stack_push
  *
- * PARAMETERS:  Object              - Object to push
+ * PARAMETERS:  object              - Object to push
  *              walk_state          - Current Walk state
  *
  * RETURN:      Status
@@ -433,7 +433,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
  *
  * FUNCTION:    acpi_ds_get_current_walk_state
  *
- * PARAMETERS:  Thread          - Get current active state for this Thread
+ * PARAMETERS:  thread          - Get current active state for this Thread
  *
  * RETURN:      Pointer to the current walk state
  *
@@ -462,7 +462,7 @@ struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
  * FUNCTION:    acpi_ds_push_walk_state
  *
  * PARAMETERS:  walk_state      - State to push
- *              Thread          - Thread state object
+ *              thread          - Thread state object
  *
  * RETURN:      None
  *
@@ -486,7 +486,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
  *
  * FUNCTION:    acpi_ds_pop_walk_state
  *
- * PARAMETERS:  Thread      - Current thread state
+ * PARAMETERS:  thread      - Current thread state
  *
  * RETURN:      A walk_state object popped from the thread's stack
  *
@@ -525,9 +525,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
  * FUNCTION:    acpi_ds_create_walk_state
  *
  * PARAMETERS:  owner_id        - ID for object creation
- *              Origin          - Starting point for this walk
+ *              origin          - Starting point for this walk
  *              method_desc     - Method object
- *              Thread          - Current thread state
+ *              thread          - Current thread state
  *
  * RETURN:      Pointer to the new walk state.
  *
@@ -578,11 +578,11 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
  * FUNCTION:    acpi_ds_init_aml_walk
  *
  * PARAMETERS:  walk_state      - New state to be initialized
- *              Op              - Current parse op
+ *              op              - Current parse op
  *              method_node     - Control method NS node, if any
  *              aml_start       - Start of AML
  *              aml_length      - Length of AML
- *              Info            - Method info block (params, etc.)
+ *              info            - Method info block (params, etc.)
  *              pass_number     - 1, 2, or 3
  *
  * RETURN:      Status
index 07e4dc44f81cf4ee6339e504b27467c51b1ebe8f..d4acfbbe5b2947624b89d5cf651a759256c89ded 100644 (file)
@@ -251,7 +251,7 @@ u32 acpi_ev_fixed_event_detect(void)
  *
  * FUNCTION:    acpi_ev_fixed_event_dispatch
  *
- * PARAMETERS:  Event               - Event type
+ * PARAMETERS:  event               - Event type
  *
  * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
  *
index cfeab38795d88876e0881d087f3b2371254db444..af14a71376329bdd7aa7558da4bc69121726bb79 100644 (file)
@@ -135,7 +135,7 @@ acpi_status acpi_ev_remove_global_lock_handler(void)
  *
  * FUNCTION:    acpi_ev_global_lock_handler
  *
- * PARAMETERS:  Context         - From thread interface, not used
+ * PARAMETERS:  context         - From thread interface, not used
  *
  * RETURN:      ACPI_INTERRUPT_HANDLED
  *
@@ -182,7 +182,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
  *
  * FUNCTION:    acpi_ev_acquire_global_lock
  *
- * PARAMETERS:  Timeout         - Max time to wait for the lock, in millisec.
+ * PARAMETERS:  timeout         - Max time to wait for the lock, in millisec.
  *
  * RETURN:      Status
  *
index 8ba0e5f170916a55d81244ef679eaa7715365972..afbd5cb391f671eeedef586c3c6aa515fd8eba62 100644 (file)
@@ -466,7 +466,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
        acpi_status status;
        struct acpi_gpe_event_info *local_gpe_event_info;
        struct acpi_evaluate_info *info;
-       struct acpi_gpe_notify_object *notify_object;
+       struct acpi_gpe_notify_info *notify;
 
        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
@@ -517,17 +517,17 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                 * completes. The notify handlers are NOT invoked synchronously
                 * from this thread -- because handlers may in turn run other
                 * control methods.
+                *
+                * June 2012: Expand implicit notify mechanism to support
+                * notifies on multiple device objects.
                 */
-               status = acpi_ev_queue_notify_request(
-                               local_gpe_event_info->dispatch.device.node,
-                               ACPI_NOTIFY_DEVICE_WAKE);
-
-               notify_object = local_gpe_event_info->dispatch.device.next;
-               while (ACPI_SUCCESS(status) && notify_object) {
-                       status = acpi_ev_queue_notify_request(
-                                       notify_object->node,
-                                       ACPI_NOTIFY_DEVICE_WAKE);
-                       notify_object = notify_object->next;
+               notify = local_gpe_event_info->dispatch.notify_list;
+               while (ACPI_SUCCESS(status) && notify) {
+                       status =
+                           acpi_ev_queue_notify_request(notify->device_node,
+                                                        ACPI_NOTIFY_DEVICE_WAKE);
+
+                       notify = notify->next;
                }
 
                break;
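
The hunk above replaces the single dispatch.device node plus chained notify_object walk with one notify_list walk, so an implicit-notify GPE can wake several device objects. Below is a small stand-alone sketch of that loop; the types and queue_notify() are stand-ins for the ACPICA structures and acpi_ev_queue_notify_request().

#include <stddef.h>

#define NOTIFY_DEVICE_WAKE 0x02

struct notify_info {
	void *device_node;
	struct notify_info *next;
};

/* Stand-in for acpi_ev_queue_notify_request(); returns 0 on success */
static int queue_notify(void *device_node, unsigned int value)
{
	(void)device_node;
	(void)value;
	return 0;
}

static int notify_all_wake_devices(struct notify_info *list)
{
	int status = 0;

	/* Stop at the first failure, as the loop above does */
	while (status == 0 && list) {
		status = queue_notify(list->device_node, NOTIFY_DEVICE_WAKE);
		list = list->next;
	}
	return status;
}

int main(void)
{
	char a, b;
	struct notify_info dev2 = { &b, NULL };
	struct notify_info dev1 = { &a, &dev2 };

	return notify_all_wake_devices(&dev1);
}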
index 23a3ca86b2eb20cbaee3d1bd1514c45135b6572a..8cf4c104c7b746670a29f6f9aa61126336bfd877 100644 (file)
@@ -318,7 +318,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
  * FUNCTION:    acpi_ev_create_gpe_block
  *
  * PARAMETERS:  gpe_device          - Handle to the parent GPE block
- *              gpe_block_address   - Address and space_iD
+ *              gpe_block_address   - Address and space_ID
  *              register_count      - Number of GPE register pairs in the block
  *              gpe_block_base_number - Starting GPE number for the block
  *              interrupt_number    - H/W interrupt for the block
index 3c43796b8361dd67447162f2c39ac2ca45e1d856..cb50dd91bc186a80f8fb36649c69086794d0c9cd 100644 (file)
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("evgpeutil")
  * FUNCTION:    acpi_ev_walk_gpe_list
  *
  * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
- *              Context             - Value passed to callback
+ *              context             - Value passed to callback
  *
  * RETURN:      Status
  *
@@ -347,6 +347,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                            void *context)
 {
        struct acpi_gpe_event_info *gpe_event_info;
+       struct acpi_gpe_notify_info *notify;
+       struct acpi_gpe_notify_info *next;
        u32 i;
        u32 j;
 
@@ -365,10 +367,28 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 
                        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
                            ACPI_GPE_DISPATCH_HANDLER) {
+
+                               /* Delete an installed handler block */
+
                                ACPI_FREE(gpe_event_info->dispatch.handler);
                                gpe_event_info->dispatch.handler = NULL;
                                gpe_event_info->flags &=
                                    ~ACPI_GPE_DISPATCH_MASK;
+                       } else if ((gpe_event_info->
+                                flags & ACPI_GPE_DISPATCH_MASK) ==
+                               ACPI_GPE_DISPATCH_NOTIFY) {
+
+                               /* Delete the implicit notification device list */
+
+                               notify = gpe_event_info->dispatch.notify_list;
+                               while (notify) {
+                                       next = notify->next;
+                                       ACPI_FREE(notify);
+                                       notify = next;
+                               }
+                               gpe_event_info->dispatch.notify_list = NULL;
+                               gpe_event_info->flags &=
+                                   ~ACPI_GPE_DISPATCH_MASK;
                        }
                }
        }
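
The new branch above tears down the implicit-notification device list when GPE handlers are deleted. The sketch below shows the same save-next-before-free idiom with plain C stand-in types.

#include <stdlib.h>

struct notify_info {
	struct notify_info *next;
};

static void free_notify_list(struct notify_info *notify)
{
	struct notify_info *next;

	while (notify) {
		next = notify->next;	/* Read the link before freeing the node */
		free(notify);
		notify = next;
	}
}

int main(void)
{
	struct notify_info *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct notify_info *node = malloc(sizeof(*node));

		if (!node)
			break;
		node->next = head;
		head = node;
	}
	free_notify_list(head);
	return 0;
}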
index 51ef9f5e002da1048c4e99d2b8939d0c35976633..51f537937c1f3e364ddaf9753464e8a82e1716f2 100644 (file)
@@ -56,7 +56,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
  *
  * FUNCTION:    acpi_ev_is_notify_object
  *
- * PARAMETERS:  Node            - Node to check
+ * PARAMETERS:  node            - Node to check
  *
  * RETURN:      TRUE if notifies allowed on this object
  *
@@ -86,7 +86,7 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
  *
  * FUNCTION:    acpi_ev_queue_notify_request
  *
- * PARAMETERS:  Node            - NS node for the notified object
+ * PARAMETERS:  node            - NS node for the notified object
  *              notify_value    - Value from the Notify() request
  *
  * RETURN:      Status
@@ -101,102 +101,77 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
                             u32 notify_value)
 {
        union acpi_operand_object *obj_desc;
-       union acpi_operand_object *handler_obj = NULL;
-       union acpi_generic_state *notify_info;
+       union acpi_operand_object *handler_list_head = NULL;
+       union acpi_generic_state *info;
+       u8 handler_list_id = 0;
        acpi_status status = AE_OK;
 
        ACPI_FUNCTION_NAME(ev_queue_notify_request);
 
-       /*
-        * For value 0x03 (Ejection Request), may need to run a device method.
-        * For value 0x02 (Device Wake), if _PRW exists, may need to run
-        *   the _PS0 method.
-        * For value 0x80 (Status Change) on the power button or sleep button,
-        *   initiate soft-off or sleep operation.
-        *
-        * For all cases, simply dispatch the notify to the handler.
-        */
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
-                         acpi_ut_get_node_name(node),
-                         acpi_ut_get_type_name(node->type), notify_value,
-                         acpi_ut_get_notify_name(notify_value), node));
+       /* Are Notifies allowed on this object? */
 
-       /* Get the notify object attached to the NS Node */
-
-       obj_desc = acpi_ns_get_attached_object(node);
-       if (obj_desc) {
-
-               /* We have the notify object, Get the correct handler */
-
-               switch (node->type) {
+       if (!acpi_ev_is_notify_object(node)) {
+               return (AE_TYPE);
+       }
 
-                       /* Notify is allowed only on these types */
+       /* Get the correct notify list type (System or Device) */
 
-               case ACPI_TYPE_DEVICE:
-               case ACPI_TYPE_THERMAL:
-               case ACPI_TYPE_PROCESSOR:
+       if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+               handler_list_id = ACPI_SYSTEM_HANDLER_LIST;
+       } else {
+               handler_list_id = ACPI_DEVICE_HANDLER_LIST;
+       }
 
-                       if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
-                               handler_obj =
-                                   obj_desc->common_notify.system_notify;
-                       } else {
-                               handler_obj =
-                                   obj_desc->common_notify.device_notify;
-                       }
-                       break;
+       /* Get the notify object attached to the namespace Node */
 
-               default:
+       obj_desc = acpi_ns_get_attached_object(node);
+       if (obj_desc) {
 
-                       /* All other types are not supported */
+               /* We have an attached object, Get the correct handler list */
 
-                       return (AE_TYPE);
-               }
+               handler_list_head =
+                   obj_desc->common_notify.notify_list[handler_list_id];
        }
 
        /*
-        * If there is a handler to run, schedule the dispatcher.
-        * Check for:
-        * 1) Global system notify handler
-        * 2) Global device notify handler
-        * 3) Per-device notify handler
+        * If there is no notify handler (Global or Local)
+        * for this object, just ignore the notify
         */
-       if ((acpi_gbl_system_notify.handler &&
-            (notify_value <= ACPI_MAX_SYS_NOTIFY)) ||
-           (acpi_gbl_device_notify.handler &&
-            (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {
-               notify_info = acpi_ut_create_generic_state();
-               if (!notify_info) {
-                       return (AE_NO_MEMORY);
-               }
+       if (!acpi_gbl_global_notify[handler_list_id].handler
+           && !handler_list_head) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                 "No notify handler for Notify, ignoring (%4.4s, %X) node %p\n",
+                                 acpi_ut_get_node_name(node), notify_value,
+                                 node));
 
-               if (!handler_obj) {
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                         "Executing system notify handler for Notify (%4.4s, %X) "
-                                         "node %p\n",
-                                         acpi_ut_get_node_name(node),
-                                         notify_value, node));
-               }
+               return (AE_OK);
+       }
 
-               notify_info->common.descriptor_type =
-                   ACPI_DESC_TYPE_STATE_NOTIFY;
-               notify_info->notify.node = node;
-               notify_info->notify.value = (u16) notify_value;
-               notify_info->notify.handler_obj = handler_obj;
+       /* Setup notify info and schedule the notify dispatcher */
 
-               status =
-                   acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
-                                   notify_info);
-               if (ACPI_FAILURE(status)) {
-                       acpi_ut_delete_generic_state(notify_info);
-               }
-       } else {
-               /* There is no notify handler (per-device or system) for this device */
+       info = acpi_ut_create_generic_state();
+       if (!info) {
+               return (AE_NO_MEMORY);
+       }
 
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "No notify handler for Notify (%4.4s, %X) node %p\n",
-                                 acpi_ut_get_node_name(node), notify_value,
-                                 node));
+       info->common.descriptor_type = ACPI_DESC_TYPE_STATE_NOTIFY;
+
+       info->notify.node = node;
+       info->notify.value = (u16)notify_value;
+       info->notify.handler_list_id = handler_list_id;
+       info->notify.handler_list_head = handler_list_head;
+       info->notify.global = &acpi_gbl_global_notify[handler_list_id];
+
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                         "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
+                         acpi_ut_get_node_name(node),
+                         acpi_ut_get_type_name(node->type), notify_value,
+                         acpi_ut_get_notify_name(notify_value), node));
+
+       status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+                                info);
+       if (ACPI_FAILURE(status)) {
+               acpi_ut_delete_generic_state(info);
        }
 
        return (status);
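
The rewritten acpi_ev_queue_notify_request() above first classifies the notify value: values up to ACPI_MAX_SYS_NOTIFY select the system handler list, larger values the device handler list, and the request is dropped early when neither a global nor a local handler exists. A tiny sketch of the list selection follows; the constants mirror the 0x00-0x7F / 0x80-0xFF split but are otherwise stand-ins.

#include <stdio.h>

#define MAX_SYS_NOTIFY		0x7F
#define SYSTEM_HANDLER_LIST	0	/* Notify values 0x00-0x7F */
#define DEVICE_HANDLER_LIST	1	/* Notify values 0x80-0xFF */

static unsigned int select_handler_list(unsigned int notify_value)
{
	return (notify_value <= MAX_SYS_NOTIFY) ?
	    SYSTEM_HANDLER_LIST : DEVICE_HANDLER_LIST;
}

int main(void)
{
	printf("0x02 -> list %u\n", select_handler_list(0x02));	/* system */
	printf("0x80 -> list %u\n", select_handler_list(0x80));	/* device */
	return 0;
}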
@@ -206,7 +181,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
  *
  * FUNCTION:    acpi_ev_notify_dispatch
  *
- * PARAMETERS:  Context         - To be passed to the notify handler
+ * PARAMETERS:  context         - To be passed to the notify handler
  *
  * RETURN:      None.
  *
@@ -217,60 +192,34 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
 
 static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
 {
-       union acpi_generic_state *notify_info =
-           (union acpi_generic_state *)context;
-       acpi_notify_handler global_handler = NULL;
-       void *global_context = NULL;
+       union acpi_generic_state *info = (union acpi_generic_state *)context;
        union acpi_operand_object *handler_obj;
 
        ACPI_FUNCTION_ENTRY();
 
-       /*
-        * We will invoke a global notify handler if installed. This is done
-        * _before_ we invoke the per-device handler attached to the device.
-        */
-       if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
-
-               /* Global system notification handler */
-
-               if (acpi_gbl_system_notify.handler) {
-                       global_handler = acpi_gbl_system_notify.handler;
-                       global_context = acpi_gbl_system_notify.context;
-               }
-       } else {
-               /* Global driver notification handler */
-
-               if (acpi_gbl_device_notify.handler) {
-                       global_handler = acpi_gbl_device_notify.handler;
-                       global_context = acpi_gbl_device_notify.context;
-               }
-       }
-
-       /* Invoke the system handler first, if present */
+       /* Invoke a global notify handler if installed */
 
-       if (global_handler) {
-               global_handler(notify_info->notify.node,
-                              notify_info->notify.value, global_context);
+       if (info->notify.global->handler) {
+               info->notify.global->handler(info->notify.node,
+                                            info->notify.value,
+                                            info->notify.global->context);
        }
 
-       /* Now invoke the per-device handler, if present */
+       /* Now invoke the local notify handler(s) if any are installed */
 
-       handler_obj = notify_info->notify.handler_obj;
-       if (handler_obj) {
-               struct acpi_object_notify_handler *notifier;
+       handler_obj = info->notify.handler_list_head;
+       while (handler_obj) {
+               handler_obj->notify.handler(info->notify.node,
+                                           info->notify.value,
+                                           handler_obj->notify.context);
 
-               notifier = &handler_obj->notify;
-               while (notifier) {
-                       notifier->handler(notify_info->notify.node,
-                                         notify_info->notify.value,
-                                         notifier->context);
-                       notifier = notifier->next;
-               }
+               handler_obj =
+                   handler_obj->notify.next[info->notify.handler_list_id];
        }
 
        /* All done with the info object */
 
-       acpi_ut_delete_generic_state(notify_info);
+       acpi_ut_delete_generic_state(info);
 }
 
 #if (!ACPI_REDUCED_HARDWARE)
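
The simplified acpi_ev_notify_dispatch() above runs at most one global handler for the list type and then walks the object's per-type handler chain through next[handler_list_id]. The following stand-alone sketch models that order with simplified stand-in types and handler signatures.

#include <stdio.h>

#define NUM_NOTIFY_TYPES 2

typedef void (*notify_handler)(unsigned int value, void *context);

struct handler_node {
	notify_handler handler;
	void *context;
	struct handler_node *next[NUM_NOTIFY_TYPES];	/* One link per list */
};

struct global_notify {
	notify_handler handler;
	void *context;
};

static void dispatch_notify(struct global_notify *global,
			    struct handler_node *list_head,
			    unsigned int list_id, unsigned int value)
{
	struct handler_node *node;

	/* Global handler (if any) runs first, then every local handler */
	if (global->handler)
		global->handler(value, global->context);

	for (node = list_head; node; node = node->next[list_id])
		node->handler(value, node->context);
}

static void print_handler(unsigned int value, void *context)
{
	printf("%s: notify 0x%02X\n", (const char *)context, value);
}

int main(void)
{
	struct handler_node local = { print_handler, "local", { NULL, NULL } };
	struct global_notify global = { print_handler, "global" };

	dispatch_notify(&global, &local, 0, 0x02);
	return 0;
}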
index 1b0180a1b798bb0184241ac12d75937b55d74902..0cc6a16fedc72c9fb7767369afe9e31dddcc8699 100644 (file)
@@ -150,7 +150,7 @@ acpi_status acpi_ev_install_region_handlers(void)
  *
  * FUNCTION:    acpi_ev_has_default_handler
  *
- * PARAMETERS:  Node                - Namespace node for the device
+ * PARAMETERS:  node                - Namespace node for the device
  *              space_id            - The address space ID
  *
  * RETURN:      TRUE if default handler is installed, FALSE otherwise
@@ -244,7 +244,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
  * FUNCTION:    acpi_ev_execute_reg_method
  *
  * PARAMETERS:  region_obj          - Region object
- *              Function            - Passed to _REG: On (1) or Off (0)
+ *              function            - Passed to _REG: On (1) or Off (0)
  *
  * RETURN:      Status
  *
@@ -286,10 +286,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
        /*
         * The _REG method has two arguments:
         *
-        * Arg0 - Integer:
+        * arg0 - Integer:
         *  Operation region space ID Same value as region_obj->Region.space_id
         *
-        * Arg1 - Integer:
+        * arg1 - Integer:
         *  connection status 1 for connecting the handler, 0 for disconnecting
         *  the handler (Passed as a parameter)
         */
@@ -330,10 +330,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
  *
  * PARAMETERS:  region_obj          - Internal region object
  *              field_obj           - Corresponding field. Can be NULL.
- *              Function            - Read or Write operation
+ *              function            - Read or Write operation
  *              region_offset       - Where in the region to read or write
  *              bit_width           - Field width in bits (8, 16, 32, or 64)
- *              Value               - Pointer to in or out value, must be
+ *              value               - Pointer to in or out value, must be
  *                                    a full 64-bit integer
  *
  * RETURN:      Status
@@ -840,11 +840,11 @@ acpi_ev_install_handler(acpi_handle obj_handle,
  *
  * FUNCTION:    acpi_ev_install_space_handler
  *
- * PARAMETERS:  Node            - Namespace node for the device
+ * PARAMETERS:  node            - Namespace node for the device
  *              space_id        - The address space ID
- *              Handler         - Address of the handler
- *              Setup           - Address of the setup function
- *              Context         - Value passed to the handler on each access
+ *              handler         - Address of the handler
+ *              setup           - Address of the setup function
+ *              context         - Value passed to the handler on each access
  *
  * RETURN:      Status
  *
@@ -1061,7 +1061,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
  *
  * FUNCTION:    acpi_ev_execute_reg_methods
  *
- * PARAMETERS:  Node            - Namespace node for the device
+ * PARAMETERS:  node            - Namespace node for the device
  *              space_id        - The address space ID
  *
  * RETURN:      Status
@@ -1104,7 +1104,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
  *
  * PARAMETERS:  walk_namespace callback
  *
- * DESCRIPTION: Run _REG method for region objects of the requested space_iD
+ * DESCRIPTION: Run _REG method for region objects of the requested spaceID
  *
  ******************************************************************************/
 
index 819c17f5897ab664b67f9656803986ae6912ad58..4c1c8261166ff100f4f58e08dddba56ad3cbff97 100644 (file)
@@ -56,8 +56,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
  *
  * FUNCTION:    acpi_ev_system_memory_region_setup
  *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
  *              handler_context     - Address space handler context
  *              region_context      - Region specific context
  *
@@ -118,8 +118,8 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
  *
  * FUNCTION:    acpi_ev_io_space_region_setup
  *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
  *              handler_context     - Address space handler context
  *              region_context      - Region specific context
  *
@@ -149,8 +149,8 @@ acpi_ev_io_space_region_setup(acpi_handle handle,
  *
  * FUNCTION:    acpi_ev_pci_config_region_setup
  *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
  *              handler_context     - Address space handler context
  *              region_context      - Region specific context
  *
@@ -338,7 +338,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
  *
  * FUNCTION:    acpi_ev_is_pci_root_bridge
  *
- * PARAMETERS:  Node            - Device node being examined
+ * PARAMETERS:  node            - Device node being examined
  *
  * RETURN:      TRUE if device is a PCI/PCI-Express Root Bridge
  *
@@ -393,14 +393,14 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
  *
  * FUNCTION:    acpi_ev_pci_bar_region_setup
  *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
  *              handler_context     - Address space handler context
  *              region_context      - Region specific context
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Setup a pci_bAR operation region
+ * DESCRIPTION: Setup a pci_BAR operation region
  *
  * MUTEX:       Assumes namespace is not locked
  *
@@ -420,8 +420,8 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
  *
  * FUNCTION:    acpi_ev_cmos_region_setup
  *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
  *              handler_context     - Address space handler context
  *              region_context      - Region specific context
  *
@@ -447,8 +447,8 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
  *
  * FUNCTION:    acpi_ev_default_region_setup
  *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
  *              handler_context     - Address space handler context
  *              region_context      - Region specific context
  *
index 6a57aa2d70d1af6982cbffb8197de6d73cae9847..f9661e2b46a981d8a53dca19425cfbd2b046ffbb 100644 (file)
@@ -56,7 +56,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
  *
  * FUNCTION:    acpi_ev_sci_xrupt_handler
  *
- * PARAMETERS:  Context   - Calling Context
+ * PARAMETERS:  context   - Calling Context
  *
  * RETURN:      Status code indicates whether interrupt was handled.
  *
@@ -96,7 +96,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
  *
  * FUNCTION:    acpi_ev_gpe_xrupt_handler
  *
- * PARAMETERS:  Context   - Calling Context
+ * PARAMETERS:  context   - Calling Context
  *
  * RETURN:      Status code indicates whether interrupt was handled.
  *
index 44bef5744ebb159f383f6b19515cf6346f656582..7587eb6c9584f4565e622512095b33fa77468f86 100644 (file)
 ACPI_MODULE_NAME("evxface")
 
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_populate_handler_object
- *
- * PARAMETERS:  handler_obj        - Handler object to populate
- *              handler_type       - The type of handler:
- *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- *                                  ACPI_ALL_NOTIFY:  both system and device
- *              handler            - Address of the handler
- *              context            - Value passed to the handler on each GPE
- *              next               - Address of a handler object to link to
- *
- * RETURN:      None
- *
- * DESCRIPTION: Populate a handler object.
- *
- ******************************************************************************/
-static void
-acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj,
-                            u32 handler_type,
-                            acpi_notify_handler handler, void *context,
-                            struct acpi_object_notify_handler *next)
-{
-       handler_obj->handler_type = handler_type;
-       handler_obj->handler = handler;
-       handler_obj->context = context;
-       handler_obj->next = next;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_add_handler_object
- *
- * PARAMETERS:  parent_obj         - Parent of the new object
- *              handler            - Address of the handler
- *              context            - Value passed to the handler on each GPE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new handler object and populate it.
- *
- ******************************************************************************/
-static acpi_status
-acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
-                       acpi_notify_handler handler, void *context)
-{
-       struct acpi_object_notify_handler *handler_obj;
-
-       /* The parent must not be a defice notify handler object. */
-       if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY)
-               return AE_BAD_PARAMETER;
-
-       handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj));
-       if (!handler_obj)
-               return AE_NO_MEMORY;
-
-       acpi_populate_handler_object(handler_obj,
-                                       ACPI_SYSTEM_NOTIFY,
-                                       handler, context,
-                                       parent_obj->next);
-       parent_obj->next = handler_obj;
-
-       return AE_OK;
-}
-
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_notify_handler
  *
  * PARAMETERS:  Device          - The device for which notifies will be handled
  *              handler_type    - The type of handler:
- *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- *                                  ACPI_ALL_NOTIFY:  both system and device
+ *                                  ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
+ *                                  ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
+ *                                  ACPI_ALL_NOTIFY:    Both System and Device
  *              Handler         - Address of the handler
  *              Context         - Value passed to the handler on each GPE
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Install a handler for notifies on an ACPI device
+ * DESCRIPTION: Install a handler for notifications on an ACPI Device,
+ *              thermal_zone, or Processor object.
+ *
+ * NOTES:       The Root namespace object may have only one handler for each
+ *              type of notify (System/Device). Device/Thermal/Processor objects
+ *              may have one device notify handler, and multiple system notify
+ *              handlers.
  *
  ******************************************************************************/
 acpi_status
@@ -141,17 +80,19 @@ acpi_install_notify_handler(acpi_handle device,
                            u32 handler_type,
                            acpi_notify_handler handler, void *context)
 {
+       struct acpi_namespace_node *node =
+           ACPI_CAST_PTR(struct acpi_namespace_node, device);
        union acpi_operand_object *obj_desc;
-       union acpi_operand_object *notify_obj;
-       struct acpi_namespace_node *node;
+       union acpi_operand_object *handler_obj;
        acpi_status status;
+       u32 i;
 
        ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
 
        /* Parameter validation */
 
-       if ((!device) ||
-           (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+       if ((!device) || (!handler) || (!handler_type) ||
+           (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
@@ -160,144 +101,112 @@ acpi_install_notify_handler(acpi_handle device,
                return_ACPI_STATUS(status);
        }
 
-       /* Convert and validate the device handle */
-
-       node = acpi_ns_validate_handle(device);
-       if (!node) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
        /*
         * Root Object:
         * Registering a notify handler on the root object indicates that the
         * caller wishes to receive notifications for all objects. Note that
-        * only one <external> global handler can be regsitered (per notify type).
+        * only one global handler can be registered per notify type.
+        * Ensure that a handler is not already installed.
         */
        if (device == ACPI_ROOT_OBJECT) {
+               for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+                       if (handler_type & (i + 1)) {
+                               if (acpi_gbl_global_notify[i].handler) {
+                                       status = AE_ALREADY_EXISTS;
+                                       goto unlock_and_exit;
+                               }
 
-               /* Make sure the handler is not already installed */
-
-               if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
-                    acpi_gbl_system_notify.handler) ||
-                   ((handler_type & ACPI_DEVICE_NOTIFY) &&
-                    acpi_gbl_device_notify.handler)) {
-                       status = AE_ALREADY_EXISTS;
-                       goto unlock_and_exit;
-               }
-
-               if (handler_type & ACPI_SYSTEM_NOTIFY) {
-                       acpi_gbl_system_notify.node = node;
-                       acpi_gbl_system_notify.handler = handler;
-                       acpi_gbl_system_notify.context = context;
-               }
-
-               if (handler_type & ACPI_DEVICE_NOTIFY) {
-                       acpi_gbl_device_notify.node = node;
-                       acpi_gbl_device_notify.handler = handler;
-                       acpi_gbl_device_notify.context = context;
+                               acpi_gbl_global_notify[i].handler = handler;
+                               acpi_gbl_global_notify[i].context = context;
+                       }
                }
 
-               /* Global notify handler installed */
+               goto unlock_and_exit;   /* Global notify handler installed, all done */
        }
 
        /*
         * All Other Objects:
-        * Caller will only receive notifications specific to the target object.
-        * Note that only certain object types can receive notifications.
+        * Caller will only receive notifications specific to the target
+        * object. Note that only certain object types are allowed to
+        * receive notifications.
         */
-       else {
-               /* Notifies allowed on this object? */
 
-               if (!acpi_ev_is_notify_object(node)) {
-                       status = AE_TYPE;
-                       goto unlock_and_exit;
-               }
+       /* Are Notifies allowed on this object? */
 
-               /* Check for an existing internal object */
+       if (!acpi_ev_is_notify_object(node)) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
 
-               obj_desc = acpi_ns_get_attached_object(node);
-               if (obj_desc) {
+       /* Check for an existing internal object, might not exist */
 
-                       /* Object exists. */
+       obj_desc = acpi_ns_get_attached_object(node);
+       if (!obj_desc) {
 
-                       /* For a device notify, make sure there's no handler. */
-                       if ((handler_type & ACPI_DEVICE_NOTIFY) &&
-                            obj_desc->common_notify.device_notify) {
-                               status = AE_ALREADY_EXISTS;
-                               goto unlock_and_exit;
-                       }
+               /* Create a new object */
 
-                       /* System notifies may have more handlers installed. */
-                       notify_obj = obj_desc->common_notify.system_notify;
+               obj_desc = acpi_ut_create_internal_object(node->type);
+               if (!obj_desc) {
+                       status = AE_NO_MEMORY;
+                       goto unlock_and_exit;
+               }
 
-                       if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) {
-                               struct acpi_object_notify_handler *parent_obj;
+               /* Attach new object to the Node, remove local reference */
+
+               status = acpi_ns_attach_object(device, obj_desc, node->type);
+               acpi_ut_remove_reference(obj_desc);
+               if (ACPI_FAILURE(status)) {
+                       goto unlock_and_exit;
+               }
+       }
 
-                               if (handler_type & ACPI_DEVICE_NOTIFY) {
+       /* Ensure that the handler is not already installed in the lists */
+
+       for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+               if (handler_type & (i + 1)) {
+                       handler_obj = obj_desc->common_notify.notify_list[i];
+                       while (handler_obj) {
+                               if (handler_obj->notify.handler == handler) {
                                        status = AE_ALREADY_EXISTS;
                                        goto unlock_and_exit;
                                }
 
-                               parent_obj = &notify_obj->notify;
-                               status = acpi_add_handler_object(parent_obj,
-                                                                handler,
-                                                                context);
-                               goto unlock_and_exit;
-                       }
-               } else {
-                       /* Create a new object */
-
-                       obj_desc = acpi_ut_create_internal_object(node->type);
-                       if (!obj_desc) {
-                               status = AE_NO_MEMORY;
-                               goto unlock_and_exit;
-                       }
-
-                       /* Attach new object to the Node */
-
-                       status =
-                           acpi_ns_attach_object(device, obj_desc, node->type);
-
-                       /* Remove local reference to the object */
-
-                       acpi_ut_remove_reference(obj_desc);
-                       if (ACPI_FAILURE(status)) {
-                               goto unlock_and_exit;
+                               handler_obj = handler_obj->notify.next[i];
                        }
                }
+       }
 
-               /* Install the handler */
+       /* Create and populate a new notify handler object */
 
-               notify_obj =
-                   acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
-               if (!notify_obj) {
-                       status = AE_NO_MEMORY;
-                       goto unlock_and_exit;
-               }
+       handler_obj = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
+       if (!handler_obj) {
+               status = AE_NO_MEMORY;
+               goto unlock_and_exit;
+       }
 
-               acpi_populate_handler_object(&notify_obj->notify,
-                                               handler_type,
-                                               handler, context,
-                                               NULL);
+       handler_obj->notify.node = node;
+       handler_obj->notify.handler_type = handler_type;
+       handler_obj->notify.handler = handler;
+       handler_obj->notify.context = context;
 
-               if (handler_type & ACPI_SYSTEM_NOTIFY) {
-                       obj_desc->common_notify.system_notify = notify_obj;
-               }
+       /* Install the handler at the list head(s) */
 
-               if (handler_type & ACPI_DEVICE_NOTIFY) {
-                       obj_desc->common_notify.device_notify = notify_obj;
-               }
+       for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+               if (handler_type & (i + 1)) {
+                       handler_obj->notify.next[i] =
+                           obj_desc->common_notify.notify_list[i];
 
-               if (handler_type == ACPI_ALL_NOTIFY) {
+                       obj_desc->common_notify.notify_list[i] = handler_obj;
+               }
+       }
 
-                       /* Extra ref if installed in both */
+       /* Add an extra reference if handler was installed in both lists */
 
-                       acpi_ut_add_reference(notify_obj);
-               }
+       if (handler_type == ACPI_ALL_NOTIFY) {
+               acpi_ut_add_reference(handler_obj);
        }
 
-      unlock_and_exit:
+unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        return_ACPI_STATUS(status);
 }
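
The reworked install path above keeps one singly linked handler list per notify type and links new handlers at the list head, so several callers can now watch the same device. A minimal caller-side sketch of the public API exercised here (the my_* names and the example messages are hypothetical, not part of this patch):

    /* Hypothetical usage of acpi_install_notify_handler() */

    static void my_notify_handler(acpi_handle device, u32 value, void *context)
    {
            /* "value" is the notify code supplied by the AML Notify() operator */
            acpi_os_printf("ACPI notify 0x%2.2X on %p\n", value, device);
    }

    static acpi_status my_driver_register(acpi_handle device)
    {
            /*
             * Device notifies only (codes 0x80-0xFF); ACPI_ALL_NOTIFY would
             * link the handler into both the system and the device list.
             */
            return acpi_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
                                               my_notify_handler, NULL);
    }

Installing the same handler address twice on one device now fails with AE_ALREADY_EXISTS, as enforced by the duplicate scan above.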
@@ -308,11 +217,11 @@ ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
  *
  * FUNCTION:    acpi_remove_notify_handler
  *
- * PARAMETERS:  Device          - The device for which notifies will be handled
+ * PARAMETERS:  Device          - The device for which the handler is installed
  *              handler_type    - The type of handler:
- *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- *                                  ACPI_ALL_NOTIFY:  both system and device
+ *                                  ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
+ *                                  ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
+ *                                  ACPI_ALL_NOTIFY:    Both System and Device
  *              Handler         - Address of the handler
  *
  * RETURN:      Status
@@ -324,165 +233,106 @@ acpi_status
 acpi_remove_notify_handler(acpi_handle device,
                           u32 handler_type, acpi_notify_handler handler)
 {
-       union acpi_operand_object *notify_obj;
+       struct acpi_namespace_node *node =
+           ACPI_CAST_PTR(struct acpi_namespace_node, device);
        union acpi_operand_object *obj_desc;
-       struct acpi_namespace_node *node;
+       union acpi_operand_object *handler_obj;
+       union acpi_operand_object *previous_handler_obj;
        acpi_status status;
+       u32 i;
 
        ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
 
        /* Parameter validation */
 
-       if ((!device) ||
-           (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
-               status = AE_BAD_PARAMETER;
-               goto exit;
+       if ((!device) || (!handler) || (!handler_type) ||
+           (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
-
-
        /* Make sure all deferred tasks are completed */
-       acpi_os_wait_events_complete(NULL);
+
+       acpi_os_wait_events_complete();
 
        status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
        if (ACPI_FAILURE(status)) {
-               goto exit;
-       }
-
-       /* Convert and validate the device handle */
-
-       node = acpi_ns_validate_handle(device);
-       if (!node) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
+               return_ACPI_STATUS(status);
        }
 
-       /* Root Object */
+       /* Root Object. Global handlers are removed here */
 
        if (device == ACPI_ROOT_OBJECT) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Removing notify handler for namespace root object\n"));
+               for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+                       if (handler_type & (i + 1)) {
+                               if (!acpi_gbl_global_notify[i].handler ||
+                                   (acpi_gbl_global_notify[i].handler !=
+                                    handler)) {
+                                       status = AE_NOT_EXIST;
+                                       goto unlock_and_exit;
+                               }
 
-               if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
-                    !acpi_gbl_system_notify.handler) ||
-                   ((handler_type & ACPI_DEVICE_NOTIFY) &&
-                    !acpi_gbl_device_notify.handler)) {
-                       status = AE_NOT_EXIST;
-                       goto unlock_and_exit;
-               }
+                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                                 "Removing global notify handler\n"));
 
-               if (handler_type & ACPI_SYSTEM_NOTIFY) {
-                       acpi_gbl_system_notify.node = NULL;
-                       acpi_gbl_system_notify.handler = NULL;
-                       acpi_gbl_system_notify.context = NULL;
+                               acpi_gbl_global_notify[i].handler = NULL;
+                               acpi_gbl_global_notify[i].context = NULL;
+                       }
                }
 
-               if (handler_type & ACPI_DEVICE_NOTIFY) {
-                       acpi_gbl_device_notify.node = NULL;
-                       acpi_gbl_device_notify.handler = NULL;
-                       acpi_gbl_device_notify.context = NULL;
-               }
+               goto unlock_and_exit;
        }
 
-       /* All Other Objects */
+       /* All other objects: Are Notifies allowed on this object? */
 
-       else {
-               /* Notifies allowed on this object? */
+       if (!acpi_ev_is_notify_object(node)) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
 
-               if (!acpi_ev_is_notify_object(node)) {
-                       status = AE_TYPE;
-                       goto unlock_and_exit;
-               }
+       /* Must have an existing internal object */
 
-               /* Check for an existing internal object */
+       obj_desc = acpi_ns_get_attached_object(node);
+       if (!obj_desc) {
+               status = AE_NOT_EXIST;
+               goto unlock_and_exit;
+       }
 
-               obj_desc = acpi_ns_get_attached_object(node);
-               if (!obj_desc) {
-                       status = AE_NOT_EXIST;
-                       goto unlock_and_exit;
-               }
+       /* Internal object exists. Find the handler and remove it */
 
-               /* Object exists - make sure there's an existing handler */
+       for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+               if (handler_type & (i + 1)) {
+                       handler_obj = obj_desc->common_notify.notify_list[i];
+                       previous_handler_obj = NULL;
 
-               if (handler_type & ACPI_SYSTEM_NOTIFY) {
-                       struct acpi_object_notify_handler *handler_obj;
-                       struct acpi_object_notify_handler *parent_obj;
+                       /* Attempt to find the handler in the handler list */
 
-                       notify_obj = obj_desc->common_notify.system_notify;
-                       if (!notify_obj) {
-                               status = AE_NOT_EXIST;
-                               goto unlock_and_exit;
-                       }
-
-                       handler_obj = &notify_obj->notify;
-                       parent_obj = NULL;
-                       while (handler_obj->handler != handler) {
-                               if (handler_obj->next) {
-                                       parent_obj = handler_obj;
-                                       handler_obj = handler_obj->next;
-                               } else {
-                                       break;
-                               }
+                       while (handler_obj &&
+                              (handler_obj->notify.handler != handler)) {
+                               previous_handler_obj = handler_obj;
+                               handler_obj = handler_obj->notify.next[i];
                        }
 
-                       if (handler_obj->handler != handler) {
-                               status = AE_BAD_PARAMETER;
+                       if (!handler_obj) {
+                               status = AE_NOT_EXIST;
                                goto unlock_and_exit;
                        }
 
-                       /*
-                        * Remove the handler.  There are three possible cases.
-                        * First, we may need to remove a non-embedded object.
-                        * Second, we may need to remove the embedded object's
-                        * handler data, while non-embedded objects exist.
-                        * Finally, we may need to remove the embedded object
-                        * entirely along with its container.
-                        */
-                       if (parent_obj) {
-                               /* Non-embedded object is being removed. */
-                               parent_obj->next = handler_obj->next;
-                               ACPI_FREE(handler_obj);
-                       } else if (notify_obj->notify.next) {
-                               /*
-                                * The handler matches the embedded object, but
-                                * there are more handler objects in the list.
-                                * Replace the embedded object's data with the
-                                * first next object's data and remove that
-                                * object.
-                                */
-                               parent_obj = &notify_obj->notify;
-                               handler_obj = notify_obj->notify.next;
-                               *parent_obj = *handler_obj;
-                               ACPI_FREE(handler_obj);
-                       } else {
-                               /* No more handler objects in the list. */
-                               obj_desc->common_notify.system_notify = NULL;
-                               acpi_ut_remove_reference(notify_obj);
-                       }
-               }
+                       /* Remove the handler object from the list */
 
-               if (handler_type & ACPI_DEVICE_NOTIFY) {
-                       notify_obj = obj_desc->common_notify.device_notify;
-                       if (!notify_obj) {
-                               status = AE_NOT_EXIST;
-                               goto unlock_and_exit;
-                       }
+                       if (previous_handler_obj) {     /* Handler is not at the list head */
+                               previous_handler_obj->notify.next[i] =
+                                   handler_obj->notify.next[i];
+                       } else {        /* Handler is at the list head */
 
-                       if (notify_obj->notify.handler != handler) {
-                               status = AE_BAD_PARAMETER;
-                               goto unlock_and_exit;
+                               obj_desc->common_notify.notify_list[i] =
+                                   handler_obj->notify.next[i];
                        }
 
-                       /* Remove the handler */
-                       obj_desc->common_notify.device_notify = NULL;
-                       acpi_ut_remove_reference(notify_obj);
+                       acpi_ut_remove_reference(handler_obj);
                }
        }
 
-      unlock_and_exit:
+unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-      exit:
-       if (ACPI_FAILURE(status))
-               ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
        return_ACPI_STATUS(status);
 }
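
Removal is symmetric: the handler is located by address in each selected list, unlinked, and its reference dropped, and a handler that was never installed now yields AE_NOT_EXIST rather than AE_BAD_PARAMETER. A hypothetical teardown counterpart to the sketch above:

    static void my_driver_unregister(acpi_handle device)
    {
            acpi_status status;

            /* Must match the handler_type and handler used at install time */
            status = acpi_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
                                                my_notify_handler);
            if (ACPI_FAILURE(status)) {
                    acpi_os_printf("Remove failed: %s\n",
                                   acpi_format_exception(status));
            }
    }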
 
@@ -492,7 +342,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
  *
  * FUNCTION:    acpi_install_exception_handler
  *
- * PARAMETERS:  Handler         - Pointer to the handler function for the
+ * PARAMETERS:  handler         - Pointer to the handler function for the
  *                                event
  *
  * RETURN:      Status
@@ -536,8 +386,8 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
  *
  * FUNCTION:    acpi_install_global_event_handler
  *
- * PARAMETERS:  Handler         - Pointer to the global event handler function
- *              Context         - Value passed to the handler on each event
+ * PARAMETERS:  handler         - Pointer to the global event handler function
+ *              context         - Value passed to the handler on each event
  *
  * RETURN:      Status
  *
@@ -586,10 +436,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
  *
  * FUNCTION:    acpi_install_fixed_event_handler
  *
- * PARAMETERS:  Event           - Event type to enable.
- *              Handler         - Pointer to the handler function for the
+ * PARAMETERS:  event           - Event type to enable.
+ *              handler         - Pointer to the handler function for the
  *                                event
- *              Context         - Value passed to the handler on each GPE
+ *              context         - Value passed to the handler on each event
  *
  * RETURN:      Status
  *
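
For comparison with the notify interface above, the fixed-event API documented here is keyed by an ACPI_EVENT_* index rather than a device handle, and the handler returns an interrupt-status value. A hedged sketch (my_* names are invented for illustration):

    /* Hypothetical fixed-event handler registration */

    static u32 my_power_button_handler(void *context)
    {
            acpi_os_printf("Power button fixed event\n");
            return ACPI_INTERRUPT_HANDLED;
    }

    static acpi_status my_hook_power_button(void)
    {
            return acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
                                                    my_power_button_handler,
                                                    NULL);
    }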
@@ -656,8 +506,8 @@ ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
  *
  * FUNCTION:    acpi_remove_fixed_event_handler
  *
- * PARAMETERS:  Event           - Event type to disable.
- *              Handler         - Address of the handler
+ * PARAMETERS:  event           - Event type to disable.
+ *              handler         - Address of the handler
  *
  * RETURN:      Status
  *
@@ -713,10 +563,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
  * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
  *                                defined GPEs)
  *              gpe_number      - The GPE number within the GPE block
- *              Type            - Whether this GPE should be treated as an
+ *              type            - Whether this GPE should be treated as an
  *                                edge- or level-triggered interrupt.
- *              Address         - Address of the handler
- *              Context         - Value passed to the handler on each GPE
+ *              address         - Address of the handler
+ *              context         - Value passed to the handler on each GPE
  *
  * RETURN:      Status
  *
@@ -823,7 +673,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
  * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
  *                                defined GPEs)
  *              gpe_number      - The event to remove a handler
- *              Address         - Address of the handler
+ *              address         - Address of the handler
  *
  * RETURN:      Status
  *
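
The GPE handler pair documented here takes the parent GPE block device (NULL for the FADT-defined GPE0/GPE1 blocks), the GPE number, and an edge/level type flag; the hunk that follows also drops the unused argument from acpi_os_wait_events_complete() on the removal path. A hypothetical install/remove sketch (the GPE number and my_* names are illustrative):

    /* Hypothetical handler for a FADT-defined GPE (gpe_device == NULL) */

    static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
                              void *context)
    {
            /* Let ACPICA clear and re-enable the GPE after we return */
            return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
    }

    static void my_gpe_example(void)
    {
            acpi_install_gpe_handler(NULL, 0x16, ACPI_GPE_LEVEL_TRIGGERED,
                                     my_gpe_handler, NULL);
            /* ... */
            acpi_remove_gpe_handler(NULL, 0x16, my_gpe_handler);
    }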
@@ -849,7 +699,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
 
        /* Make sure all deferred tasks are completed */
 
-       acpi_os_wait_events_complete(NULL);
+       acpi_os_wait_events_complete();
 
        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
@@ -919,8 +769,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
  *
  * FUNCTION:    acpi_acquire_global_lock
  *
- * PARAMETERS:  Timeout         - How long the caller is willing to wait
- *              Handle          - Where the handle to the lock is returned
+ * PARAMETERS:  timeout         - How long the caller is willing to wait
+ *              handle          - Where the handle to the lock is returned
  *                                (if acquired)
  *
  * RETURN:      Status
@@ -967,7 +817,7 @@ ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
  *
  * FUNCTION:    acpi_release_global_lock
  *
- * PARAMETERS:  Handle      - Returned from acpi_acquire_global_lock
+ * PARAMETERS:  handle      - Returned from acpi_acquire_global_lock
  *
  * RETURN:      Status
  *
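
The global lock documented above serializes access to hardware that is shared with the firmware (typically the EC or SMBus). A short hedged sketch of the acquire/release pair (my_* name is invented):

    /* Hypothetical global-lock usage */

    static acpi_status my_locked_access(void)
    {
            u32 lock_handle;
            acpi_status status;

            status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &lock_handle);
            if (ACPI_FAILURE(status)) {
                    return status;
            }

            /* ... touch the firmware-shared resource ... */

            return acpi_release_global_lock(lock_handle);
    }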
index 77cee5a5e891b5ea5eb22245afd34ea73a070651..35520c6eeefb4b52fbab8971fd0c36a8ae115a46 100644 (file)
@@ -153,8 +153,8 @@ ACPI_EXPORT_SYMBOL(acpi_disable)
  *
  * FUNCTION:    acpi_enable_event
  *
- * PARAMETERS:  Event           - The fixed eventto be enabled
- *              Flags           - Reserved
+ * PARAMETERS:  event           - The fixed event to be enabled
+ *              flags           - Reserved
  *
  * RETURN:      Status
  *
@@ -265,7 +265,7 @@ ACPI_EXPORT_SYMBOL(acpi_disable_event)
  *
  * FUNCTION:    acpi_clear_event
  *
- * PARAMETERS:  Event           - The fixed event to be cleared
+ * PARAMETERS:  event           - The fixed event to be cleared
  *
  * RETURN:      Status
  *
@@ -301,7 +301,7 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
  *
  * FUNCTION:    acpi_get_event_status
  *
- * PARAMETERS:  Event           - The fixed event
+ * PARAMETERS:  event           - The fixed event
  *              event_status    - Where the current status of the event will
  *                                be returned
  *
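
These wrappers take the same ACPI_EVENT_* fixed-event index as the handler-registration API above. A hedged sketch that enables, queries and clears a fixed event (the my_* name and the RTC event choice are illustrative):

    static void my_poll_rtc_event(void)
    {
            acpi_event_status event_status = 0;

            acpi_enable_event(ACPI_EVENT_RTC, 0);   /* flags are reserved */

            if (ACPI_SUCCESS(acpi_get_event_status(ACPI_EVENT_RTC,
                                                   &event_status)) &&
                (event_status & ACPI_EVENT_FLAG_SET)) {
                    acpi_clear_event(ACPI_EVENT_RTC);
            }
    }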
index 86f9b343ebd404c45833eda4b88fb37a4f6ec5f7..6affbdb4b88c930567f8c3a852b8c051fbbb0dbf 100644 (file)
@@ -197,12 +197,12 @@ acpi_status
 acpi_setup_gpe_for_wake(acpi_handle wake_device,
                        acpi_handle gpe_device, u32 gpe_number)
 {
-       acpi_status status = AE_BAD_PARAMETER;
+       acpi_status status;
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_namespace_node *device_node;
-       struct acpi_gpe_notify_object *notify_object;
+       struct acpi_gpe_notify_info *notify;
+       struct acpi_gpe_notify_info *new_notify;
        acpi_cpu_flags flags;
-       u8 gpe_dispatch_mask;
 
        ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
 
@@ -216,63 +216,95 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
+       /* Handle root object case */
+
+       if (wake_device == ACPI_ROOT_OBJECT) {
+               device_node = acpi_gbl_root_node;
+       } else {
+               device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+       }
+
+       /* Validate wake_device is of type Device */
+
+       if (device_node->type != ACPI_TYPE_DEVICE) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /*
+        * Allocate a new notify object up front, in case it is needed.
+        * Memory allocation while holding a spinlock is a big no-no
+        * on some hosts.
+        */
+       new_notify = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_notify_info));
+       if (!new_notify) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 
        /* Ensure that we have a valid GPE number */
 
        gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
        if (!gpe_event_info) {
+               status = AE_BAD_PARAMETER;
                goto unlock_and_exit;
        }
 
-       if (wake_device == ACPI_ROOT_OBJECT) {
-               goto out;
-       }
-
        /*
         * If there is no method or handler for this GPE, then the
-        * wake_device will be notified whenever this GPE fires (aka
-        * "implicit notify") Note: The GPE is assumed to be
+        * wake_device will be notified whenever this GPE fires. This is
+        * known as an "implicit notify". Note: The GPE is assumed to be
         * level-triggered (for windows compatibility).
         */
-       gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
-       if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
-           && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
-               goto out;
-       }
-
-       /* Validate wake_device is of type Device */
-
-       device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
-       if (device_node->type != ACPI_TYPE_DEVICE) {
-               goto unlock_and_exit;
+       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+           ACPI_GPE_DISPATCH_NONE) {
+               /*
+                * This is the first device for implicit notify on this GPE.
+                * Just set the flags here, and enter the NOTIFY block below.
+                */
+               gpe_event_info->flags =
+                   (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
        }
 
-       if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
-               gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
-                                        ACPI_GPE_LEVEL_TRIGGERED);
-               gpe_event_info->dispatch.device.node = device_node;
-               gpe_event_info->dispatch.device.next = NULL;
-       } else {
-               /* There are multiple devices to notify implicitly. */
-
-               notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
-               if (!notify_object) {
-                       status = AE_NO_MEMORY;
-                       goto unlock_and_exit;
+       /*
+        * If we already have an implicit notify on this GPE, add
+        * this device to the notify list.
+        */
+       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+           ACPI_GPE_DISPATCH_NOTIFY) {
+
+               /* Ensure that the device is not already in the list */
+
+               notify = gpe_event_info->dispatch.notify_list;
+               while (notify) {
+                       if (notify->device_node == device_node) {
+                               status = AE_ALREADY_EXISTS;
+                               goto unlock_and_exit;
+                       }
+                       notify = notify->next;
                }
 
-               notify_object->node = device_node;
-               notify_object->next = gpe_event_info->dispatch.device.next;
-               gpe_event_info->dispatch.device.next = notify_object;
+               /* Add this device to the notify list for this GPE */
+
+               new_notify->device_node = device_node;
+               new_notify->next = gpe_event_info->dispatch.notify_list;
+               gpe_event_info->dispatch.notify_list = new_notify;
+               new_notify = NULL;
        }
 
- out:
+       /* Mark the GPE as a possible wake event */
+
        gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
        status = AE_OK;
 
- unlock_and_exit:
+unlock_and_exit:
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+       /* Delete the notify object if it was not used above */
+
+       if (new_notify) {
+               ACPI_FREE(new_notify);
+       }
        return_ACPI_STATUS(status);
 }
 ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
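
The rework above allocates the implicit-notify entry before taking the GPE lock, accepts ACPI_ROOT_OBJECT as wake_device, and keeps one notify list per GPE so multiple devices can share a wake GPE. A hypothetical caller sketch (the my_* name is invented; error handling trimmed):

    /* Mark a _PRW GPE wake-capable with implicit notify on wake_dev */

    static acpi_status my_setup_wake(acpi_handle wake_dev,
                                     acpi_handle gpe_device, u32 gpe_number)
    {
            acpi_status status;

            status = acpi_setup_gpe_for_wake(wake_dev, gpe_device, gpe_number);
            if (ACPI_FAILURE(status)) {
                    return status;
            }

            /*
             * The GPE still has to be put in the wake-enable mask before
             * suspend, e.g. via acpi_set_gpe_wake_mask(gpe_device,
             * gpe_number, ACPI_GPE_ENABLE).
             */
            return acpi_enable_gpe(gpe_device, gpe_number);
    }

A second registration of the same wake_device on the same GPE now returns AE_ALREADY_EXISTS instead of silently adding a duplicate list entry.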
@@ -283,7 +315,7 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
  *
  * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
  *              gpe_number      - GPE level within the GPE block
- *              Action          - Enable or Disable
+ *              action          - Enable or Disable
  *
  * RETURN:      Status
  *
@@ -508,7 +540,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
  * FUNCTION:    acpi_install_gpe_block
  *
  * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *              gpe_block_address   - Address and space_iD
+ *              gpe_block_address   - Address and space_ID
  *              register_count      - Number of GPE register pairs in the block
  *              interrupt_number    - H/W interrupt for the block
  *
@@ -653,7 +685,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
  *
  * FUNCTION:    acpi_get_gpe_device
  *
- * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
+ * PARAMETERS:  index               - System GPE index (0-current_gpe_count)
  *              gpe_device          - Where the parent GPE Device is returned
  *
  * RETURN:      Status
index 6019208cd4b6f522227b70cb5577f13406b5b221..96b412d0395017fd41e12594d864ade0c949ae73 100644 (file)
@@ -55,11 +55,11 @@ ACPI_MODULE_NAME("evxfregn")
  *
  * FUNCTION:    acpi_install_address_space_handler
  *
- * PARAMETERS:  Device          - Handle for the device
+ * PARAMETERS:  device          - Handle for the device
  *              space_id        - The address space ID
- *              Handler         - Address of the handler
- *              Setup           - Address of the setup function
- *              Context         - Value passed to the handler on each access
+ *              handler         - Address of the handler
+ *              setup           - Address of the setup function
+ *              context         - Value passed to the handler on each access
  *
  * RETURN:      Status
  *
@@ -112,16 +112,16 @@ acpi_install_address_space_handler(acpi_handle device,
        }
 
        /*
-        * For the default space_iDs, (the IDs for which there are default region handlers
+        * For the default space_IDs, (the IDs for which there are default region handlers
         * installed) Only execute the _REG methods if the global initialization _REG
         * methods have already been run (via acpi_initialize_objects). In other words,
-        * we will defer the execution of the _REG methods for these space_iDs until
+        * we will defer the execution of the _REG methods for these space_IDs until
         * execution of acpi_initialize_objects. This is done because we need the handlers
         * for the default spaces (mem/io/pci/table) to be installed before we can run
         * any control methods (or _REG methods). There is known BIOS code that depends
         * on this.
         *
-        * For all other space_iDs, we can safely execute the _REG methods immediately.
+        * For all other space_IDs, we can safely execute the _REG methods immediately.
         * This means that for IDs like embedded_controller, this function should be called
         * only after acpi_enable_subsystem has been called.
         */
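
The deferral described in this comment matters to callers: handlers for the default spaces can be installed before the namespace is initialized, while handlers for other IDs (embedded_controller, for example) should only be installed after acpi_enable_subsystem() so that their _REG methods can run immediately. A hypothetical EC handler sketch (my_* names are illustrative):

    static acpi_status
    my_ec_space_handler(u32 function, acpi_physical_address address,
                        u32 bit_width, u64 *value,
                        void *handler_context, void *region_context)
    {
            /* function is ACPI_READ or ACPI_WRITE; *value is the in/out data */
            return AE_OK;
    }

    static acpi_status my_install_ec_handler(acpi_handle ec_handle)
    {
            return acpi_install_address_space_handler(ec_handle,
                                                      ACPI_ADR_SPACE_EC,
                                                      my_ec_space_handler,
                                                      NULL,  /* default setup */
                                                      NULL); /* context */
    }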
@@ -157,9 +157,9 @@ ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
  *
  * FUNCTION:    acpi_remove_address_space_handler
  *
- * PARAMETERS:  Device          - Handle for the device
+ * PARAMETERS:  device          - Handle for the device
  *              space_id        - The address space ID
- *              Handler         - Address of the handler
+ *              handler         - Address of the handler
  *
  * RETURN:      Status
  *
index c86d44e41bc85bf8935a56c4a2f5c4e348bd0dc7..16219bde48da2ca4a7b2a7c60e427530da395a10 100644 (file)
@@ -66,7 +66,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc,
  *
  * FUNCTION:    acpi_ex_add_table
  *
- * PARAMETERS:  Table               - Pointer to raw table
+ * PARAMETERS:  table               - Pointer to raw table
  *              parent_node         - Where to load the table (scope)
  *              ddb_handle          - Where to return the table handle.
  *
@@ -276,8 +276,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ex_region_read
  *
  * PARAMETERS:  obj_desc        - Region descriptor
- *              Length          - Number of bytes to read
- *              Buffer          - Pointer to where to put the data
+ *              length          - Number of bytes to read
+ *              buffer          - Pointer to where to put the data
  *
  * RETURN:      Status
  *
@@ -318,7 +318,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
  *
  * PARAMETERS:  obj_desc        - Region or Buffer/Field where the table will be
  *                                obtained
- *              Target          - Where a handle to the table will be stored
+ *              target          - Where a handle to the table will be stored
  *              walk_state      - Current state
  *
  * RETURN:      Status
index e385436bd42422ef81ad64ca2decfa9a14eaeae7..bfb062e4c4b449cb444aae2c3ca32a7b7c7ed710 100644 (file)
@@ -60,7 +60,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
  * PARAMETERS:  obj_desc        - Object to be converted. Must be an
  *                                Integer, Buffer, or String
  *              result_desc     - Where the new Integer object is returned
- *              Flags           - Used for string conversion
+ *              flags           - Used for string conversion
  *
  * RETURN:      Status
  *
@@ -272,9 +272,9 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
  *
  * FUNCTION:    acpi_ex_convert_to_ascii
  *
- * PARAMETERS:  Integer         - Value to be converted
- *              Base            - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
- *              String          - Where the string is returned
+ * PARAMETERS:  integer         - Value to be converted
+ *              base            - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
+ *              string          - Where the string is returned
  *              data_width      - Size of data item to be converted, in bytes
  *
  * RETURN:      Actual string length
@@ -385,7 +385,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
  * PARAMETERS:  obj_desc        - Object to be converted. Must be an
  *                                Integer, Buffer, or String
  *              result_desc     - Where the string object is returned
- *              Type            - String flags (base and conversion type)
+ *              type            - String flags (base and conversion type)
  *
  * RETURN:      Status
  *
index 3f5bc998c1cb15fc7f9e212faa97552b0e87465e..691d4763102ca8db6a3aab3dce6530826299e277 100644 (file)
@@ -369,7 +369,7 @@ acpi_ex_create_region(u8 * aml_start,
  *
  * DESCRIPTION: Create a new processor object and populate the fields
  *
- *              Processor (Name[0], cpu_iD[1], pblock_addr[2], pblock_length[3])
+ *              Processor (Name[0], cpu_ID[1], pblock_addr[2], pblock_length[3])
  *
  ******************************************************************************/
 
index e211e9c192159b10af4c9a68bc17dfba742b57fd..bc5b9a6a131638ab8014228ea89ae95225955e3a 100644 (file)
@@ -54,8 +54,8 @@ ACPI_MODULE_NAME("exdebug")
  * FUNCTION:    acpi_ex_do_debug_object
  *
  * PARAMETERS:  source_desc         - Object to be output to "Debug Object"
- *              Level               - Indentation level (used for packages)
- *              Index               - Current package element, zero if not pkg
+ *              level               - Indentation level (used for packages)
+ *              index               - Current package element, zero if not pkg
  *
  * RETURN:      None
  *
index 2a6ac0a3bc1e661b25876a1247fbe763279ef91d..213c081776fc311b267f51268c72f83a2c34e855 100644 (file)
@@ -109,9 +109,9 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
 static struct acpi_exdump_info acpi_ex_dump_device[4] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
         "System Notify"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
         "Device Notify"}
 };
 
@@ -158,9 +158,9 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
         "System Level"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order),
         "Resource Order"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
         "System Notify"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
         "Device Notify"}
 };
 
@@ -169,18 +169,18 @@ static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
        {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[0]),
         "System Notify"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[1]),
         "Device Notify"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_thermal[4] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[0]),
         "System Notify"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify),
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[1]),
         "Device Notify"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"}
 };
@@ -241,10 +241,15 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_notify[3] = {
+static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"}
+       {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[0]),
+        "Next System Notify"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
 };
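
The extended dump table above reflects the new notify layout from earlier in this series: each handler object records its type and links into one of two per-object lists. An illustrative view of the fields being dumped (not the literal acobject.h declaration):

    /* Illustrative only; mirrors the offsets named in the table above */
    struct example_notify_fields {
            struct acpi_namespace_node *node;       /* "Node" */
            u32 handler_type;                       /* "Handler Type" */
            acpi_notify_handler handler;            /* "Handler" */
            void *context;                          /* "Context" */
            union acpi_operand_object *next[2];     /* [0] system, [1] device */
    };

The common_notify.notify_list[0]/[1] pointers shown in the device, power, processor and thermal tables above are the heads of those two lists.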
 
 /* Miscellaneous tables */
@@ -318,7 +323,7 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
  * FUNCTION:    acpi_ex_dump_object
  *
  * PARAMETERS:  obj_desc            - Descriptor to dump
- *              Info                - Info table corresponding to this object
+ *              info                - Info table corresponding to this object
  *                                    type
  *
  * RETURN:      None
@@ -444,7 +449,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
  * FUNCTION:    acpi_ex_dump_operand
  *
  * PARAMETERS:  *obj_desc       - Pointer to entry to be dumped
- *              Depth           - Current nesting depth
+ *              depth           - Current nesting depth
  *
  * RETURN:      None
  *
@@ -726,7 +731,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
  *
  * FUNCTION:    acpi_ex_dump_operands
  *
- * PARAMETERS: Operands            - A list of Operand objects
+ * PARAMETERS:  operands            - A list of Operand objects
  *             opcode_name         - AML opcode name
  *             num_operands        - Operand count for this opcode
  *
@@ -769,8 +774,8 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
  *
  * FUNCTION:    acpi_ex_out* functions
  *
- * PARAMETERS:  Title               - Descriptive text
- *              Value               - Value to be displayed
+ * PARAMETERS:  title               - Descriptive text
+ *              value               - Value to be displayed
  *
  * DESCRIPTION: Object dump output formatting functions.  These functions
  *              reduce the number of format strings required and keeps them
@@ -792,8 +797,8 @@ static void acpi_ex_out_pointer(char *title, void *value)
  *
  * FUNCTION:    acpi_ex_dump_namespace_node
  *
- * PARAMETERS:  Node                - Descriptor to dump
- *              Flags               - Force display if TRUE
+ * PARAMETERS:  node                - Descriptor to dump
+ *              flags               - Force display if TRUE
  *
  * DESCRIPTION: Dumps the members of the given.Node
  *
@@ -825,7 +830,7 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
  *
  * FUNCTION:    acpi_ex_dump_reference_obj
  *
- * PARAMETERS:  Object              - Descriptor to dump
+ * PARAMETERS:  object              - Descriptor to dump
  *
  * DESCRIPTION: Dumps a reference object
  *
@@ -882,8 +887,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
  * FUNCTION:    acpi_ex_dump_package_obj
  *
  * PARAMETERS:  obj_desc            - Descriptor to dump
- *              Level               - Indentation Level
- *              Index               - Package index for this object
+ *              level               - Indentation Level
+ *              index               - Package index for this object
  *
  * DESCRIPTION: Dumps the elements of the package
  *
@@ -926,9 +931,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
        case ACPI_TYPE_STRING:
 
                acpi_os_printf("[String] Value: ");
-               for (i = 0; i < obj_desc->string.length; i++) {
-                       acpi_os_printf("%c", obj_desc->string.pointer[i]);
-               }
+               acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
                acpi_os_printf("\n");
                break;
 
@@ -977,7 +980,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
  * FUNCTION:    acpi_ex_dump_object_descriptor
  *
  * PARAMETERS:  obj_desc            - Descriptor to dump
- *              Flags               - Force display if TRUE
+ *              flags               - Force display if TRUE
  *
  * DESCRIPTION: Dumps the members of the object descriptor given.
  *
index 149de45fdaddc659e134903e96689ce64289f31c..a7784152ed30b5f03d33d348593a1d4b7aa7f207 100644 (file)
@@ -222,9 +222,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
  * PARAMETERS:  obj_desc                - Field to be read
  *              field_datum_byte_offset - Byte offset of this datum within the
  *                                        parent field
- *              Value                   - Where to store value (must at least
+ *              value                   - Where to store value (must be at least
  *                                        64 bits)
- *              Function                - Read or Write flag plus other region-
+ *              function                - Read or Write flag plus other region-
  *                                        dependent flags
  *
  * RETURN:      Status
@@ -315,7 +315,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
  * FUNCTION:    acpi_ex_register_overflow
  *
  * PARAMETERS:  obj_desc                - Register(Field) to be written
- *              Value                   - Value to be stored
+ *              value                   - Value to be stored
  *
  * RETURN:      TRUE if value overflows the field, FALSE otherwise
  *
@@ -365,7 +365,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
  * PARAMETERS:  obj_desc                - Field to be read
  *              field_datum_byte_offset - Byte offset of this datum within the
  *                                        parent field
- *              Value                   - Where to store value (must be 64 bits)
+ *              value                   - Where to store value (must be 64 bits)
  *              read_write              - Read or Write flag
  *
  * RETURN:      Status
@@ -574,7 +574,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
  * FUNCTION:    acpi_ex_write_with_update_rule
  *
  * PARAMETERS:  obj_desc                - Field to be written
- *              Mask                    - bitmask within field datum
+ *              mask                    - bitmask within field datum
  *              field_value             - Value to write
  *              field_datum_byte_offset - Offset of datum within field
  *
@@ -678,7 +678,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
  * FUNCTION:    acpi_ex_extract_from_field
  *
  * PARAMETERS:  obj_desc            - Field to be read
- *              Buffer              - Where to store the field data
+ *              buffer              - Where to store the field data
  *              buffer_length       - Length of Buffer
  *
  * RETURN:      Status
@@ -823,7 +823,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
  * FUNCTION:    acpi_ex_insert_into_field
  *
  * PARAMETERS:  obj_desc            - Field to be written
- *              Buffer              - Data to be written
+ *              buffer              - Data to be written
  *              buffer_length       - Length of Buffer
  *
  * RETURN:      Status
index 0a0893310348e6ad35a8cf6cf1f56d9f1ba18961..271c0c57ea10dded9a7e681d46b2872d1da36452 100644 (file)
@@ -144,8 +144,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
  *
  * FUNCTION:    acpi_ex_concat_template
  *
- * PARAMETERS:  Operand0            - First source object
- *              Operand1            - Second source object
+ * PARAMETERS:  operand0            - First source object
+ *              operand1            - Second source object
  *              actual_return_desc  - Where to place the return object
  *              walk_state          - Current walk state
  *
@@ -229,8 +229,8 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
  *
  * FUNCTION:    acpi_ex_do_concatenate
  *
- * PARAMETERS:  Operand0            - First source object
- *              Operand1            - Second source object
+ * PARAMETERS:  operand0            - First source object
+ *              operand1            - Second source object
  *              actual_return_desc  - Where to place the return object
  *              walk_state          - Current walk state
  *
@@ -397,9 +397,9 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
  *
  * FUNCTION:    acpi_ex_do_math_op
  *
- * PARAMETERS:  Opcode              - AML opcode
- *              Integer0            - Integer operand #0
- *              Integer1            - Integer operand #1
+ * PARAMETERS:  opcode              - AML opcode
+ *              integer0            - Integer operand #0
+ *              integer1            - Integer operand #1
  *
  * RETURN:      Integer result of the operation
  *
@@ -479,9 +479,9 @@ u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1)
  *
  * FUNCTION:    acpi_ex_do_logical_numeric_op
  *
- * PARAMETERS:  Opcode              - AML opcode
- *              Integer0            - Integer operand #0
- *              Integer1            - Integer operand #1
+ * PARAMETERS:  opcode              - AML opcode
+ *              integer0            - Integer operand #0
+ *              integer1            - Integer operand #1
  *              logical_result      - TRUE/FALSE result of the operation
  *
  * RETURN:      Status
@@ -534,9 +534,9 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
  *
  * FUNCTION:    acpi_ex_do_logical_op
  *
- * PARAMETERS:  Opcode              - AML opcode
- *              Operand0            - operand #0
- *              Operand1            - operand #1
+ * PARAMETERS:  opcode              - AML opcode
+ *              operand0            - operand #0
+ *              operand1            - operand #1
  *              logical_result      - TRUE/FALSE result of the operation
  *
  * RETURN:      Status
index 60933e9dc3c0a6cf07f207cdb8d86894ce03c0da..bcceda5be9e3ece853d9864d5280346000276975 100644 (file)
@@ -102,7 +102,7 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
  * FUNCTION:    acpi_ex_link_mutex
  *
  * PARAMETERS:  obj_desc            - The mutex to be linked
- *              Thread              - Current executing thread object
+ *              thread              - Current executing thread object
  *
  * RETURN:      None
  *
@@ -138,7 +138,7 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
  *
  * FUNCTION:    acpi_ex_acquire_mutex_object
  *
- * PARAMETERS:  Timeout             - Timeout in milliseconds
+ * PARAMETERS:  timeout             - Timeout in milliseconds
  *              obj_desc            - Mutex object
  *              thread_id           - Current thread state
  *
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
  *
  * FUNCTION:    acpi_ex_release_all_mutexes
  *
- * PARAMETERS:  Thread              - Current executing thread object
+ * PARAMETERS:  thread              - Current executing thread object
  *
  * RETURN:      Status
  *
index 30157f5a12d7d774386ab834c6ee819232f23b2e..81eca60d274865d1f7e9b120b85c5f41e77dc4dc 100644 (file)
@@ -391,12 +391,12 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
  *
  * FUNCTION:    acpi_ex_prep_field_value
  *
- * PARAMETERS:  Info    - Contains all field creation info
+ * PARAMETERS:  info    - Contains all field creation info
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Construct a union acpi_operand_object of type def_field and
- *              connect it to the parent Node.
+ * DESCRIPTION: Construct an object of type union acpi_operand_object with a
+ *              subtype of def_field and connect it to the parent Node.
  *
  ******************************************************************************/
 
index 12d51df6d3bf6ad354dc1dd9a3c095d725d61839..1f1ce0c3d2f8a71c05d34906bda5ab61e1f01c97 100644 (file)
@@ -53,10 +53,10 @@ ACPI_MODULE_NAME("exregion")
  *
  * FUNCTION:    acpi_ex_system_memory_space_handler
  *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ * PARAMETERS:  function            - Read or Write operation
+ *              address             - Where in the space to read or write
  *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
+ *              value               - Pointer to in or out value
  *              handler_context     - Pointer to Handler's context
  *              region_context      - Pointer to context specific to the
  *                                    accessed region
@@ -270,10 +270,10 @@ acpi_ex_system_memory_space_handler(u32 function,
  *
  * FUNCTION:    acpi_ex_system_io_space_handler
  *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ * PARAMETERS:  function            - Read or Write operation
+ *              address             - Where in the space to read or write
  *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
+ *              value               - Pointer to in or out value
  *              handler_context     - Pointer to Handler's context
  *              region_context      - Pointer to context specific to the
  *                                    accessed region
@@ -329,10 +329,10 @@ acpi_ex_system_io_space_handler(u32 function,
  *
  * FUNCTION:    acpi_ex_pci_config_space_handler
  *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ * PARAMETERS:  function            - Read or Write operation
+ *              address             - Where in the space to read or write
  *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
+ *              value               - Pointer to in or out value
  *              handler_context     - Pointer to Handler's context
  *              region_context      - Pointer to context specific to the
  *                                    accessed region
@@ -365,7 +365,7 @@ acpi_ex_pci_config_space_handler(u32 function,
         *  pci_function is the PCI device function number
         *  pci_register is the Config space register range 0-255 bytes
         *
-        *  Value - input value for write, output address for read
+        *  value - input value for write, output address for read
         *
         */
        pci_id = (struct acpi_pci_id *)region_context;
@@ -402,10 +402,10 @@ acpi_ex_pci_config_space_handler(u32 function,
  *
  * FUNCTION:    acpi_ex_cmos_space_handler
  *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ * PARAMETERS:  function            - Read or Write operation
+ *              address             - Where in the space to read or write
  *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
+ *              value               - Pointer to in or out value
  *              handler_context     - Pointer to Handler's context
  *              region_context      - Pointer to context specific to the
  *                                    accessed region
@@ -434,10 +434,10 @@ acpi_ex_cmos_space_handler(u32 function,
  *
  * FUNCTION:    acpi_ex_pci_bar_space_handler
  *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ * PARAMETERS:  function            - Read or Write operation
+ *              address             - Where in the space to read or write
  *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
+ *              value               - Pointer to in or out value
  *              handler_context     - Pointer to Handler's context
  *              region_context      - Pointer to context specific to the
  *                                    accessed region
@@ -466,10 +466,10 @@ acpi_ex_pci_bar_space_handler(u32 function,
  *
  * FUNCTION:    acpi_ex_data_table_space_handler
  *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ * PARAMETERS:  function            - Read or Write operation
+ *              address             - Where in the space to read or write
  *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
+ *              value               - Pointer to in or out value
  *              handler_context     - Pointer to Handler's context
  *              region_context      - Pointer to context specific to the
  *                                    accessed region
index 6e335dc345285a1bd0523b82c1a3cb3406261b1c..bbf40ac27585121db4a9f19d54feaa979664c379 100644 (file)
@@ -147,7 +147,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
 
        stack_desc = *stack_ptr;
 
-       /* This is a union acpi_operand_object    */
+       /* This is an object of type union acpi_operand_object */
 
        switch (stack_desc->common.type) {
        case ACPI_TYPE_LOCAL_REFERENCE:
@@ -321,7 +321,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
  * FUNCTION:    acpi_ex_resolve_multiple
  *
  * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *              Operand             - Starting point for resolution
+ *              operand             - Starting point for resolution
  *              return_type         - Where the object type is returned
  *              return_desc         - Where the resolved object is returned
  *
index a67b1d925dddca807fe3525dd5e2dee4826a53ac..f232fbabdea805a3f7817cbb631a4df8db5a643e 100644 (file)
@@ -113,7 +113,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
  *
  * FUNCTION:    acpi_ex_resolve_operands
  *
- * PARAMETERS:  Opcode              - Opcode being interpreted
+ * PARAMETERS:  opcode              - Opcode being interpreted
  *              stack_ptr           - Pointer to the operand stack to be
  *                                    resolved
  *              walk_state          - Current state
@@ -307,7 +307,7 @@ acpi_ex_resolve_operands(u16 opcode,
                case ARGI_DEVICE_REF:
                case ARGI_TARGETREF:    /* Allows implicit conversion rules before store */
                case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */
-               case ARGI_SIMPLE_TARGET:        /* Name, Local, or Arg - no implicit conversion  */
+               case ARGI_SIMPLE_TARGET:        /* Name, Local, or arg - no implicit conversion  */
 
                        /*
                         * Need an operand of type ACPI_TYPE_LOCAL_REFERENCE
@@ -410,7 +410,7 @@ acpi_ex_resolve_operands(u16 opcode,
                        /*
                         * Need an operand of type ACPI_TYPE_INTEGER,
                         * But we can implicitly convert from a STRING or BUFFER
-                        * Aka - "Implicit Source Operand Conversion"
+                        * aka - "Implicit Source Operand Conversion"
                         */
                        status =
                            acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16);
@@ -437,7 +437,7 @@ acpi_ex_resolve_operands(u16 opcode,
                        /*
                         * Need an operand of type ACPI_TYPE_BUFFER,
                         * But we can implicitly convert from a STRING or INTEGER
-                        * Aka - "Implicit Source Operand Conversion"
+                        * aka - "Implicit Source Operand Conversion"
                         */
                        status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr);
                        if (ACPI_FAILURE(status)) {
@@ -463,7 +463,7 @@ acpi_ex_resolve_operands(u16 opcode,
                        /*
                         * Need an operand of type ACPI_TYPE_STRING,
                         * But we can implicitly convert from a BUFFER or INTEGER
-                        * Aka - "Implicit Source Operand Conversion"
+                        * aka - "Implicit Source Operand Conversion"
                         */
                        status = acpi_ex_convert_to_string(obj_desc, stack_ptr,
                                                           ACPI_IMPLICIT_CONVERT_HEX);
index c6cf843cc4c990492441b61fad0839bd6e89efd6..5fffe7ab5ece75c2849e3c42b7fc5aa14320bb4e 100644 (file)
@@ -62,8 +62,8 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
  * FUNCTION:    acpi_ex_store
  *
  * PARAMETERS:  *source_desc        - Value to be stored
- *              *dest_desc          - Where to store it.  Must be an NS node
- *                                    or union acpi_operand_object of type
+ *              *dest_desc          - Where to store it. Must be an NS node
+ *                                    or union acpi_operand_object of type
  *                                    Reference;
  *              walk_state          - Current walk state
  *
@@ -361,7 +361,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
  * FUNCTION:    acpi_ex_store_object_to_node
  *
  * PARAMETERS:  source_desc             - Value to be stored
- *              Node                    - Named object to receive the value
+ *              node                    - Named object to receive the value
  *              walk_state              - Current walk state
  *              implicit_conversion     - Perform implicit conversion (yes/no)
  *
index 65a45d8335c8418882c8dff45b69966406270c06..53c248473547b1ff5ce2d0b2bbd448b6dd7b1607 100644 (file)
@@ -110,7 +110,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
                 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
                 * truncated if the string is smaller than the buffer.  However, "other"
                 * implementations of ACPI never did this and thus became the defacto
-                * standard. ACPI 3.0_a changes this behavior such that the buffer
+                * standard. ACPI 3.0A changes this behavior such that the buffer
                 * is no longer truncated.
                 */
 
index 191a129452263e858e35e27551f291156ebda12a..b760641e2fc6684587255da64ada401c4914b193 100644 (file)
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("exsystem")
  *
  * FUNCTION:    acpi_ex_system_wait_semaphore
  *
- * PARAMETERS:  Semaphore       - Semaphore to wait on
- *              Timeout         - Max time to wait
+ * PARAMETERS:  semaphore       - Semaphore to wait on
+ *              timeout         - Max time to wait
  *
  * RETURN:      Status
  *
@@ -98,8 +98,8 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
  *
  * FUNCTION:    acpi_ex_system_wait_mutex
  *
- * PARAMETERS:  Mutex           - Mutex to wait on
- *              Timeout         - Max time to wait
+ * PARAMETERS:  mutex           - Mutex to wait on
+ *              timeout         - Max time to wait
  *
  * RETURN:      Status
  *
index eb6798ba8b59b092f88019d96d46a04b8637c27f..d1ab7917eed7dfbab2e4670d6451a086c7e1c71d 100644 (file)
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
  *
  * DESCRIPTION: Reacquire the interpreter execution region from within the
  *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in  conjunction with
+ *              fatal system error. Used in conjunction with
  *              relinquish_interpreter
  *
  ******************************************************************************/
@@ -317,8 +317,8 @@ void acpi_ex_release_global_lock(u32 field_flags)
  *
  * FUNCTION:    acpi_ex_digits_needed
  *
- * PARAMETERS:  Value           - Value to be represented
- *              Base            - Base of representation
+ * PARAMETERS:  value           - Value to be represented
+ *              base            - Base of representation
  *
  * RETURN:      The number of digits.
  *
@@ -408,7 +408,7 @@ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id)
  * PARAMETERS:  out_string      - Where to put the converted string. At least
  *                                21 bytes are needed to hold the largest
  *                                possible 64-bit integer.
- *              Value           - Value to be converted
+ *              value           - Value to be converted
  *
  * RETURN:      None, string
  *
@@ -443,7 +443,7 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
  *
  * RETURN:      TRUE if valid/supported ID.
  *
- * DESCRIPTION: Validate an operation region space_iD.
+ * DESCRIPTION: Validate an operation region space_ID.
  *
  ******************************************************************************/
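
The "at least 21 bytes" figure in the acpi_ex_integer_to_string header above is just arithmetic: the largest u64 value, 18446744073709551615, has 20 decimal digits, and one more byte is needed for the terminating NUL. A standalone check of the bound (ordinary C, not ACPICA code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Buffer sized per the comment: 20 digits + NUL terminator */
        char out_string[21];
        uint64_t value = UINT64_MAX;    /* 18446744073709551615 */

        int len = snprintf(out_string, sizeof(out_string), "%" PRIu64, value);

        /* len is 20, so the 21-byte buffer is exactly large enough */
        printf("%s (%d digits)\n", out_string, len);
        return 0;
}
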
 
index d0b9ed5df97e29ce83323a7cb6e56d126c5614a2..a1e71d0ef57b163d2f2df9adf3638a14e4db23d5 100644 (file)
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("hwacpi")
  *
  * FUNCTION:    acpi_hw_set_mode
  *
- * PARAMETERS:  Mode            - SYS_MODE_ACPI or SYS_MODE_LEGACY
+ * PARAMETERS:  mode            - SYS_MODE_ACPI or SYS_MODE_LEGACY
  *
  * RETURN:      Status
  *
index 29e859293eddcb930753a6a22927d63412262c7b..48518dac534230a8f59ee7bbfe1e36475396bb19 100644 (file)
@@ -90,7 +90,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
  * FUNCTION:    acpi_hw_extended_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              Flags               - ACPI_EXECUTE_GTS to run optional method
+ *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -117,7 +117,8 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
 
        /* Clear wake status (WAK_STS) */
 
-       status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+       status =
+           acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
@@ -147,7 +148,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
            ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
             ACPI_X_SLEEP_TYPE_MASK);
 
-       status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+       status = acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
                            &acpi_gbl_FADT.sleep_control);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
@@ -171,7 +172,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_extended_wake_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              Flags               - ACPI_EXECUTE_BFS to run optional method
+ *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
@@ -195,7 +196,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
                    ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
                     ACPI_X_SLEEP_TYPE_MASK);
 
-               (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+               (void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
                                 &acpi_gbl_FADT.sleep_control);
        }
 
@@ -212,7 +213,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_extended_wake
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              Flags               - Reserved, set to zero
+ *              flags               - Reserved, set to zero
  *
  * RETURN:      Status
  *
@@ -239,7 +240,7 @@ acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
         * and use it to determine whether the system is rebooting or
         * resuming. Clear WAK_STS for compatibility.
         */
-       (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+       (void)acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
        acpi_gbl_system_awake_and_running = TRUE;
 
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
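
The (u64) casts added above feed acpi_write(), and the value they carry is a small bit field: SLP_TYP shifted into place and masked, then OR'd with the sleep-enable bit. A self-contained sketch of that composition, using illustrative constants that follow the ACPI 5.0 hardware-reduced sleep control layout (SLP_TYP in bits 4:2, SLP_EN in bit 5) rather than the actual ACPICA definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: SLP_TYP occupies bits 4:2, SLP_EN is bit 5 */
#define X_SLEEP_TYPE_POSITION   2
#define X_SLEEP_TYPE_MASK       0x1C
#define X_SLEEP_ENABLE          0x20

/* Compose the value written to the FADT sleep control register */
static uint64_t make_sleep_control(uint8_t sleep_type_a)
{
        uint64_t sleep_type_value =
            ((uint64_t)sleep_type_a << X_SLEEP_TYPE_POSITION) &
            X_SLEEP_TYPE_MASK;

        return sleep_type_value | X_SLEEP_ENABLE;
}

int main(void)
{
        /* e.g. a platform whose SLP_TYPa for the requested state is 5 */
        printf("0x%02llx\n", (unsigned long long)make_sleep_control(5));
        return 0;
}
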
index 6b6c83b87b5215859e618f3bd85721438ed62bd8..4af6d20ef077227b74d4d89c7ab420b0733be547 100644 (file)
@@ -69,9 +69,9 @@ acpi_hw_write_multiple(u32 value,
  *
  * FUNCTION:    acpi_hw_validate_register
  *
- * PARAMETERS:  Reg                 - GAS register structure
+ * PARAMETERS:  reg                 - GAS register structure
  *              max_bit_width       - Max bit_width supported (32 or 64)
- *              Address             - Pointer to where the gas->address
+ *              address             - Pointer to where the gas->address
  *                                    is returned
  *
  * RETURN:      Status
@@ -102,7 +102,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
                return (AE_BAD_ADDRESS);
        }
 
-       /* Validate the space_iD */
+       /* Validate the space_ID */
 
        if ((reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
            (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
@@ -137,8 +137,8 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
  *
  * FUNCTION:    acpi_hw_read
  *
- * PARAMETERS:  Value               - Where the value is returned
- *              Reg                 - GAS register structure
+ * PARAMETERS:  value               - Where the value is returned
+ *              reg                 - GAS register structure
  *
  * RETURN:      Status
  *
@@ -148,7 +148,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
  *
  * LIMITATIONS: <These limitations also apply to acpi_hw_write>
  *      bit_width must be exactly 8, 16, or 32.
- *      space_iD must be system_memory or system_iO.
+ *      space_ID must be system_memory or system_IO.
  *      bit_offset and access_width are currently ignored, as there has
  *          not been a need to implement these.
  *
@@ -200,8 +200,8 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
  *
  * FUNCTION:    acpi_hw_write
  *
- * PARAMETERS:  Value               - Value to be written
- *              Reg                 - GAS register structure
+ * PARAMETERS:  value               - Value to be written
+ *              reg                 - GAS register structure
  *
  * RETURN:      Status
  *
@@ -439,7 +439,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
  * FUNCTION:    acpi_hw_register_write
  *
  * PARAMETERS:  register_id         - ACPI Register ID
- *              Value               - The value to write
+ *              value               - The value to write
  *
  * RETURN:      Status
  *
@@ -571,7 +571,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
  *
  * FUNCTION:    acpi_hw_read_multiple
  *
- * PARAMETERS:  Value               - Where the register value is returned
+ * PARAMETERS:  value               - Where the register value is returned
  *              register_a           - First ACPI register (required)
  *              register_b           - Second ACPI register (optional)
  *
@@ -624,7 +624,7 @@ acpi_hw_read_multiple(u32 *value,
  *
  * FUNCTION:    acpi_hw_write_multiple
  *
- * PARAMETERS:  Value               - The value to write
+ * PARAMETERS:  value               - The value to write
  *              register_a           - First ACPI register (required)
  *              register_b           - Second ACPI register (optional)
  *
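
The read_multiple/write_multiple headers above exist because several fixed ACPI registers are split into an A part (required) and a B part (optional); the natural way to present them as one register is to OR the halves on a read and mirror the value to both on a write. A sketch of that pattern under that assumption, with plain C stubs instead of the GAS plumbing:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a hardware register; a NULL pointer means "not present" */
struct reg {
        uint32_t value;
};

/* Read register A (required) and B (optional), OR the results together */
static uint32_t read_multiple(const struct reg *reg_a, const struct reg *reg_b)
{
        uint32_t value = reg_a->value;

        if (reg_b)
                value |= reg_b->value;
        return value;
}

/* Mirror a write to both halves of the split register */
static void write_multiple(uint32_t value, struct reg *reg_a, struct reg *reg_b)
{
        reg_a->value = value;
        if (reg_b)
                reg_b->value = value;
}

int main(void)
{
        struct reg a = { 0x0100 }, b = { 0x0001 };

        printf("0x%04x\n", read_multiple(&a, &b));      /* 0x0101 */
        write_multiple(0x2000, &a, &b);
        printf("0x%04x 0x%04x\n", a.value, b.value);    /* 0x2000 0x2000 */
        return 0;
}
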
index 0ed85cac32314f956de3092f7087e3ab011b5ba7..9960fe9ef5334f719ed4e5cbd8cb2cbef530a2aa 100644 (file)
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("hwsleep")
  * FUNCTION:    acpi_hw_legacy_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              Flags               - ACPI_EXECUTE_GTS to run optional method
+ *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
                return_ACPI_STATUS(status);
        }
 
-       if (sleep_state != ACPI_STATE_S5) {
-               /*
-                * Disable BM arbitration. This feature is contained within an
-                * optional register (PM2 Control), so ignore a BAD_ADDRESS
-                * exception.
-                */
-               status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-               if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-                       return_ACPI_STATUS(status);
-               }
-       }
-
        /*
         * 1) Disable/Clear all GPEs
         * 2) Enable all wakeup GPEs
@@ -226,7 +214,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_legacy_wake_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              Flags               - ACPI_EXECUTE_BFS to run optional method
+ *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
@@ -300,7 +288,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_legacy_wake
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              Flags               - Reserved, set to zero
+ *              flags               - Reserved, set to zero
  *
  * RETURN:      Status
  *
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
                                    [ACPI_EVENT_POWER_BUTTON].
                                    status_register_id, ACPI_CLEAR_STATUS);
 
-       /*
-        * Enable BM arbitration. This feature is contained within an
-        * optional register (PM2 Control), so ignore a BAD_ADDRESS
-        * exception.
-        */
-       status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
-       if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
-               return_ACPI_STATUS(status);
-       }
-
        acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
        return_ACPI_STATUS(status);
 }
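
The blocks deleted above followed a tolerate-the-optional-register pattern: attempt the write, and treat only a missing register (PM2 Control may not exist, hence BAD_ADDRESS) as non-fatal. A compact sketch of that error-handling shape, with hypothetical status codes standing in for the ACPICA ones:

#include <stdio.h>

/* Hypothetical status codes standing in for AE_OK / AE_BAD_ADDRESS / other */
enum status { ST_OK, ST_BAD_ADDRESS, ST_ERROR };

/* Pretend register write; reports ST_BAD_ADDRESS when the register is absent */
static enum status write_bit_register(int reg_present, int value)
{
        (void)value;
        return reg_present ? ST_OK : ST_BAD_ADDRESS;
}

/* A genuinely failed write aborts; an absent optional register does not */
static enum status disable_bm_arbitration(int pm2_present)
{
        enum status status = write_bit_register(pm2_present, 1);

        if (status != ST_OK && status != ST_BAD_ADDRESS)
                return status;
        return ST_OK;
}

int main(void)
{
        printf("%d %d\n", disable_bm_arbitration(1), disable_bm_arbitration(0));
        return 0;
}
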
index f1b2c3b94cac0e004669bb4d647497704f224e8e..b6411f16832f518fb05ab6200dd2380810c149ff 100644 (file)
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("hwtimer")
  *
  * FUNCTION:    acpi_get_timer_resolution
  *
- * PARAMETERS:  Resolution          - Where the resolution is returned
+ * PARAMETERS:  resolution          - Where the resolution is returned
  *
  * RETURN:      Status and timer resolution
  *
@@ -84,7 +84,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
  *
  * FUNCTION:    acpi_get_timer
  *
- * PARAMETERS:  Ticks               - Where the timer value is returned
+ * PARAMETERS:  ticks               - Where the timer value is returned
  *
  * RETURN:      Status and current timer value (ticks)
  *
index 6e5c43a60bb723a54d7cc15543a11a89b8bcd056..c99d546b217fb4b78622e273f1269586b0cde438 100644 (file)
@@ -58,7 +58,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
  *
  * The table is used to implement the Microsoft port access rules that
  * first appeared in Windows XP. Some ports are always illegal, and some
- * ports are only illegal if the BIOS calls _OSI with a win_xP string or
+ * ports are only illegal if the BIOS calls _OSI with a win_XP string or
 * later (meaning that the BIOS itself is post-XP.)
  *
  * This provides ACPICA with the desired port protections and
@@ -66,7 +66,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
  *
  * Description of port entries:
  *  DMA:   DMA controller
- *  PIC0:  Programmable Interrupt Controller (8259_a)
+ *  PIC0:  Programmable Interrupt Controller (8259A)
  *  PIT1:  System Timer 1
  *  PIT2:  System Timer 2 failsafe
  *  RTC:   Real-time clock
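
The table described above drives a plain range check: an I/O request is flagged when [address, address + width/8) overlaps one of the protected legacy port ranges. A self-contained sketch of that overlap test; the port ranges below are illustrative stand-ins, not the table in hwvalid.c:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct port_info {
        const char *name;
        uint16_t start;
        uint16_t end;           /* inclusive */
};

/* Illustrative subset of protected legacy ports */
static const struct port_info protected_ports[] = {
        { "DMA",  0x0000, 0x000F },
        { "PIC0", 0x0020, 0x0021 },     /* 8259A */
        { "RTC",  0x0070, 0x0071 },
};

/* Return 1 if [address, address + bit_width/8) overlaps a protected range */
static int io_request_is_protected(uint16_t address, uint32_t bit_width)
{
        uint32_t last = address + (bit_width / 8) - 1;
        size_t i;

        for (i = 0; i < sizeof(protected_ports) / sizeof(protected_ports[0]); i++) {
                if (address <= protected_ports[i].end &&
                    last >= protected_ports[i].start)
                        return 1;
        }
        return 0;
}

int main(void)
{
        printf("%d %d\n",
               io_request_is_protected(0x0070, 8),      /* overlaps RTC */
               io_request_is_protected(0x0080, 8));     /* no overlap   */
        return 0;
}
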
index a716fede4f25781d8e4e0633c21626ab03c83719..7bfd649d1996e6945d2d21787858ca9d0dcbc0e0 100644 (file)
@@ -104,8 +104,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
  *
  * FUNCTION:    acpi_read
  *
- * PARAMETERS:  Value               - Where the value is returned
- *              Reg                 - GAS register structure
+ * PARAMETERS:  value               - Where the value is returned
+ *              reg                 - GAS register structure
  *
  * RETURN:      Status
  *
@@ -113,7 +113,7 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
  *
  * LIMITATIONS: <These limitations also apply to acpi_write>
  *      bit_width must be exactly 8, 16, 32, or 64.
- *      space_iD must be system_memory or system_iO.
+ *      space_ID must be system_memory or system_IO.
  *      bit_offset and access_width are currently ignored, as there has
  *          not been a need to implement these.
  *
@@ -196,8 +196,8 @@ ACPI_EXPORT_SYMBOL(acpi_read)
  *
  * FUNCTION:    acpi_write
  *
- * PARAMETERS:  Value               - Value to be written
- *              Reg                 - GAS register structure
+ * PARAMETERS:  value               - Value to be written
+ *              reg                 - GAS register structure
  *
  * RETURN:      Status
  *
@@ -441,7 +441,7 @@ ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
  *              *sleep_type_a        - Where SLP_TYPa is returned
  *              *sleep_type_b        - Where SLP_TYPb is returned
  *
- * RETURN:      Status - ACPI status
+ * RETURN:      status - ACPI status
  *
  * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
  *              state.
index 762d059bb508af7bd05f14829beea233e039bfd8..f8684bfe79079ba170c727b96d7ea404a91ee604 100644 (file)
@@ -205,7 +205,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
        ACPI_FLUSH_CPU_CACHE();
 
        status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
-                                   (u32)acpi_gbl_FADT.S4bios_request, 8);
+                                   (u32)acpi_gbl_FADT.s4_bios_request, 8);
 
        do {
                acpi_os_stall(1000);
@@ -349,7 +349,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
  * FUNCTION:    acpi_enter_sleep_state
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              Flags               - ACPI_EXECUTE_GTS to run optional method
+ *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -382,7 +382,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
  * FUNCTION:    acpi_leave_sleep_state_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we are exiting
- *              Flags               - ACPI_EXECUTE_BFS to run optional method
+ *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
index 61623f3f6826ccdb6286de6e61e4611be4f6675a..23db53ce2293fe8a5f02d89d28e797c4bc491429 100644 (file)
@@ -157,7 +157,7 @@ acpi_status acpi_ns_root_initialize(void)
 
 #if defined (ACPI_ASL_COMPILER)
 
-                               /* Save the parameter count for the i_aSL compiler */
+                               /* Save the parameter count for the iASL compiler */
 
                                new_node->value = obj_desc->method.param_count;
 #else
@@ -258,11 +258,11 @@ acpi_status acpi_ns_root_initialize(void)
  * FUNCTION:    acpi_ns_lookup
  *
  * PARAMETERS:  scope_info      - Current scope info block
- *              Pathname        - Search pathname, in internal format
+ *              pathname        - Search pathname, in internal format
  *                                (as represented in the AML stream)
- *              Type            - Type associated with name
+ *              type            - Type associated with name
  *              interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
- *              Flags           - Flags describing the search restrictions
+ *              flags           - Flags describing the search restrictions
  *              walk_state      - Current state of the walk
  *              return_node     - Where the Node is placed (if found
  *                                or created successfully)
index 7c3d3ceb98b37798dedaaf059ff50ffedb6a4c8b..ac389e5bb594f00a7c24b6386bb7393889b89841 100644 (file)
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("nsalloc")
  *
  * FUNCTION:    acpi_ns_create_node
  *
- * PARAMETERS:  Name            - Name of the new node (4 char ACPI name)
+ * PARAMETERS:  name            - Name of the new node (4 char ACPI name)
  *
  * RETURN:      New namespace node (Null on failure)
  *
@@ -92,7 +92,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
  *
  * FUNCTION:    acpi_ns_delete_node
  *
- * PARAMETERS:  Node            - Node to be deleted
+ * PARAMETERS:  node            - Node to be deleted
  *
  * RETURN:      None
  *
@@ -143,7 +143,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
  *
  * FUNCTION:    acpi_ns_remove_node
  *
- * PARAMETERS:  Node            - Node to be removed/deleted
+ * PARAMETERS:  node            - Node to be removed/deleted
  *
  * RETURN:      None
  *
@@ -196,8 +196,8 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
  *
  * PARAMETERS:  walk_state      - Current state of the walk
  *              parent_node     - The parent of the new Node
- *              Node            - The new Node to install
- *              Type            - ACPI object type of the new Node
+ *              node            - The new Node to install
+ *              type            - ACPI object type of the new Node
  *
  * RETURN:      None
  *
index 3f7f3f6e7dd5b9ea711f0e31083cb19821b220c6..7ee4e6aeb0a2da82d219a7fdac501efb83338daa 100644 (file)
@@ -63,7 +63,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
  * FUNCTION:    acpi_ns_print_pathname
  *
  * PARAMETERS:  num_segments        - Number of ACPI name segments
- *              Pathname            - The compressed (internal) path
+ *              pathname            - The compressed (internal) path
  *
  * RETURN:      None
  *
@@ -107,10 +107,10 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
  *
  * FUNCTION:    acpi_ns_dump_pathname
  *
- * PARAMETERS:  Handle              - Object
- *              Msg                 - Prefix message
- *              Level               - Desired debug level
- *              Component           - Caller's component ID
+ * PARAMETERS:  handle              - Object
+ *              msg                 - Prefix message
+ *              level               - Desired debug level
+ *              component           - Caller's component ID
  *
  * RETURN:      None
  *
@@ -143,8 +143,8 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
  * FUNCTION:    acpi_ns_dump_one_object
  *
  * PARAMETERS:  obj_handle          - Node to be dumped
- *              Level               - Nesting level of the handle
- *              Context             - Passed into walk_namespace
+ *              level               - Nesting level of the handle
+ *              context             - Passed into walk_namespace
  *              return_value        - Not used
  *
  * RETURN:      Status
@@ -615,7 +615,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
  *
  * FUNCTION:    acpi_ns_dump_objects
  *
- * PARAMETERS:  Type                - Object type to be dumped
+ * PARAMETERS:  type                - Object type to be dumped
  *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
  *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
  *                                    for an effectively unlimited depth.
@@ -671,7 +671,7 @@ acpi_ns_dump_objects(acpi_object_type type,
  *
  * FUNCTION:    acpi_ns_dump_entry
  *
- * PARAMETERS:  Handle              - Node to be dumped
+ * PARAMETERS:  handle              - Node to be dumped
  *              debug_level         - Output level
  *
  * RETURN:      None
index 3b5acb0eb40648c4bbcacfca684ccd31a8e4ea51..944d4c8d94386747337588938821035dfca79909 100644 (file)
@@ -55,9 +55,9 @@ ACPI_MODULE_NAME("nsdumpdv")
  *
  * FUNCTION:    acpi_ns_dump_one_device
  *
- * PARAMETERS:  Handle              - Node to be dumped
- *              Level               - Nesting level of the handle
- *              Context             - Passed into walk_namespace
+ * PARAMETERS:  handle              - Node to be dumped
+ *              level               - Nesting level of the handle
+ *              context             - Passed into walk_namespace
  *              return_value        - Not used
  *
  * RETURN:      Status
index f375cb82e321b7958932be9e4c5bc00e65e3e30c..69074be498e886a50ed75c1fa82d9bcb0f96e7f0 100644 (file)
@@ -59,11 +59,11 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
  *
  * FUNCTION:    acpi_ns_evaluate
  *
- * PARAMETERS:  Info            - Evaluation info block, contains:
+ * PARAMETERS:  info            - Evaluation info block, contains:
  *                  prefix_node     - Prefix or Method/Object Node to execute
- *                  Pathname        - Name of method to execute, If NULL, the
+ *                  pathname        - Name of method to execute, If NULL, the
  *                                    Node is the object to execute
- *                  Parameters      - List of parameters to pass to the method,
+ *                  parameters      - List of parameters to pass to the method,
  *                                    terminated by NULL. Params itself may be
  *                                    NULL if no parameters are being passed.
  *                  return_object   - Where to put method's return value (if
@@ -71,7 +71,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
  *                  parameter_type  - Type of Parameter list
  *                  return_object   - Where to put method's return value (if
  *                                    any). If NULL, no value is returned.
- *                  Flags           - ACPI_IGNORE_RETURN_VALUE to delete return
+ *                  flags           - ACPI_IGNORE_RETURN_VALUE to delete return
  *
  * RETURN:      Status
  *
@@ -351,7 +351,7 @@ void acpi_ns_exec_module_code_list(void)
  * FUNCTION:    acpi_ns_exec_module_code
  *
  * PARAMETERS:  method_obj          - Object container for the module-level code
- *              Info                - Info block for method evaluation
+ *              info                - Info block for method evaluation
  *
  * RETURN:      None. Exceptions during method execution are ignored, since
  *              we cannot abort a table load.
index 9d84ec2f0211bde65d30d10b6a79b5e0fd313fd0..95ffe8dfa1f18feffeea9eb091ff3a44aa4c0ebc 100644 (file)
@@ -224,8 +224,8 @@ acpi_status acpi_ns_initialize_devices(void)
  * FUNCTION:    acpi_ns_init_one_object
  *
  * PARAMETERS:  obj_handle      - Node
- *              Level           - Current nesting level
- *              Context         - Points to a init info struct
+ *              level           - Current nesting level
+ *              context         - Points to a init info struct
  *              return_value    - Not used
  *
  * RETURN:      Status
@@ -530,7 +530,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
                         * we will not run _INI, but we continue to examine the children
                         * of this device.
                         *
-                        * From the ACPI spec, description of _STA: (Note - no mention
+                        * From the ACPI spec, description of _STA: (note - no mention
                         * of whether to run _INI or not on the device in question)
                         *
                         * "_STA may return bit 0 clear (not present) with bit 3 set
index 5cbf15ffe7d8ed38a38782b26f10114374b109cb..76935ff29289c431d6a291ca0b2dd2db73ae75d4 100644 (file)
@@ -63,7 +63,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
  * FUNCTION:    acpi_ns_load_table
  *
  * PARAMETERS:  table_index     - Index for table to be loaded
- *              Node            - Owning NS node
+ *              node            - Owning NS node
  *
  * RETURN:      Status
  *
@@ -278,7 +278,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
  *
  *  FUNCTION:       acpi_ns_unload_name_space
  *
- *  PARAMETERS:     Handle          - Root of namespace subtree to be deleted
+ *  PARAMETERS:     handle          - Root of namespace subtree to be deleted
  *
  *  RETURN:         Status
  *
index b20e7c8c3ffbcc7414689f28c242ec8c266a1a21..96e0eb609bb465cc9f1b87f6404e8a44fcb1ec0d 100644 (file)
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("nsnames")
  *
  * FUNCTION:    acpi_ns_build_external_path
  *
- * PARAMETERS:  Node            - NS node whose pathname is needed
- *              Size            - Size of the pathname
+ * PARAMETERS:  node            - NS node whose pathname is needed
+ *              size            - Size of the pathname
  *              *name_buffer    - Where to return the pathname
  *
  * RETURN:      Status
@@ -120,7 +120,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_ns_get_external_pathname
  *
- * PARAMETERS:  Node            - Namespace node whose pathname is needed
+ * PARAMETERS:  node            - Namespace node whose pathname is needed
  *
  * RETURN:      Pointer to storage containing the fully qualified name of
 *              the node, in external format (name segments separated by path
@@ -168,7 +168,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
  *
  * FUNCTION:    acpi_ns_get_pathname_length
  *
- * PARAMETERS:  Node        - Namespace node
+ * PARAMETERS:  node        - Namespace node
  *
  * RETURN:      Length of path, including prefix
  *
@@ -214,7 +214,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
  *
  * PARAMETERS:  target_handle           - Handle of named object whose name is
  *                                        to be found
- *              Buffer                  - Where the pathname is returned
+ *              buffer                  - Where the pathname is returned
  *
  * RETURN:      Status, Buffer is filled with pathname if status is AE_OK
  *
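
All of the pathname helpers above do the same walk: climb parent links from the node toward the root, emitting one 4-character name segment per level, joined by '.' and prefixed with the root '\'. A small sketch of that walk over a toy node chain (a hypothetical struct node, not the real acpi_namespace_node):

#include <stdio.h>
#include <string.h>

struct node {
        char name[5];           /* 4-character ACPI name plus NUL */
        struct node *parent;    /* NULL at the namespace root */
};

/* Build "\SEG1.SEG2..." by walking from the node up to the root */
static void build_external_path(const struct node *node, char *out, size_t size)
{
        char segs[16][5];
        int depth = 0;
        int i;

        for (; node && node->parent && depth < 16; node = node->parent)
                strcpy(segs[depth++], node->name);

        snprintf(out, size, "\\");
        for (i = depth - 1; i >= 0; i--) {
                strncat(out, segs[i], size - strlen(out) - 1);
                if (i)
                        strncat(out, ".", size - strlen(out) - 1);
        }
}

int main(void)
{
        struct node root = { "____", NULL };
        struct node sb   = { "_SB_", &root };
        struct node pci0 = { "PCI0", &sb };
        char path[64];

        build_external_path(&pci0, path, sizeof(path));
        printf("%s\n", path);   /* \_SB_.PCI0 */
        return 0;
}
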
index dd77a3ce6e508eb6bb6fa74d3be1a45e268d59f1..d6c9a3cc6716e87c07bf39a7ad58f50c331dd3cb 100644 (file)
@@ -53,9 +53,9 @@ ACPI_MODULE_NAME("nsobject")
  *
  * FUNCTION:    acpi_ns_attach_object
  *
- * PARAMETERS:  Node                - Parent Node
- *              Object              - Object to be attached
- *              Type                - Type of object, or ACPI_TYPE_ANY if not
+ * PARAMETERS:  node                - Parent Node
+ *              object              - Object to be attached
+ *              type                - Type of object, or ACPI_TYPE_ANY if not
  *                                    known
  *
  * RETURN:      Status
@@ -191,7 +191,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_ns_detach_object
  *
- * PARAMETERS:  Node           - A Namespace node whose object will be detached
+ * PARAMETERS:  node           - A Namespace node whose object will be detached
  *
  * RETURN:      None.
  *
@@ -250,7 +250,7 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
  *
  * FUNCTION:    acpi_ns_get_attached_object
  *
- * PARAMETERS:  Node             - Namespace node
+ * PARAMETERS:  node             - Namespace node
  *
  * RETURN:      Current value of the object field from the Node whose
  *              handle is passed
@@ -285,7 +285,7 @@ union acpi_operand_object *acpi_ns_get_attached_object(struct
  *
  * FUNCTION:    acpi_ns_get_secondary_object
  *
- * PARAMETERS:  Node             - Namespace node
+ * PARAMETERS:  node             - Namespace node
  *
  * RETURN:      Current value of the object field from the Node whose
  *              handle is passed.
@@ -315,9 +315,9 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
  *
  * FUNCTION:    acpi_ns_attach_data
  *
- * PARAMETERS:  Node            - Namespace node
- *              Handler         - Handler to be associated with the data
- *              Data            - Data to be attached
+ * PARAMETERS:  node            - Namespace node
+ *              handler         - Handler to be associated with the data
+ *              data            - Data to be attached
  *
  * RETURN:      Status
  *
@@ -372,8 +372,8 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_ns_detach_data
  *
- * PARAMETERS:  Node            - Namespace node
- *              Handler         - Handler associated with the data
+ * PARAMETERS:  node            - Namespace node
+ *              handler         - Handler associated with the data
  *
  * RETURN:      Status
  *
@@ -416,9 +416,9 @@ acpi_ns_detach_data(struct acpi_namespace_node * node,
  *
  * FUNCTION:    acpi_ns_get_attached_data
  *
- * PARAMETERS:  Node            - Namespace node
- *              Handler         - Handler associated with the data
- *              Data            - Where the data is returned
+ * PARAMETERS:  node            - Namespace node
+ *              handler         - Handler associated with the data
+ *              data            - Where the data is returned
  *
  * RETURN:      Status
  *
index 23ce096864186a1cfd6ced9e57fd17d208646004..2419f417ea33220f5ee04212866499a476b6ed51 100644 (file)
@@ -116,7 +116,7 @@ static const char *acpi_rtype_names[] = {
  *
  * FUNCTION:    acpi_ns_check_predefined_names
  *
- * PARAMETERS:  Node            - Namespace node for the method/object
+ * PARAMETERS:  node            - Namespace node for the method/object
  *              user_param_count - Number of parameters actually passed
  *              return_status   - Status from the object evaluation
  *              return_object_ptr - Pointer to the object returned from the
@@ -275,10 +275,10 @@ cleanup:
  *
  * FUNCTION:    acpi_ns_check_parameter_count
  *
- * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
- *              Node            - Namespace node for the method/object
+ * PARAMETERS:  pathname        - Full pathname to the node (for error msgs)
+ *              node            - Namespace node for the method/object
  *              user_param_count - Number of args passed in by the caller
- *              Predefined      - Pointer to entry in predefined name table
+ *              predefined      - Pointer to entry in predefined name table
  *
  * RETURN:      None
  *
@@ -364,7 +364,7 @@ acpi_ns_check_parameter_count(char *pathname,
  *
  * FUNCTION:    acpi_ns_check_for_predefined_name
  *
- * PARAMETERS:  Node            - Namespace node for the method/object
+ * PARAMETERS:  node            - Namespace node for the method/object
  *
  * RETURN:      Pointer to entry in predefined table. NULL indicates not found.
  *
@@ -410,7 +410,7 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
  *
  * FUNCTION:    acpi_ns_check_package
  *
- * PARAMETERS:  Data            - Pointer to validation data structure
+ * PARAMETERS:  data            - Pointer to validation data structure
  *              return_object_ptr - Pointer to the object returned from the
  *                                evaluation of a method or object
  *
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
                        /* Create the new outer package and populate it */
 
                        status =
-                           acpi_ns_wrap_with_package(data, *elements,
+                           acpi_ns_wrap_with_package(data, return_object,
                                                      return_object_ptr);
                        if (ACPI_FAILURE(status)) {
                                return (status);
@@ -685,11 +685,11 @@ package_too_small:
  *
  * FUNCTION:    acpi_ns_check_package_list
  *
- * PARAMETERS:  Data            - Pointer to validation data structure
- *              Package         - Pointer to package-specific info for method
- *              Elements        - Element list of parent package. All elements
+ * PARAMETERS:  data            - Pointer to validation data structure
+ *              package         - Pointer to package-specific info for method
+ *              elements        - Element list of parent package. All elements
  *                                of this list should be of type Package.
- *              Count           - Count of subpackages
+ *              count           - Count of subpackages
  *
  * RETURN:      Status
  *
@@ -911,12 +911,12 @@ package_too_small:
  *
  * FUNCTION:    acpi_ns_check_package_elements
  *
- * PARAMETERS:  Data            - Pointer to validation data structure
- *              Elements        - Pointer to the package elements array
- *              Type1           - Object type for first group
- *              Count1          - Count for first group
- *              Type2           - Object type for second group
- *              Count2          - Count for second group
+ * PARAMETERS:  data            - Pointer to validation data structure
+ *              elements        - Pointer to the package elements array
+ *              type1           - Object type for first group
+ *              count1          - Count for first group
+ *              type2           - Object type for second group
+ *              count2          - Count for second group
  *              start_index     - Start of the first group of elements
  *
  * RETURN:      Status
@@ -968,7 +968,7 @@ acpi_ns_check_package_elements(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_check_object_type
  *
- * PARAMETERS:  Data            - Pointer to validation data structure
+ * PARAMETERS:  data            - Pointer to validation data structure
  *              return_object_ptr - Pointer to the object returned from the
  *                                evaluation of a method or object
  *              expected_btypes - Bitmap of expected return type(s)
@@ -1102,7 +1102,7 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_check_reference
  *
- * PARAMETERS:  Data            - Pointer to validation data structure
+ * PARAMETERS:  data            - Pointer to validation data structure
  *              return_object   - Object returned from the evaluation of a
  *                                method or object
  *
@@ -1140,7 +1140,7 @@ acpi_ns_check_reference(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_get_expected_types
  *
- * PARAMETERS:  Buffer          - Pointer to where the string is returned
+ * PARAMETERS:  buffer          - Pointer to where the string is returned
  *              expected_btypes - Bitmap of expected return type(s)
  *
  * RETURN:      Buffer is populated with type names.
index 5519a64a353f7a7f2d0f36c36a6011f6f5311efc..8c5f292860fcd02a2cc5936742ac0b6087e026ef 100644 (file)
@@ -94,7 +94,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
  *
  * FUNCTION:    acpi_ns_repair_object
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              expected_btypes     - Object types expected
  *              package_index       - Index of object within parent package (if
  *                                    applicable - ACPI_NOT_PACKAGE_ELEMENT
@@ -470,7 +470,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
  *
  * FUNCTION:    acpi_ns_repair_null_element
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              expected_btypes     - Object types expected
  *              package_index       - Index of object within parent package (if
  *                                    applicable - ACPI_NOT_PACKAGE_ELEMENT
@@ -509,17 +509,17 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
         */
        if (expected_btypes & ACPI_RTYPE_INTEGER) {
 
-               /* Need an Integer - create a zero-value integer */
+               /* Need an integer - create a zero-value integer */
 
                new_object = acpi_ut_create_integer_object((u64)0);
        } else if (expected_btypes & ACPI_RTYPE_STRING) {
 
-               /* Need a String - create a NULL string */
+               /* Need a string - create a NULL string */
 
                new_object = acpi_ut_create_string_object(0);
        } else if (expected_btypes & ACPI_RTYPE_BUFFER) {
 
-               /* Need a Buffer - create a zero-length buffer */
+               /* Need a buffer - create a zero-length buffer */
 
                new_object = acpi_ut_create_buffer_object(0);
        } else {
@@ -552,7 +552,7 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_remove_null_elements
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              package_type        - An acpi_return_package_types value
  *              obj_desc            - A Package object
  *
@@ -635,7 +635,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_wrap_with_package
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              original_object     - Pointer to the object to repair.
  *              obj_desc_ptr        - The new package object is returned here
  *
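
The null-element repair shown earlier in this file's hunks is a priority dispatch on the expected-type bitmap: prefer a zero-value integer, then an empty string, then a zero-length buffer. A compact sketch of that dispatch with hypothetical type flags and a plain enum in place of the object union:

#include <stdio.h>

/* Hypothetical expected-return-type flags */
#define RTYPE_INTEGER   0x01
#define RTYPE_STRING    0x02
#define RTYPE_BUFFER    0x04

enum obj_type { OBJ_NONE, OBJ_INTEGER, OBJ_STRING, OBJ_BUFFER };

/*
 * Pick the default object used to replace a NULL package element:
 * integer first, then string, then buffer, per the expected types.
 */
static enum obj_type repair_null_element(unsigned int expected_btypes)
{
        if (expected_btypes & RTYPE_INTEGER)
                return OBJ_INTEGER;     /* zero-value integer */
        if (expected_btypes & RTYPE_STRING)
                return OBJ_STRING;      /* NULL (empty) string */
        if (expected_btypes & RTYPE_BUFFER)
                return OBJ_BUFFER;      /* zero-length buffer */
        return OBJ_NONE;                /* cannot repair this element */
}

int main(void)
{
        printf("%d %d\n",
               repair_null_element(RTYPE_STRING | RTYPE_BUFFER),  /* 2 */
               repair_null_element(0));                           /* 0 */
        return 0;
}
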
index 726bc8e687f7a68bca895b881ce2bea09823fd87..90189251cdf0e0dce07001b85c84c791621e4c60 100644 (file)
@@ -149,8 +149,8 @@ static const struct acpi_repair_info acpi_ns_repairable_names[] = {
  *
  * FUNCTION:    acpi_ns_complex_repairs
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
- *              Node                - Namespace node for the method/object
+ * PARAMETERS:  data                - Pointer to validation data structure
+ *              node                - Namespace node for the method/object
  *              validate_status     - Original status of earlier validation
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
@@ -187,7 +187,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_match_repairable_name
  *
- * PARAMETERS:  Node                - Namespace node for the method/object
+ * PARAMETERS:  node                - Namespace node for the method/object
  *
  * RETURN:      Pointer to entry in repair table. NULL indicates not found.
  *
@@ -218,7 +218,7 @@ static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
  *
  * FUNCTION:    acpi_ns_repair_ALR
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
  *
@@ -247,7 +247,7 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_repair_FDE
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
  *
@@ -335,7 +335,7 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_repair_CID
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
  *
@@ -405,7 +405,7 @@ acpi_ns_repair_CID(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_repair_HID
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
  *
@@ -487,7 +487,7 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_repair_TSS
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
  *
@@ -531,7 +531,7 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_repair_PSS
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object_ptr   - Pointer to the object returned from the
  *                                    evaluation of a method or object
  *
@@ -600,7 +600,7 @@ acpi_ns_repair_PSS(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_check_sorted_list
  *
- * PARAMETERS:  Data                - Pointer to validation data structure
+ * PARAMETERS:  data                - Pointer to validation data structure
  *              return_object       - Pointer to the top-level returned object
  *              expected_count      - Minimum length of each sub-package
  *              sort_index          - Sub-package entry to sort on
@@ -707,9 +707,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
  *
  * FUNCTION:    acpi_ns_sort_list
  *
- * PARAMETERS:  Elements            - Package object element list
- *              Count               - Element count for above
- *              Index               - Sort by which package element
+ * PARAMETERS:  elements            - Package object element list
+ *              count               - Element count for above
+ *              index               - Sort by which package element
  *              sort_direction      - Ascending or Descending sort
  *
  * RETURN:      None
index 507043d6611428e939182515449eb288fde51395..456cc859f8698ecac208e20aa6e851a2d2a86237 100644 (file)
@@ -65,7 +65,7 @@ acpi_ns_search_parent_tree(u32 target_name,
  *
  * PARAMETERS:  target_name     - Ascii ACPI name to search for
  *              parent_node     - Starting node where search will begin
- *              Type            - Object type to match
+ *              type            - Object type to match
  *              return_node     - Where the matched Named obj is returned
  *
  * RETURN:      Status
@@ -175,8 +175,8 @@ acpi_ns_search_one_scope(u32 target_name,
  * FUNCTION:    acpi_ns_search_parent_tree
  *
  * PARAMETERS:  target_name     - Ascii ACPI name to search for
- *              Node            - Starting node where search will begin
- *              Type            - Object type to match
+ *              node            - Starting node where search will begin
+ *              type            - Object type to match
  *              return_node     - Where the matched Node is returned
  *
  * RETURN:      Status
@@ -264,11 +264,11 @@ acpi_ns_search_parent_tree(u32 target_name,
  *
  * PARAMETERS:  target_name         - Ascii ACPI name to search for (4 chars)
  *              walk_state          - Current state of the walk
- *              Node                - Starting node where search will begin
+ *              node                - Starting node where search will begin
  *              interpreter_mode    - Add names only in ACPI_MODE_LOAD_PASS_x.
 *                                    Otherwise, search only.
- *              Type                - Object type to match
- *              Flags               - Flags describing the search restrictions
+ *              type                - Object type to match
+ *              flags               - Flags describing the search restrictions
  *              return_node         - Where the Node is returned
  *
  * RETURN:      Status
index 75113759f69ddf22a07ce57b4f6117b6e1b4ca70..ef753a41e0872d43649961faf441dd77ad8ff728 100644 (file)
@@ -62,8 +62,8 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
  *
  * FUNCTION:    acpi_ns_print_node_pathname
  *
- * PARAMETERS:  Node            - Object
- *              Message         - Prefix message
+ * PARAMETERS:  node            - Object
+ *              message         - Prefix message
  *
  * DESCRIPTION: Print an object's full namespace pathname
  *              Manages allocation/freeing of a pathname buffer
@@ -101,7 +101,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_ns_valid_root_prefix
  *
- * PARAMETERS:  Prefix          - Character to be checked
+ * PARAMETERS:  prefix          - Character to be checked
  *
  * RETURN:      TRUE if a valid prefix
  *
@@ -119,7 +119,7 @@ u8 acpi_ns_valid_root_prefix(char prefix)
  *
  * FUNCTION:    acpi_ns_valid_path_separator
  *
- * PARAMETERS:  Sep         - Character to be checked
+ * PARAMETERS:  sep         - Character to be checked
  *
  * RETURN:      TRUE if a valid path separator
  *
@@ -137,7 +137,7 @@ static u8 acpi_ns_valid_path_separator(char sep)
  *
  * FUNCTION:    acpi_ns_get_type
  *
- * PARAMETERS:  Node        - Parent Node to be examined
+ * PARAMETERS:  node        - Parent Node to be examined
  *
  * RETURN:      Type field from Node whose handle is passed
  *
@@ -161,7 +161,7 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
  *
  * FUNCTION:    acpi_ns_local
  *
- * PARAMETERS:  Type        - A namespace object type
+ * PARAMETERS:  type        - A namespace object type
  *
  * RETURN:      LOCAL if names must be found locally in objects of the
  *              passed type, 0 if enclosing scopes should be searched
@@ -189,7 +189,7 @@ u32 acpi_ns_local(acpi_object_type type)
  *
  * FUNCTION:    acpi_ns_get_internal_name_length
  *
- * PARAMETERS:  Info            - Info struct initialized with the
+ * PARAMETERS:  info            - Info struct initialized with the
  *                                external name pointer.
  *
  * RETURN:      None
@@ -260,7 +260,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
  *
  * FUNCTION:    acpi_ns_build_internal_name
  *
- * PARAMETERS:  Info            - Info struct fully initialized
+ * PARAMETERS:  info            - Info struct fully initialized
  *
  * RETURN:      Status
  *
@@ -371,7 +371,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
  * FUNCTION:    acpi_ns_internalize_name
  *
  * PARAMETERS:  *external_name          - External representation of name
- *              **Converted Name        - Where to return the resulting
+ *              **Converted name        - Where to return the resulting
  *                                        internal represention of the name
 *                                        internal representation of the name
  * RETURN:      Status
@@ -575,7 +575,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
  *
  * FUNCTION:    acpi_ns_validate_handle
  *
- * PARAMETERS:  Handle          - Handle to be validated and typecast to a
+ * PARAMETERS:  handle          - Handle to be validated and typecast to a
  *                                namespace node.
  *
  * RETURN:      A pointer to a namespace node
@@ -651,7 +651,7 @@ void acpi_ns_terminate(void)
  *
  * FUNCTION:    acpi_ns_opens_scope
  *
- * PARAMETERS:  Type        - A valid namespace type
+ * PARAMETERS:  type        - A valid namespace type
  *
  * RETURN:      NEWSCOPE if the passed type "opens a name scope" according
  *              to the ACPI specification, else 0
@@ -677,14 +677,14 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
  *
  * FUNCTION:    acpi_ns_get_node
  *
- * PARAMETERS:  *Pathname   - Name to be found, in external (ASL) format. The
+ * PARAMETERS:  *pathname   - Name to be found, in external (ASL) format. The
 *                            \ (backslash) and ^ (caret) prefixes, and the
  *                            . (period) to separate segments are supported.
  *              prefix_node  - Root of subtree to be searched, or NS_ALL for the
  *                            root of the name space.  If Name is fully
  *                            qualified (first s8 is '\'), the passed value
  *                            of Scope will not be accessed.
- *              Flags       - Used to indicate whether to perform upsearch or
+ *              flags       - Used to indicate whether to perform upsearch or
  *                            not.
  *              return_node - Where the Node is returned
  *
index f69895a548957761476e6ee4f5f29805729b6187..730bccc5e7f71969540b2b5e425366711e6591e0 100644 (file)
@@ -88,7 +88,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
  *
  * FUNCTION:    acpi_ns_get_next_node_typed
  *
- * PARAMETERS:  Type                - Type of node to be searched for
+ * PARAMETERS:  type                - Type of node to be searched for
  *              parent_node         - Parent node whose children we are
  *                                    getting
  *              child_node          - Previous child that was found.
@@ -151,16 +151,16 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
  *
  * FUNCTION:    acpi_ns_walk_namespace
  *
- * PARAMETERS:  Type                - acpi_object_type to search for
+ * PARAMETERS:  type                - acpi_object_type to search for
  *              start_node          - Handle in namespace where search begins
  *              max_depth           - Depth to which search is to reach
- *              Flags               - Whether to unlock the NS before invoking
+ *              flags               - Whether to unlock the NS before invoking
  *                                    the callback routine
  *              pre_order_visit     - Called during tree pre-order visit
  *                                    when an object of "Type" is found
  *              post_order_visit    - Called during tree post-order visit
  *                                    when an object of "Type" is found
- *              Context             - Passed to user function(s) above
+ *              context             - Passed to user function(s) above
  *              return_value        - from the user_function if terminated
  *                                    early. Otherwise, returns NULL.
  * RETURNS:     Status
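
The two callbacks in the acpi_ns_walk_namespace header above map onto an ordinary recursive depth-first walk: the pre-order hook runs before a node's children are visited, the post-order hook after. A small generic sketch of that shape, over a toy tree rather than namespace nodes:

#include <stdio.h>

struct tnode {
        const char *name;
        struct tnode *child;    /* first child */
        struct tnode *sibling;  /* next sibling */
};

typedef void (*visit_fn)(const struct tnode *node, void *context);

/* Depth-first walk: pre_order before descending, post_order after */
static void walk(const struct tnode *node, visit_fn pre_order,
                 visit_fn post_order, void *context)
{
        const struct tnode *child;

        if (!node)
                return;
        if (pre_order)
                pre_order(node, context);
        for (child = node->child; child; child = child->sibling)
                walk(child, pre_order, post_order, context);
        if (post_order)
                post_order(node, context);
}

static void print_name(const struct tnode *node, void *context)
{
        printf("%s%s\n", (const char *)context, node->name);
}

int main(void)
{
        struct tnode leaf = { "PCI0", NULL, NULL };
        struct tnode sb   = { "_SB_", &leaf, NULL };
        struct tnode root = { "ROOT", &sb, NULL };

        walk(&root, print_name, NULL, "pre:  ");
        walk(&root, NULL, print_name, "post: ");
        return 0;
}
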
index 71d15f61807ba312e9f60b617c215a23ce643f9e..9692e6702333ab9fac520c024bcb590a86b7dae5 100644 (file)
@@ -58,8 +58,8 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
  *
  * FUNCTION:    acpi_evaluate_object_typed
  *
- * PARAMETERS:  Handle              - Object handle (optional)
- *              Pathname            - Object pathname (optional)
+ * PARAMETERS:  handle              - Object handle (optional)
+ *              pathname            - Object pathname (optional)
  *              external_params     - List of parameters to pass to method,
  *                                    terminated by NULL.  May be NULL
  *                                    if no parameters are being passed.
@@ -152,8 +152,8 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
  *
  * FUNCTION:    acpi_evaluate_object
  *
- * PARAMETERS:  Handle              - Object handle (optional)
- *              Pathname            - Object pathname (optional)
+ * PARAMETERS:  handle              - Object handle (optional)
+ *              pathname            - Object pathname (optional)
  *              external_params     - List of parameters to pass to method,
  *                                    terminated by NULL.  May be NULL
  *                                    if no parameters are being passed.
@@ -364,7 +364,7 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
  *
  * FUNCTION:    acpi_ns_resolve_references
  *
- * PARAMETERS:  Info                    - Evaluation info block
+ * PARAMETERS:  info                    - Evaluation info block
  *
  * RETURN:      Info->return_object is replaced with the dereferenced object
  *
@@ -431,14 +431,14 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
  *
  * FUNCTION:    acpi_walk_namespace
  *
- * PARAMETERS:  Type                - acpi_object_type to search for
+ * PARAMETERS:  type                - acpi_object_type to search for
  *              start_object        - Handle in namespace where search begins
  *              max_depth           - Depth to which search is to reach
  *              pre_order_visit     - Called during tree pre-order visit
  *                                    when an object of "Type" is found
  *              post_order_visit    - Called during tree post-order visit
  *                                    when an object of "Type" is found
- *              Context             - Passed to user function(s) above
+ *              context             - Passed to user function(s) above
  *              return_value        - Location where return value of
  *                                    user_function is put if terminated early
  *
@@ -646,7 +646,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
  *
  * PARAMETERS:  HID                 - HID to search for. Can be NULL.
  *              user_function       - Called when a matching object is found
- *              Context             - Passed to user function
+ *              context             - Passed to user function
  *              return_value        - Location where return value of
  *                                    user_function is put if terminated early
  *
@@ -716,8 +716,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_devices)
  * FUNCTION:    acpi_attach_data
  *
  * PARAMETERS:  obj_handle          - Namespace node
- *              Handler             - Handler for this attachment
- *              Data                - Pointer to data to be attached
+ *              handler             - Handler for this attachment
+ *              data                - Pointer to data to be attached
  *
  * RETURN:      Status
  *
@@ -764,7 +764,7 @@ ACPI_EXPORT_SYMBOL(acpi_attach_data)
  * FUNCTION:    acpi_detach_data
  *
  * PARAMETERS:  obj_handle          - Namespace node handle
- *              Handler             - Handler used in call to acpi_attach_data
+ *              handler             - Handler used in call to acpi_attach_data
  *
  * RETURN:      Status
  *
@@ -810,8 +810,8 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
  * FUNCTION:    acpi_get_data
  *
  * PARAMETERS:  obj_handle          - Namespace node
- *              Handler             - Handler used in call to attach_data
- *              Data                - Where the data is returned
+ *              handler             - Handler used in call to attach_data
+ *              data                - Where the data is returned
  *
  * RETURN:      Status
  *
index af401c9c4dfc04f51c5d2091b8aac799873fbd10..08e9610b34cac6a8a0415865e9f5ad20a004fc18 100644 (file)
@@ -61,8 +61,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
  *
  * FUNCTION:    acpi_get_handle
  *
- * PARAMETERS:  Parent          - Object to search under (search scope).
- *              Pathname        - Pointer to an asciiz string containing the
+ * PARAMETERS:  parent          - Object to search under (search scope).
+ *              pathname        - Pointer to an asciiz string containing the
  *                                name
  *              ret_handle      - Where the return handle is returned
  *
@@ -142,9 +142,9 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
  *
  * FUNCTION:    acpi_get_name
  *
- * PARAMETERS:  Handle          - Handle to be converted to a pathname
+ * PARAMETERS:  handle          - Handle to be converted to a pathname
  *              name_type       - Full pathname or single segment
- *              Buffer          - Buffer for returned path
+ *              buffer          - Buffer for returned path
  *
  * RETURN:      Pointer to a string containing the fully qualified Name.
  *
@@ -219,8 +219,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
  *
  * FUNCTION:    acpi_ns_copy_device_id
  *
- * PARAMETERS:  Dest                - Pointer to the destination DEVICE_ID
- *              Source              - Pointer to the source DEVICE_ID
+ * PARAMETERS:  dest                - Pointer to the destination DEVICE_ID
+ *              source              - Pointer to the source DEVICE_ID
  *              string_area         - Pointer to where to copy the dest string
  *
  * RETURN:      Pointer to the next string area
@@ -247,7 +247,7 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
  *
  * FUNCTION:    acpi_get_object_info
  *
- * PARAMETERS:  Handle              - Object Handle
+ * PARAMETERS:  handle              - Object Handle
  *              return_buffer       - Where the info is returned
  *
  * RETURN:      Status
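
 acpi_get_object_info() returns an ACPICA-allocated struct acpi_device_info that the caller must free. A hedged sketch, assuming handle refers to a valid device node:

 static void print_hid(acpi_handle handle)
 {
         struct acpi_device_info *info;

         if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
                 return;

         if (info->valid & ACPI_VALID_HID)
                 pr_info("HID: %s\n", info->hardware_id.string);

         kfree(info);    /* buffer is allocated by ACPICA, freed by the caller */
 }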
@@ -493,7 +493,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_object_info)
  *
  * FUNCTION:    acpi_install_method
  *
- * PARAMETERS:  Buffer         - An ACPI table containing one control method
+ * PARAMETERS:  buffer         - An ACPI table containing one control method
  *
  * RETURN:      Status
  *
index 880a605cee20e24b59d8173c0010c9ed456b11c2..6766fc4f088fc4baab21967004d1dae1703fc896 100644 (file)
@@ -98,7 +98,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_id)
  *
  * FUNCTION:    acpi_get_type
  *
- * PARAMETERS:  Handle          - Handle of object whose type is desired
+ * PARAMETERS:  handle          - Handle of object whose type is desired
  *              ret_type        - Where the type will be placed
  *
  * RETURN:      Status
@@ -151,7 +151,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
  *
  * FUNCTION:    acpi_get_parent
  *
- * PARAMETERS:  Handle          - Handle of object whose parent is desired
+ * PARAMETERS:  handle          - Handle of object whose parent is desired
  *              ret_handle      - Where the parent handle will be placed
  *
  * RETURN:      Status
@@ -212,8 +212,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
  *
  * FUNCTION:    acpi_get_next_object
  *
- * PARAMETERS:  Type            - Type of object to be searched for
- *              Parent          - Parent object whose children we are getting
+ * PARAMETERS:  type            - Type of object to be searched for
+ *              parent          - Parent object whose children we are getting
  *              last_child      - Previous child that was found.
  *                                The NEXT child will be returned
  *              ret_handle      - Where handle to the next object is placed
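
 acpi_get_next_object() enumerates children one at a time: a NULL last_child returns the first child, and each later call passes the handle returned previously until no more children are found. A sketch of a flat child walk that filters with acpi_get_type(); walk_children is a hypothetical helper:

 static void walk_children(acpi_handle parent)
 {
         acpi_handle child = NULL;
         acpi_object_type type;

         while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
                                                  child, &child))) {
                 if (ACPI_SUCCESS(acpi_get_type(child, &type)) &&
                     type == ACPI_TYPE_DEVICE)
                         ;       /* handle this child device */
         }
 }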
index 5ac36aba507c348192485115399f9cd8b02944fc..844464c4f901acac22f95c9f40bbc85bb33d9725 100644 (file)
@@ -210,7 +210,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
  * FUNCTION:    acpi_ps_get_next_namepath
  *
  * PARAMETERS:  parser_state        - Current parser state object
- *              Arg                 - Where the namepath will be stored
+ *              arg                 - Where the namepath will be stored
  *              arg_count           - If the namepath points to a control method
  *                                    the method's argument is returned here.
  *              possible_method_call - Whether the namepath can possibly be the
@@ -379,7 +379,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
  *
  * PARAMETERS:  parser_state        - Current parser state object
  *              arg_type            - The argument type (AML_*_ARG)
- *              Arg                 - Where the argument is returned
+ *              arg                 - Where the argument is returned
  *
  * RETURN:      None
  *
@@ -618,6 +618,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 
                                arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
                                if (!arg) {
+                                       acpi_ps_free_op(field);
                                        return_PTR(NULL);
                                }
 
@@ -662,6 +663,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
                } else {
                        arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
                        if (!arg) {
+                               acpi_ps_free_op(field);
                                return_PTR(NULL);
                        }
 
index 9547ad8a620bfc1e801e30a6bc8dfb1891288fb5..799162c1b6dfa1703b1ca29027e428700cf90db2 100644 (file)
@@ -167,7 +167,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
  * PARAMETERS:  walk_state          - Current state
  *              aml_op_start        - Begin of named Op in AML
  *              unnamed_op          - Early Op (not a named Op)
- *              Op                  - Returned Op
+ *              op                  - Returned Op
  *
  * RETURN:      Status
  *
@@ -323,7 +323,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
 
        if (walk_state->op_info->flags & AML_CREATE) {
                /*
-                * Backup to beginning of create_xXXfield declaration
+                * Backup to beginning of create_XXXfield declaration
                 * body_length is unknown until we parse the body
                 */
                op->named.data = aml_op_start;
@@ -380,7 +380,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
  *
  * PARAMETERS:  walk_state          - Current state
  *              aml_op_start        - Op start in AML
- *              Op                  - Current Op
+ *              op                  - Current Op
  *
  * RETURN:      Status
  *
@@ -679,8 +679,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
  * FUNCTION:    acpi_ps_complete_op
  *
  * PARAMETERS:  walk_state          - Current state
- *              Op                  - Returned Op
- *              Status              - Parse status before complete Op
+ *              op                  - Returned Op
+ *              status              - Parse status before complete Op
  *
  * RETURN:      Status
  *
@@ -853,8 +853,8 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
  * FUNCTION:    acpi_ps_complete_final_op
  *
  * PARAMETERS:  walk_state          - Current state
- *              Op                  - Current Op
- *              Status              - Current parse status before complete last
+ *              op                  - Current Op
+ *              status              - Current parse status before complete last
  *                                    Op
  *
  * RETURN:      Status
@@ -1165,7 +1165,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 
                if (walk_state->op_info->flags & AML_CREATE) {
                        /*
-                        * Backup to beginning of create_xXXfield declaration (1 for
+                        * Backup to beginning of create_XXXfield declaration (1 for
                         * Opcode)
                         *
                         * body_length is unknown until we parse the body
index a0226fdcf75c0406e7f8bc0a9388000985d44637..ed1d457bd5ca2fea2d117e0dad6e8fd440d28460 100644 (file)
@@ -724,7 +724,7 @@ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
  *
  * FUNCTION:    acpi_ps_get_opcode_info
  *
- * PARAMETERS:  Opcode              - The AML opcode
+ * PARAMETERS:  opcode              - The AML opcode
  *
  * RETURN:      A pointer to the info about the opcode.
  *
@@ -769,7 +769,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
  *
  * FUNCTION:    acpi_ps_get_opcode_name
  *
- * PARAMETERS:  Opcode              - The AML opcode
+ * PARAMETERS:  opcode              - The AML opcode
  *
  * RETURN:      A pointer to the name of the opcode (ASCII String)
  *              Note: Never returns NULL.
index 2ff9c35a19686be19b16a1b203936a6ca45982bc..01985703bb983c3d73b0fc080ca70d6d8748d7c7 100644 (file)
@@ -64,7 +64,7 @@ ACPI_MODULE_NAME("psparse")
  *
  * FUNCTION:    acpi_ps_get_opcode_size
  *
- * PARAMETERS:  Opcode          - An AML opcode
+ * PARAMETERS:  opcode          - An AML opcode
  *
  * RETURN:      Size of the opcode, in bytes (1 or 2)
  *
@@ -121,7 +121,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
  * FUNCTION:    acpi_ps_complete_this_op
  *
  * PARAMETERS:  walk_state      - Current State
- *              Op              - Op to complete
+ *              op              - Op to complete
  *
  * RETURN:      Status
  *
@@ -311,7 +311,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
  * FUNCTION:    acpi_ps_next_parse_state
  *
  * PARAMETERS:  walk_state          - Current state
- *              Op                  - Current parse op
+ *              op                  - Current parse op
  *              callback_status     - Status from previous operation
  *
  * RETURN:      Status
index c872aa4b926ec52bc1ddbec6921871529b97edcc..608dc20dc173b93845dba6025ccaf14d5d745ef7 100644 (file)
@@ -93,7 +93,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
  * FUNCTION:    acpi_ps_init_scope
  *
  * PARAMETERS:  parser_state        - Current parser state object
- *              Root                - the Root Node of this new scope
+ *              root                - the Root Node of this new scope
  *
  * RETURN:      Status
  *
@@ -131,7 +131,7 @@ acpi_ps_init_scope(struct acpi_parse_state * parser_state,
  * FUNCTION:    acpi_ps_push_scope
  *
  * PARAMETERS:  parser_state        - Current parser state object
- *              Op                  - Current op to be pushed
+ *              op                  - Current op to be pushed
  *              remaining_args      - List of args remaining
  *              arg_count           - Fixed or variable number of args
  *
@@ -184,7 +184,7 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
  * FUNCTION:    acpi_ps_pop_scope
  *
  * PARAMETERS:  parser_state        - Current parser state object
- *              Op                  - Where the popped op is returned
+ *              op                  - Where the popped op is returned
  *              arg_list            - Where the popped "next argument" is
  *                                    returned
  *              arg_count           - Count of objects in arg_list
index 2b03cdbbe1c0dc49436b6bc089b5a7bd771904db..fdb2e71f30461153036a117a7424036af1049b75 100644 (file)
@@ -58,8 +58,8 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op);
  *
  * FUNCTION:    acpi_ps_get_arg
  *
- * PARAMETERS:  Op              - Get an argument for this op
- *              Argn            - Nth argument to get
+ * PARAMETERS:  op              - Get an argument for this op
+ *              argn            - Nth argument to get
  *
  * RETURN:      The argument (as an Op object). NULL if argument does not exist
  *
@@ -114,8 +114,8 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
  *
  * FUNCTION:    acpi_ps_append_arg
  *
- * PARAMETERS:  Op              - Append an argument to this Op.
- *              Arg             - Argument Op to append
+ * PARAMETERS:  op              - Append an argument to this Op.
+ *              arg             - Argument Op to append
  *
  * RETURN:      None.
  *
@@ -188,8 +188,8 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
  *
  * FUNCTION:    acpi_ps_get_depth_next
  *
- * PARAMETERS:  Origin          - Root of subtree to search
- *              Op              - Last (previous) Op that was found
+ * PARAMETERS:  origin          - Root of subtree to search
+ *              op              - Last (previous) Op that was found
  *
  * RETURN:      Next Op found in the search.
  *
@@ -261,7 +261,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
  *
  * FUNCTION:    acpi_ps_get_child
  *
- * PARAMETERS:  Op              - Get the child of this Op
+ * PARAMETERS:  op              - Get the child of this Op
  *
  * RETURN:      Child Op, Null if none is found.
  *
index 13bb131ae12517040401a7cbfa02b8c3c0735422..8736ad5f04d3a83c1557deb0056bdcfa03c71e86 100644 (file)
@@ -77,8 +77,8 @@ union acpi_parse_object *acpi_ps_create_scope_op(void)
  *
  * FUNCTION:    acpi_ps_init_op
  *
- * PARAMETERS:  Op              - A newly allocated Op object
- *              Opcode          - Opcode to store in the Op
+ * PARAMETERS:  op              - A newly allocated Op object
+ *              opcode          - Opcode to store in the Op
  *
  * RETURN:      None
  *
@@ -103,7 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
  *
  * FUNCTION:    acpi_ps_alloc_op
  *
- * PARAMETERS:  Opcode          - Opcode that will be stored in the new Op
+ * PARAMETERS:  opcode          - Opcode that will be stored in the new Op
  *
  * RETURN:      Pointer to the new Op, null on failure
  *
@@ -160,7 +160,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
  *
  * FUNCTION:    acpi_ps_free_op
  *
- * PARAMETERS:  Op              - Op to be freed
+ * PARAMETERS:  op              - Op to be freed
  *
  * RETURN:      None.
  *
index 9d98c5ff66a5f8c08a0704a658d962b1f2d82ff7..963e1622579779c19d201e00c90e10188ddf9210 100644 (file)
@@ -66,7 +66,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
  * PARAMETERS:  method_name     - Valid ACPI name string
  *              debug_level     - Optional level mask. 0 to use default
  *              debug_layer     - Optional layer mask. 0 to use default
- *              Flags           - bit 1: one shot(1) or persistent(0)
+ *              flags           - bit 1: one shot(1) or persistent(0)
  *
  * RETURN:      Status
  *
@@ -105,7 +105,7 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
  *
  * FUNCTION:    acpi_ps_start_trace
  *
- * PARAMETERS:  Info        - Method info struct
+ * PARAMETERS:  info        - Method info struct
  *
  * RETURN:      None
  *
@@ -150,7 +150,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
  *
  * FUNCTION:    acpi_ps_stop_trace
  *
- * PARAMETERS:  Info        - Method info struct
+ * PARAMETERS:  info        - Method info struct
  *
  * RETURN:      None
  *
@@ -193,10 +193,10 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
  *
  * FUNCTION:    acpi_ps_execute_method
  *
- * PARAMETERS:  Info            - Method info block, contains:
- *                  Node            - Method Node to execute
+ * PARAMETERS:  info            - Method info block, contains:
+ *                  node            - Method Node to execute
  *                  obj_desc        - Method object
- *                  Parameters      - List of parameters to pass to the method,
+ *                  parameters      - List of parameters to pass to the method,
  *                                    terminated by NULL. Params itself may be
  *                                    NULL if no parameters are being passed.
  *                  return_object   - Where to put method's return value (if
@@ -361,9 +361,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
  *
  * FUNCTION:    acpi_ps_update_parameter_list
  *
- * PARAMETERS:  Info            - See struct acpi_evaluate_info
+ * PARAMETERS:  info            - See struct acpi_evaluate_info
  *                                (Used: parameter_type and Parameters)
- *              Action          - Add or Remove reference
+ *              action          - Add or Remove reference
  *
  * RETURN:      Status
  *
index a0305652394f1f03fbebc050e55ce5ad28bd1856..856ff075b6ab9a523b740f150bccba6561ee31ea 100644 (file)
@@ -182,8 +182,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
 
        /* Revision ID */
 
-       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_iD),
-        AML_OFFSET(ext_address64.revision_iD),
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_ID),
+        AML_OFFSET(ext_address64.revision_ID),
         1},
        /*
         * These fields are contiguous in both the source and destination:
@@ -215,7 +215,7 @@ static struct acpi_rsconvert_info acpi_rs_convert_general_flags[6] = {
         AML_OFFSET(address.resource_type),
         1},
 
-       /* General Flags - Consume, Decode, min_fixed, max_fixed */
+       /* General flags - Consume, Decode, min_fixed, max_fixed */
 
        {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer),
         AML_OFFSET(address.flags),
@@ -293,8 +293,8 @@ static struct acpi_rsconvert_info acpi_rs_convert_io_flags[4] = {
  *
  * FUNCTION:    acpi_rs_get_address_common
  *
- * PARAMETERS:  Resource            - Pointer to the internal resource struct
- *              Aml                 - Pointer to the AML resource descriptor
+ * PARAMETERS:  resource            - Pointer to the internal resource struct
+ *              aml                 - Pointer to the AML resource descriptor
  *
  * RETURN:      TRUE if the resource_type field is OK, FALSE otherwise
  *
@@ -343,8 +343,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
  *
  * FUNCTION:    acpi_rs_set_address_common
  *
- * PARAMETERS:  Aml                 - Pointer to the AML resource descriptor
- *              Resource            - Pointer to the internal resource struct
+ * PARAMETERS:  aml                 - Pointer to the AML resource descriptor
+ *              resource            - Pointer to the internal resource struct
  *
  * RETURN:      None
  *
index 3c6df4b7eb2ddc200575ea8849b905519efe6f87..de12469d1c9c765138920eae7064a1fdf8da4839 100644 (file)
@@ -173,7 +173,7 @@ acpi_rs_stream_option_length(u32 resource_length,
  *
  * FUNCTION:    acpi_rs_get_aml_length
  *
- * PARAMETERS:  Resource            - Pointer to the resource linked list
+ * PARAMETERS:  resource            - Pointer to the resource linked list
  *              size_needed         - Where the required size is returned
  *
  * RETURN:      Status
index 46d6eb38ae66f5598faba40c70ffaf4466e0459b..311cbc4f05fa18350e0b45c7b866a9b50503358a 100644 (file)
@@ -190,8 +190,8 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
  *
  * FUNCTION:    acpi_rs_create_pci_routing_table
  *
- * PARAMETERS:  package_object          - Pointer to a union acpi_operand_object
- *                                        package
+ * PARAMETERS:  package_object          - Pointer to a package containing one
+ *                                        or more ACPI_OPERAND_OBJECTs
  *              output_buffer           - Pointer to the user's buffer
  *
  * RETURN:      Status  AE_OK if okay, else a valid acpi_status code.
@@ -199,7 +199,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
  *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
  *              to the size buffer needed.
  *
- * DESCRIPTION: Takes the union acpi_operand_object    package and creates a
+ * DESCRIPTION: Takes the union acpi_operand_object package and creates a
  *              linked list of PCI interrupt descriptions
  *
  * NOTE: It is the caller's responsibility to ensure that the start of the
index b4c5811323932324becf8a803a85fc542dc20579..4d11b072388c910915ee063f711fccd99f04561a 100644 (file)
@@ -703,7 +703,7 @@ acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
  *
  * FUNCTION:    acpi_rs_dump_address_common
  *
- * PARAMETERS:  Resource        - Pointer to an internal resource descriptor
+ * PARAMETERS:  resource        - Pointer to an internal resource descriptor
  *
  * RETURN:      None
  *
@@ -850,8 +850,8 @@ void acpi_rs_dump_irq_list(u8 * route_table)
  *
  * FUNCTION:    acpi_rs_out*
  *
- * PARAMETERS:  Title       - Name of the resource field
- *              Value       - Value of the resource field
+ * PARAMETERS:  title       - Name of the resource field
+ *              value       - Value of the resource field
  *
  * RETURN:      None
  *
@@ -898,8 +898,8 @@ static void acpi_rs_out_title(char *title)
  *
  * FUNCTION:    acpi_rs_dump*List
  *
- * PARAMETERS:  Length      - Number of elements in the list
- *              Data        - Start of the list
+ * PARAMETERS:  length      - Number of elements in the list
+ *              data        - Start of the list
  *
  * RETURN:      None
  *
index 9be129f5d6f4ec431469a4bebf76a6f0b900df9a..46b5324b22d6236adb4e0ea6637b8e3be8dff501 100644 (file)
@@ -139,7 +139,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
  *
  * FUNCTION:    acpi_rs_convert_resources_to_aml
  *
- * PARAMETERS:  Resource            - Pointer to the resource linked list
+ * PARAMETERS:  resource            - Pointer to the resource linked list
  *              aml_size_needed     - Calculated size of the byte stream
  *                                    needed from calling acpi_rs_get_aml_length()
  *                                    The size of the output_buffer is
index 8073b371cc7cd32b44a4cab6dba88e3ae19b149f..c6f291c2bc8355e5ccb7a7d1a0c25f593448fab8 100644 (file)
@@ -57,9 +57,9 @@ ACPI_MODULE_NAME("rsmisc")
  *
  * FUNCTION:    acpi_rs_convert_aml_to_resource
  *
- * PARAMETERS:  Resource            - Pointer to the resource descriptor
- *              Aml                 - Where the AML descriptor is returned
- *              Info                - Pointer to appropriate conversion table
+ * PARAMETERS:  resource            - Pointer to the resource descriptor
+ *              aml                 - Where the AML descriptor is returned
+ *              info                - Pointer to appropriate conversion table
  *
  * RETURN:      Status
  *
@@ -406,7 +406,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
 
                case ACPI_RSC_EXIT_NE:
                        /*
-                        * Control - Exit conversion if not equal
+                        * control - Exit conversion if not equal
                         */
                        switch (info->resource_offset) {
                        case ACPI_RSC_COMPARE_AML_LENGTH:
@@ -454,9 +454,9 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
  *
  * FUNCTION:    acpi_rs_convert_resource_to_aml
  *
- * PARAMETERS:  Resource            - Pointer to the resource descriptor
- *              Aml                 - Where the AML descriptor is returned
- *              Info                - Pointer to appropriate conversion table
+ * PARAMETERS:  resource            - Pointer to the resource descriptor
+ *              aml                 - Where the AML descriptor is returned
+ *              info                - Pointer to appropriate conversion table
  *
  * RETURN:      Status
  *
@@ -726,7 +726,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 
                case ACPI_RSC_EXIT_LE:
                        /*
-                        * Control - Exit conversion if less than or equal
+                        * control - Exit conversion if less than or equal
                         */
                        if (item_count <= info->value) {
                                goto exit;
@@ -735,7 +735,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 
                case ACPI_RSC_EXIT_NE:
                        /*
-                        * Control - Exit conversion if not equal
+                        * control - Exit conversion if not equal
                         */
                        switch (COMPARE_OPCODE(info)) {
                        case ACPI_RSC_COMPARE_VALUE:
@@ -757,7 +757,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 
                case ACPI_RSC_EXIT_EQ:
                        /*
-                        * Control - Exit conversion if equal
+                        * control - Exit conversion if equal
                         */
                        if (*ACPI_ADD_PTR(u8, resource,
                                          COMPARE_TARGET(info)) ==
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 #if 0
 /* Previous resource validations */
 
-if (aml->ext_address64.revision_iD != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
+if (aml->ext_address64.revision_ID != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
        return_ACPI_STATUS(AE_SUPPORT);
 }
 
index 433a375deb9350e9635b103e3c593d6e7c3838da..37d5241c0acf731d2581bb51f68c84f5ae4643d5 100644 (file)
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("rsutils")
  *
  * FUNCTION:    acpi_rs_decode_bitmask
  *
- * PARAMETERS:  Mask            - Bitmask to decode
- *              List            - Where the converted list is returned
+ * PARAMETERS:  mask            - Bitmask to decode
+ *              list            - Where the converted list is returned
  *
  * RETURN:      Count of bits set (length of list)
  *
@@ -86,8 +86,8 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
  *
  * FUNCTION:    acpi_rs_encode_bitmask
  *
- * PARAMETERS:  List            - List of values to encode
- *              Count           - Length of list
+ * PARAMETERS:  list            - List of values to encode
+ *              count           - Length of list
  *
  * RETURN:      Encoded bitmask
  *
@@ -115,8 +115,8 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
  *
  * FUNCTION:    acpi_rs_move_data
  *
- * PARAMETERS:  Destination         - Pointer to the destination descriptor
- *              Source              - Pointer to the source descriptor
+ * PARAMETERS:  destination         - Pointer to the destination descriptor
+ *              source              - Pointer to the source descriptor
  *              item_count          - How many items to move
  *              move_type           - Byte width
  *
@@ -183,7 +183,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
  *
  * PARAMETERS:  total_length        - Length of the AML descriptor, including
  *                                    the header and length fields.
- *              Aml                 - Pointer to the raw AML descriptor
+ *              aml                 - Pointer to the raw AML descriptor
  *
  * RETURN:      None
  *
@@ -235,7 +235,7 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
  * PARAMETERS:  descriptor_type     - Byte to be inserted as the type
  *              total_length        - Length of the AML descriptor, including
  *                                    the header and length fields.
- *              Aml                 - Pointer to the raw AML descriptor
+ *              aml                 - Pointer to the raw AML descriptor
  *
  * RETURN:      None
  *
@@ -265,8 +265,8 @@ acpi_rs_set_resource_header(u8 descriptor_type,
  *
  * FUNCTION:    acpi_rs_strcpy
  *
- * PARAMETERS:  Destination         - Pointer to the destination string
- *              Source              - Pointer to the source string
+ * PARAMETERS:  destination         - Pointer to the destination string
+ *              source              - Pointer to the source string
  *
  * RETURN:      String length, including NULL terminator
  *
@@ -300,7 +300,7 @@ static u16 acpi_rs_strcpy(char *destination, char *source)
  *              minimum_length      - Minimum length of the descriptor (minus
  *                                    any optional fields)
  *              resource_source     - Where the resource_source is returned
- *              Aml                 - Pointer to the raw AML descriptor
+ *              aml                 - Pointer to the raw AML descriptor
  *              string_ptr          - (optional) where to store the actual
  *                                    resource_source string
  *
@@ -386,7 +386,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
  *
  * FUNCTION:    acpi_rs_set_resource_source
  *
- * PARAMETERS:  Aml                 - Pointer to the raw AML descriptor
+ * PARAMETERS:  aml                 - Pointer to the raw AML descriptor
  *              minimum_length      - Minimum length of the descriptor (minus
  *                                    any optional fields)
  *              resource_source     - Internal resource_source
@@ -445,7 +445,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
  *
  * FUNCTION:    acpi_rs_get_prt_method_data
  *
- * PARAMETERS:  Node            - Device node
+ * PARAMETERS:  node            - Device node
  *              ret_buffer      - Pointer to a buffer structure for the
  *                                results
  *
@@ -494,7 +494,7 @@ acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
  *
  * FUNCTION:    acpi_rs_get_crs_method_data
  *
- * PARAMETERS:  Node            - Device node
+ * PARAMETERS:  node            - Device node
  *              ret_buffer      - Pointer to a buffer structure for the
  *                                results
  *
@@ -534,7 +534,7 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
         */
        status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
 
-       /* On exit, we must delete the object returned by evaluate_object */
+       /* On exit, we must delete the object returned by evaluateObject */
 
        acpi_ut_remove_reference(obj_desc);
        return_ACPI_STATUS(status);
@@ -544,7 +544,7 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_rs_get_prs_method_data
  *
- * PARAMETERS:  Node            - Device node
+ * PARAMETERS:  node            - Device node
  *              ret_buffer      - Pointer to a buffer structure for the
  *                                results
  *
@@ -585,7 +585,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
         */
        status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
 
-       /* On exit, we must delete the object returned by evaluate_object */
+       /* On exit, we must delete the object returned by evaluateObject */
 
        acpi_ut_remove_reference(obj_desc);
        return_ACPI_STATUS(status);
@@ -596,7 +596,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_rs_get_aei_method_data
  *
- * PARAMETERS:  Node            - Device node
+ * PARAMETERS:  node            - Device node
  *              ret_buffer      - Pointer to a buffer structure for the
  *                                results
  *
@@ -636,7 +636,7 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
         */
        status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
 
-       /* On exit, we must delete the object returned by evaluate_object */
+       /* On exit, we must delete the object returned by evaluateObject */
 
        acpi_ut_remove_reference(obj_desc);
        return_ACPI_STATUS(status);
@@ -646,8 +646,8 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
  *
  * FUNCTION:    acpi_rs_get_method_data
  *
- * PARAMETERS:  Handle          - Handle to the containing object
- *              Path            - Path to method, relative to Handle
+ * PARAMETERS:  handle          - Handle to the containing object
+ *              path            - Path to method, relative to Handle
  *              ret_buffer      - Pointer to a buffer structure for the
  *                                results
  *
@@ -697,7 +697,7 @@ acpi_rs_get_method_data(acpi_handle handle,
  *
  * FUNCTION:    acpi_rs_set_srs_method_data
  *
- * PARAMETERS:  Node            - Device node
+ * PARAMETERS:  node            - Device node
  *              in_buffer       - Pointer to a buffer structure of the
  *                                parameter
  *
index f58c098c7aeb3fd5f5e35b532fbbd82da6eab42a..5aad744b5b833088323352110020c8c316abcd3d 100644 (file)
@@ -79,7 +79,7 @@ acpi_rs_validate_parameters(acpi_handle device_handle,
  * FUNCTION:    acpi_rs_validate_parameters
  *
  * PARAMETERS:  device_handle   - Handle to a device
- *              Buffer          - Pointer to a data buffer
+ *              buffer          - Pointer to a data buffer
  *              return_node     - Pointer to where the device node is returned
  *
  * RETURN:      Status
@@ -351,8 +351,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
  *
  * FUNCTION:    acpi_resource_to_address64
  *
- * PARAMETERS:  Resource        - Pointer to a resource
- *              Out             - Pointer to the users's return buffer
+ * PARAMETERS:  resource        - Pointer to a resource
+ *              out             - Pointer to the user's return buffer
  *                                (a struct acpi_resource_address64)
  *
  * RETURN:      Status
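
 acpi_resource_to_address64() folds the 16-, 32- and 64-bit address-space descriptors into a single struct acpi_resource_address64, so callers need not switch on the descriptor width. A sketch of a resource callback using it; the field names follow the layout in this tree (top-level minimum/maximum), and print_mem_window is a hypothetical name:

 static acpi_status print_mem_window(struct acpi_resource *res, void *context)
 {
         struct acpi_resource_address64 addr;

         if (ACPI_SUCCESS(acpi_resource_to_address64(res, &addr)) &&
             addr.resource_type == ACPI_MEMORY_RANGE)
                 pr_info("memory window 0x%llx-0x%llx\n",
                         (unsigned long long)addr.minimum,
                         (unsigned long long)addr.maximum);

         return AE_OK;
 }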
@@ -415,9 +415,9 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
  * FUNCTION:    acpi_get_vendor_resource
  *
  * PARAMETERS:  device_handle   - Handle for the parent device object
- *              Name            - Method name for the parent resource
+ *              name            - Method name for the parent resource
  *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
- *              Uuid            - Pointer to the UUID to be matched.
+ *              uuid            - Pointer to the UUID to be matched.
  *                                includes both subtype and 16-byte UUID
  *              ret_buffer      - Where the vendor resource is returned
  *
@@ -526,11 +526,11 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
  *
  * PARAMETERS:  device_handle   - Handle to the device object for the
  *                                device we are querying
- *              Name            - Method name of the resources we want.
+ *              name            - Method name of the resources we want.
  *                                (METHOD_NAME__CRS, METHOD_NAME__PRS, or
  *                                METHOD_NAME__AEI)
  *              user_function   - Called for each resource
- *              Context         - Passed to user_function
+ *              context         - Passed to user_function
  *
  * RETURN:      Status
  *
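
 The parameters above belong to the resource-walk entry point (acpi_walk_resources()), which runs a callback of type acpi_status (*)(struct acpi_resource *, void *) over each descriptor returned by _CRS, _PRS or _AEI. A sketch that walks a device's current resources; dump_crs is hypothetical and reuses print_mem_window from the earlier sketch:

 static void dump_crs(acpi_handle device)
 {
         acpi_status status;

         status = acpi_walk_resources(device, METHOD_NAME__CRS,
                                      print_mem_window, NULL);
         if (ACPI_FAILURE(status))
                 pr_warn("_CRS walk failed: %s\n",
                         acpi_format_exception(status));
 }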
index 4c9c760db4a469f2db6291ac2437f9308002c15f..390651860bf09580f62787758e4ba1b76a80944a 100644 (file)
 ACPI_MODULE_NAME("tbfadt")
 
 /* Local prototypes */
-static ACPI_INLINE void
+static void
 acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
-                            u8 space_id, u8 byte_width, u64 address);
+                            u8 space_id,
+                            u8 byte_width, u64 address, char *register_name);
 
 static void acpi_tb_convert_fadt(void);
 
@@ -172,7 +173,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
  *
  * PARAMETERS:  generic_address     - GAS struct to be initialized
  *              byte_width          - Width of this register
- *              Address             - Address of the register
+ *              address             - Address of the register
  *
  * RETURN:      None
  *
@@ -182,10 +183,25 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
  *
  ******************************************************************************/
 
-static ACPI_INLINE void
+static void
 acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
-                            u8 space_id, u8 byte_width, u64 address)
+                            u8 space_id,
+                            u8 byte_width, u64 address, char *register_name)
 {
+       u8 bit_width;
+
+       /* Bit width field in the GAS is only one byte long, 255 max */
+
+       bit_width = (u8)(byte_width * 8);
+
+       if (byte_width > 31) {  /* (31*8)=248 */
+               ACPI_ERROR((AE_INFO,
+                           "%s - 32-bit FADT register is too long (%u bytes, %u bits) "
+                           "to convert to GAS struct - 255 bits max, truncating",
+                           register_name, byte_width, (byte_width * 8)));
+
+               bit_width = 255;
+       }
 
        /*
         * The 64-bit Address field is non-aligned in the byte packed
@@ -196,7 +212,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
        /* All other fields are byte-wide */
 
        generic_address->space_id = space_id;
-       generic_address->bit_width = (u8)ACPI_MUL_8(byte_width);
+       generic_address->bit_width = bit_width;
        generic_address->bit_offset = 0;
        generic_address->access_width = 0;      /* Access width ANY */
 }
@@ -267,8 +283,8 @@ void acpi_tb_parse_fadt(u32 table_index)
  *
  * FUNCTION:    acpi_tb_create_local_fadt
  *
- * PARAMETERS:  Table               - Pointer to BIOS FADT
- *              Length              - Length of the table
+ * PARAMETERS:  table               - Pointer to BIOS FADT
+ *              length              - Length of the table
  *
  * RETURN:      None
  *
@@ -287,11 +303,11 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
         * a warning.
         */
        if (length > sizeof(struct acpi_table_fadt)) {
-               ACPI_WARNING((AE_INFO,
-                             "FADT (revision %u) is longer than ACPI 5.0 version, "
-                             "truncating length %u to %u",
-                             table->revision, length,
-                             (u32)sizeof(struct acpi_table_fadt)));
+               ACPI_BIOS_WARNING((AE_INFO,
+                                  "FADT (revision %u) is longer than ACPI 5.0 version, "
+                                  "truncating length %u to %u",
+                                  table->revision, length,
+                                  (u32)sizeof(struct acpi_table_fadt)));
        }
 
        /* Clear the entire local FADT */
@@ -436,11 +452,13 @@ static void acpi_tb_convert_fadt(void)
                 * they must match.
                 */
                if (address64->address && address32 &&
-                   (address64->address != (u64) address32)) {
-                       ACPI_ERROR((AE_INFO,
-                                   "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
-                                   fadt_info_table[i].name, address32,
-                                   ACPI_FORMAT_UINT64(address64->address)));
+                   (address64->address != (u64)address32)) {
+                       ACPI_BIOS_ERROR((AE_INFO,
+                                        "32/64X address mismatch in FADT/%s: "
+                                        "0x%8.8X/0x%8.8X%8.8X, using 32",
+                                        fadt_info_table[i].name, address32,
+                                        ACPI_FORMAT_UINT64(address64->
+                                                           address)));
                }
 
                /* Always use 32-bit address if it is valid (non-null) */
@@ -456,7 +474,8 @@ static void acpi_tb_convert_fadt(void)
                                                                   &acpi_gbl_FADT,
                                                                   fadt_info_table
                                                                   [i].length),
-                                                    (u64) address32);
+                                                    (u64) address32,
+                                                    fadt_info_table[i].name);
                }
        }
 }
@@ -465,7 +484,7 @@ static void acpi_tb_convert_fadt(void)
  *
  * FUNCTION:    acpi_tb_validate_fadt
  *
- * PARAMETERS:  Table           - Pointer to the FADT to be validated
+ * PARAMETERS:  table           - Pointer to the FADT to be validated
  *
  * RETURN:      None
  *
@@ -494,25 +513,25 @@ static void acpi_tb_validate_fadt(void)
         * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
         */
        if (acpi_gbl_FADT.facs &&
-           (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
-               ACPI_WARNING((AE_INFO,
-                             "32/64X FACS address mismatch in FADT - "
-                             "0x%8.8X/0x%8.8X%8.8X, using 32",
-                             acpi_gbl_FADT.facs,
-                             ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
-
-               acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
+           (acpi_gbl_FADT.Xfacs != (u64)acpi_gbl_FADT.facs)) {
+               ACPI_BIOS_WARNING((AE_INFO,
+                                  "32/64X FACS address mismatch in FADT - "
+                                  "0x%8.8X/0x%8.8X%8.8X, using 32",
+                                  acpi_gbl_FADT.facs,
+                                  ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
+
+               acpi_gbl_FADT.Xfacs = (u64)acpi_gbl_FADT.facs;
        }
 
        if (acpi_gbl_FADT.dsdt &&
-           (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
-               ACPI_WARNING((AE_INFO,
-                             "32/64X DSDT address mismatch in FADT - "
-                             "0x%8.8X/0x%8.8X%8.8X, using 32",
-                             acpi_gbl_FADT.dsdt,
-                             ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
-
-               acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
+           (acpi_gbl_FADT.Xdsdt != (u64)acpi_gbl_FADT.dsdt)) {
+               ACPI_BIOS_WARNING((AE_INFO,
+                                  "32/64X DSDT address mismatch in FADT - "
+                                  "0x%8.8X/0x%8.8X%8.8X, using 32",
+                                  acpi_gbl_FADT.dsdt,
+                                  ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
+
+               acpi_gbl_FADT.Xdsdt = (u64)acpi_gbl_FADT.dsdt;
        }
 
        /* If Hardware Reduced flag is set, we are all done */
@@ -542,10 +561,10 @@ static void acpi_tb_validate_fadt(void)
                 */
                if (address64->address &&
                    (address64->bit_width != ACPI_MUL_8(length))) {
-                       ACPI_WARNING((AE_INFO,
-                                     "32/64X length mismatch in %s: %u/%u",
-                                     name, ACPI_MUL_8(length),
-                                     address64->bit_width));
+                       ACPI_BIOS_WARNING((AE_INFO,
+                                          "32/64X length mismatch in FADT/%s: %u/%u",
+                                          name, ACPI_MUL_8(length),
+                                          address64->bit_width));
                }
 
                if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
@@ -554,29 +573,29 @@ static void acpi_tb_validate_fadt(void)
                         * Both the address and length must be non-zero.
                         */
                        if (!address64->address || !length) {
-                               ACPI_ERROR((AE_INFO,
-                                           "Required field %s has zero address and/or length:"
-                                           "0x%8.8X%8.8X/0x%X",
-                                           name,
-                                           ACPI_FORMAT_UINT64(address64->
-                                                              address),
-                                           length));
+                               ACPI_BIOS_ERROR((AE_INFO,
+                                                "Required FADT field %s has zero address and/or length: "
+                                                "0x%8.8X%8.8X/0x%X",
+                                                name,
+                                                ACPI_FORMAT_UINT64(address64->
+                                                                   address),
+                                                length));
                        }
                } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
                        /*
-                        * Field is optional (PM2Control, GPE0, GPE1) AND has its own
+                        * Field is optional (Pm2_control, GPE0, GPE1) AND has its own
                         * length field. If present, both the address and length must
                         * be valid.
                         */
                        if ((address64->address && !length) ||
                            (!address64->address && length)) {
-                               ACPI_WARNING((AE_INFO,
-                                             "Optional field %s has zero address or length: "
-                                             "0x%8.8X%8.8X/0x%X",
-                                             name,
-                                             ACPI_FORMAT_UINT64(address64->
-                                                                address),
-                                             length));
+                               ACPI_BIOS_WARNING((AE_INFO,
+                                                  "Optional FADT field %s has zero address or length: "
+                                                  "0x%8.8X%8.8X/0x%X",
+                                                  name,
+                                                  ACPI_FORMAT_UINT64
+                                                  (address64->address),
+                                                  length));
                        }
                }
        }
@@ -621,12 +640,12 @@ static void acpi_tb_setup_fadt_registers(void)
                            (fadt_info_table[i].default_length > 0) &&
                            (fadt_info_table[i].default_length !=
                             target64->bit_width)) {
-                               ACPI_WARNING((AE_INFO,
-                                             "Invalid length for %s: %u, using default %u",
-                                             fadt_info_table[i].name,
-                                             target64->bit_width,
-                                             fadt_info_table[i].
-                                             default_length));
+                               ACPI_BIOS_WARNING((AE_INFO,
+                                                  "Invalid length for FADT/%s: %u, using default %u",
+                                                  fadt_info_table[i].name,
+                                                  target64->bit_width,
+                                                  fadt_info_table[i].
+                                                  default_length));
 
                                /* Incorrect size, set width to the default */
 
@@ -670,7 +689,8 @@ static void acpi_tb_setup_fadt_registers(void)
                                                     source64->address +
                                                     (fadt_pm_info_table[i].
                                                      register_num *
-                                                     pm1_register_byte_width));
+                                                     pm1_register_byte_width),
+                                                    "PmRegisters");
                }
        }
 }
index 4903e36ea75a3f4f24930c43b497626b930d8f0e..57deae16657789444fcc3a9d8728b29732abfa05 100644 (file)
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("tbfind")
  *
  * FUNCTION:    acpi_tb_find_table
  *
- * PARAMETERS:  Signature           - String with ACPI table signature
+ * PARAMETERS:  signature           - String with ACPI table signature
  *              oem_id              - String with the table OEM ID
  *              oem_table_id        - String with the OEM Table ID
  *              table_index         - Where the table index is returned
index c03500b4cc7ac999bc757205fd765708745220a9..74f97d74db1c7772bf79bc39eb0daea11ebf3049 100644 (file)
@@ -138,13 +138,14 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
        if ((table_desc->pointer->signature[0] != 0x00) &&
            (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
            && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
-               ACPI_ERROR((AE_INFO,
-                           "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
-                           acpi_ut_valid_acpi_name(*(u32 *)table_desc->
-                                                   pointer->
-                                                   signature) ? table_desc->
-                           pointer->signature : "????",
-                           *(u32 *)table_desc->pointer->signature));
+               ACPI_BIOS_ERROR((AE_INFO,
+                                "Table has invalid signature [%4.4s] (0x%8.8X), "
+                                "must be SSDT or OEMx",
+                                acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+                                                        pointer->
+                                                        signature) ?
+                                table_desc->pointer->signature : "????",
+                                *(u32 *)table_desc->pointer->signature));
 
                return_ACPI_STATUS(AE_BAD_SIGNATURE);
        }
@@ -396,10 +397,10 @@ acpi_status acpi_tb_resize_root_table_list(void)
  *
  * FUNCTION:    acpi_tb_store_table
  *
- * PARAMETERS:  Address             - Table address
- *              Table               - Table header
- *              Length              - Table length
- *              Flags               - flags
+ * PARAMETERS:  address             - Table address
+ *              table               - Table header
+ *              length              - Table length
+ *              flags               - flags
  *
  * RETURN:      Status and table index.
  *
index 0a706cac37de0736c6c758223ba242a16412c6c8..b6cea30da63879a970944a54642f2f404b3c918a 100644 (file)
@@ -178,8 +178,8 @@ u8 acpi_tb_tables_loaded(void)
  *
  * FUNCTION:    acpi_tb_fix_string
  *
- * PARAMETERS:  String              - String to be repaired
- *              Length              - Maximum length
+ * PARAMETERS:  string              - String to be repaired
+ *              length              - Maximum length
  *
  * RETURN:      None
  *
@@ -205,7 +205,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length)
  * FUNCTION:    acpi_tb_cleanup_table_header
  *
  * PARAMETERS:  out_header          - Where the cleaned header is returned
- *              Header              - Input ACPI table header
+ *              header              - Input ACPI table header
  *
  * RETURN:      Returns the cleaned header in out_header
  *
@@ -231,8 +231,8 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
  *
  * FUNCTION:    acpi_tb_print_table_header
  *
- * PARAMETERS:  Address             - Table physical address
- *              Header              - Table header
+ * PARAMETERS:  address             - Table physical address
+ *              header              - Table header
  *
  * RETURN:      None
  *
@@ -296,8 +296,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
  *
  * FUNCTION:    acpi_tb_validate_checksum
  *
- * PARAMETERS:  Table               - ACPI table to verify
- *              Length              - Length of entire table
+ * PARAMETERS:  table               - ACPI table to verify
+ *              length              - Length of entire table
  *
  * RETURN:      Status
  *
@@ -317,10 +317,11 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
        /* Checksum ok? (should be zero) */
 
        if (checksum) {
-               ACPI_WARNING((AE_INFO,
-                             "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
-                             table->signature, table->checksum,
-                             (u8) (table->checksum - checksum)));
+               ACPI_BIOS_WARNING((AE_INFO,
+                                  "Incorrect checksum in table [%4.4s] - 0x%2.2X, "
+                                  "should be 0x%2.2X",
+                                  table->signature, table->checksum,
+                                  (u8)(table->checksum - checksum)));
 
 #if (ACPI_CHECKSUM_ABORT)
 
@@ -335,8 +336,8 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
  *
  * FUNCTION:    acpi_tb_checksum
  *
- * PARAMETERS:  Buffer          - Pointer to memory region to be checked
- *              Length          - Length of this memory region
+ * PARAMETERS:  buffer          - Pointer to memory region to be checked
+ *              length          - Length of this memory region
  *
  * RETURN:      Checksum (u8)
  *
@@ -377,8 +378,9 @@ void acpi_tb_check_dsdt_header(void)
 
        if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
            acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
-               ACPI_ERROR((AE_INFO,
-                           "The DSDT has been corrupted or replaced - old, new headers below"));
+               ACPI_BIOS_ERROR((AE_INFO,
+                                "The DSDT has been corrupted or replaced - "
+                                "old, new headers below"));
                acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
                acpi_tb_print_table_header(0, acpi_gbl_DSDT);
 
@@ -438,8 +440,8 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
  *
  * FUNCTION:    acpi_tb_install_table
  *
- * PARAMETERS:  Address                 - Physical address of DSDT or FACS
- *              Signature               - Table signature, NULL if no need to
+ * PARAMETERS:  address                 - Physical address of DSDT or FACS
+ *              signature               - Table signature, NULL if no need to
  *                                        match
  *              table_index             - Index into root table array
  *
@@ -480,9 +482,10 @@ acpi_tb_install_table(acpi_physical_address address,
        /* If a particular signature is expected (DSDT/FACS), it must match */
 
        if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
-               ACPI_ERROR((AE_INFO,
-                           "Invalid signature 0x%X for ACPI table, expected [%s]",
-                           *ACPI_CAST_PTR(u32, table->signature), signature));
+               ACPI_BIOS_ERROR((AE_INFO,
+                                "Invalid signature 0x%X for ACPI table, expected [%s]",
+                                *ACPI_CAST_PTR(u32, table->signature),
+                                signature));
                goto unmap_and_exit;
        }
 
@@ -589,10 +592,10 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
 
                        /* Will truncate 64-bit address to 32 bits, issue warning */
 
-                       ACPI_WARNING((AE_INFO,
-                                     "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
-                                     " truncating",
-                                     ACPI_FORMAT_UINT64(address64)));
+                       ACPI_BIOS_WARNING((AE_INFO,
+                                          "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
+                                          " truncating",
+                                          ACPI_FORMAT_UINT64(address64)));
                }
 #endif
                return ((acpi_physical_address) (address64));
@@ -603,7 +606,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
  *
  * FUNCTION:    acpi_tb_parse_root_table
  *
- * PARAMETERS:  Rsdp                    - Pointer to the RSDP
+ * PARAMETERS:  rsdp                    - Pointer to the RSDP
  *
  * RETURN:      Status
  *
@@ -694,8 +697,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
        acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
 
        if (length < sizeof(struct acpi_table_header)) {
-               ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
-                           length));
+               ACPI_BIOS_ERROR((AE_INFO,
+                                "Invalid table length 0x%X in RSDT/XSDT",
+                                length));
                return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
        }
 
index abcc6412c24492129b8bb003963a7f7e826f80fc..ea4c6d52605a2af79f141efab1f2615c87b0cab1 100644 (file)
@@ -1,7 +1,6 @@
 /******************************************************************************
  *
- * Module Name: tbxface - Public interfaces to the ACPI subsystem
- *                         ACPI table oriented interfaces
+ * Module Name: tbxface - ACPI table oriented external interfaces
  *
  *****************************************************************************/
 
 #define _COMPONENT          ACPI_TABLES
 ACPI_MODULE_NAME("tbxface")
 
-/* Local prototypes */
-static acpi_status acpi_tb_load_namespace(void);
-
-static int no_auto_ssdt;
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_allocate_root_table
@@ -65,11 +59,10 @@ static int no_auto_ssdt;
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
+ * DESCRIPTION: Allocate a root table array. Used by iASL compiler and
  *              acpi_initialize_tables.
  *
  ******************************************************************************/
-
 acpi_status acpi_allocate_root_table(u32 initial_table_count)
 {
 
@@ -220,54 +213,12 @@ acpi_status acpi_reallocate_root_table(void)
        return_ACPI_STATUS(AE_OK);
 }
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_load_table
- *
- * PARAMETERS:  table_ptr       - pointer to a buffer containing the entire
- *                                table to be loaded
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to load a table from the caller's
- *              buffer. The buffer must contain an entire ACPI Table including
- *              a valid header. The header fields will be verified, and if it
- *              is determined that the table is invalid, the call will fail.
- *
- ******************************************************************************/
-acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
-{
-       acpi_status status;
-       u32 table_index;
-       struct acpi_table_desc table_desc;
-
-       if (!table_ptr)
-               return AE_BAD_PARAMETER;
-
-       ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
-       table_desc.pointer = table_ptr;
-       table_desc.length = table_ptr->length;
-       table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
-
-       /*
-        * Install the new table into the local data structures
-        */
-       status = acpi_tb_add_table(&table_desc, &table_index);
-       if (ACPI_FAILURE(status)) {
-               return status;
-       }
-       status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
-       return status;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_table)
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_table_header
  *
- * PARAMETERS:  Signature           - ACPI signature of needed table
- *              Instance            - Which instance (for SSDTs)
+ * PARAMETERS:  signature           - ACPI signature of needed table
+ *              instance            - Which instance (for SSDTs)
  *              out_table_header    - The pointer to the table header to fill
  *
  * RETURN:      Status and pointer to mapped table header
@@ -382,8 +333,8 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
  *
  * FUNCTION:    acpi_get_table_with_size
  *
- * PARAMETERS:  Signature           - ACPI signature of needed table
- *              Instance            - Which instance (for SSDTs)
+ * PARAMETERS:  signature           - ACPI signature of needed table
+ *              instance            - Which instance (for SSDTs)
  *              out_table           - Where the pointer to the table is returned
  *
  * RETURN:      Status and pointer to table
@@ -453,7 +404,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
  * FUNCTION:    acpi_get_table_by_index
  *
  * PARAMETERS:  table_index         - Table index
- *              Table               - Where the pointer to the table is returned
+ *              table               - Where the pointer to the table is returned
  *
  * RETURN:      Status and pointer to the table
  *
@@ -502,157 +453,13 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
 
 ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_load_namespace
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
- *              the RSDT/XSDT.
- *
- ******************************************************************************/
-static acpi_status acpi_tb_load_namespace(void)
-{
-       acpi_status status;
-       u32 i;
-       struct acpi_table_header *new_dsdt;
-
-       ACPI_FUNCTION_TRACE(tb_load_namespace);
-
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
-       /*
-        * Load the namespace. The DSDT is required, but any SSDT and
-        * PSDT tables are optional. Verify the DSDT.
-        */
-       if (!acpi_gbl_root_table_list.current_table_count ||
-           !ACPI_COMPARE_NAME(&
-                              (acpi_gbl_root_table_list.
-                               tables[ACPI_TABLE_INDEX_DSDT].signature),
-                              ACPI_SIG_DSDT)
-           ||
-           ACPI_FAILURE(acpi_tb_verify_table
-                        (&acpi_gbl_root_table_list.
-                         tables[ACPI_TABLE_INDEX_DSDT]))) {
-               status = AE_NO_ACPI_TABLES;
-               goto unlock_and_exit;
-       }
-
-       /*
-        * Save the DSDT pointer for simple access. This is the mapped memory
-        * address. We must take care here because the address of the .Tables
-        * array can change dynamically as tables are loaded at run-time. Note:
-        * .Pointer field is not validated until after call to acpi_tb_verify_table.
-        */
-       acpi_gbl_DSDT =
-           acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
-
-       /*
-        * Optionally copy the entire DSDT to local memory (instead of simply
-        * mapping it.) There are some BIOSs that corrupt or replace the original
-        * DSDT, creating the need for this option. Default is FALSE, do not copy
-        * the DSDT.
-        */
-       if (acpi_gbl_copy_dsdt_locally) {
-               new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
-               if (new_dsdt) {
-                       acpi_gbl_DSDT = new_dsdt;
-               }
-       }
-
-       /*
-        * Save the original DSDT header for detection of table corruption
-        * and/or replacement of the DSDT from outside the OS.
-        */
-       ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
-                   sizeof(struct acpi_table_header));
-
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-
-       /* Load and parse tables */
-
-       status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
-
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-       for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
-               if ((!ACPI_COMPARE_NAME
-                    (&(acpi_gbl_root_table_list.tables[i].signature),
-                     ACPI_SIG_SSDT)
-                    &&
-                    !ACPI_COMPARE_NAME(&
-                                       (acpi_gbl_root_table_list.tables[i].
-                                        signature), ACPI_SIG_PSDT))
-                   ||
-                   ACPI_FAILURE(acpi_tb_verify_table
-                                (&acpi_gbl_root_table_list.tables[i]))) {
-                       continue;
-               }
-
-               if (no_auto_ssdt) {
-                       printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
-                       continue;
-               }
-
-               /* Ignore errors while loading tables, get as many as possible */
-
-               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-               (void)acpi_ns_load_table(i, acpi_gbl_root_node);
-               (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-       }
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
-
-      unlock_and_exit:
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-       return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_load_tables
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
- *
- ******************************************************************************/
-
-acpi_status acpi_load_tables(void)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_TRACE(acpi_load_tables);
-
-       /* Load the namespace from the tables */
-
-       status = acpi_tb_load_namespace();
-       if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status,
-                               "While loading namespace from ACPI tables"));
-       }
-
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
-
 
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_table_handler
  *
- * PARAMETERS:  Handler         - Table event handler
- *              Context         - Value passed to the handler on each event
+ * PARAMETERS:  handler         - Table event handler
+ *              context         - Value passed to the handler on each event
  *
  * RETURN:      Status
  *
@@ -698,7 +505,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
  *
  * FUNCTION:    acpi_remove_table_handler
  *
- * PARAMETERS:  Handler         - Table event handler that was installed
+ * PARAMETERS:  handler         - Table event handler that was installed
  *                                previously.
  *
  * RETURN:      Status
@@ -734,15 +541,3 @@ acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
-
-
-static int __init acpi_no_auto_ssdt_setup(char *s) {
-
-        printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
-
-        no_auto_ssdt = 1;
-
-        return 1;
-}
-
-__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
new file mode 100644 (file)
index 0000000..f87cc63
--- /dev/null
@@ -0,0 +1,389 @@
+/******************************************************************************
+ *
+ * Module Name: tbxfload - Table load/unload external interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <linux/export.h>
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbxfload")
+
+/* Local prototypes */
+static acpi_status acpi_tb_load_namespace(void);
+
+static int no_auto_ssdt;
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_load_tables
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
+ *
+ ******************************************************************************/
+
+acpi_status acpi_load_tables(void)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_load_tables);
+
+       /* Load the namespace from the tables */
+
+       status = acpi_tb_load_namespace();
+       if (ACPI_FAILURE(status)) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "While loading namespace from ACPI tables"));
+       }
+
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_tables)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_load_namespace
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
+ *              the RSDT/XSDT.
+ *
+ ******************************************************************************/
+static acpi_status acpi_tb_load_namespace(void)
+{
+       acpi_status status;
+       u32 i;
+       struct acpi_table_header *new_dsdt;
+
+       ACPI_FUNCTION_TRACE(tb_load_namespace);
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+       /*
+        * Load the namespace. The DSDT is required, but any SSDT and
+        * PSDT tables are optional. Verify the DSDT.
+        */
+       if (!acpi_gbl_root_table_list.current_table_count ||
+           !ACPI_COMPARE_NAME(&
+                              (acpi_gbl_root_table_list.
+                               tables[ACPI_TABLE_INDEX_DSDT].signature),
+                              ACPI_SIG_DSDT)
+           ||
+           ACPI_FAILURE(acpi_tb_verify_table
+                        (&acpi_gbl_root_table_list.
+                         tables[ACPI_TABLE_INDEX_DSDT]))) {
+               status = AE_NO_ACPI_TABLES;
+               goto unlock_and_exit;
+       }
+
+       /*
+        * Save the DSDT pointer for simple access. This is the mapped memory
+        * address. We must take care here because the address of the .Tables
+        * array can change dynamically as tables are loaded at run-time. Note:
+        * .Pointer field is not validated until after call to acpi_tb_verify_table.
+        */
+       acpi_gbl_DSDT =
+           acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
+
+       /*
+        * Optionally copy the entire DSDT to local memory (instead of simply
+        * mapping it.) There are some BIOSs that corrupt or replace the original
+        * DSDT, creating the need for this option. Default is FALSE, do not copy
+        * the DSDT.
+        */
+       if (acpi_gbl_copy_dsdt_locally) {
+               new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+               if (new_dsdt) {
+                       acpi_gbl_DSDT = new_dsdt;
+               }
+       }
+
+       /*
+        * Save the original DSDT header for detection of table corruption
+        * and/or replacement of the DSDT from outside the OS.
+        */
+       ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+                   sizeof(struct acpi_table_header));
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+
+       /* Load and parse tables */
+
+       status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+       for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+               if ((!ACPI_COMPARE_NAME
+                    (&(acpi_gbl_root_table_list.tables[i].signature),
+                     ACPI_SIG_SSDT)
+                    &&
+                    !ACPI_COMPARE_NAME(&
+                                       (acpi_gbl_root_table_list.tables[i].
+                                        signature), ACPI_SIG_PSDT))
+                   ||
+                   ACPI_FAILURE(acpi_tb_verify_table
+                                (&acpi_gbl_root_table_list.tables[i]))) {
+                       continue;
+               }
+
+               if (no_auto_ssdt) {
+                       printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
+                       continue;
+               }
+
+               /* Ignore errors while loading tables, get as many as possible */
+
+               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+               (void)acpi_ns_load_table(i, acpi_gbl_root_node);
+               (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
+
+      unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+       return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_load_table
+ *
+ * PARAMETERS:  table               - Pointer to a buffer containing the ACPI
+ *                                    table to be loaded.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must
+ *              be a valid ACPI table with a valid ACPI table header.
+ *              Note1: Mainly intended to support hotplug addition of SSDTs.
+ *              Note2: Does not copy the incoming table. The caller is
+ *              responsible for ensuring the table is not deleted or unmapped.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_load_table(struct acpi_table_header *table)
+{
+       acpi_status status;
+       struct acpi_table_desc table_desc;
+       u32 table_index;
+
+       ACPI_FUNCTION_TRACE(acpi_load_table);
+
+       /* Parameter validation */
+
+       if (!table) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /* Init local table descriptor */
+
+       ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
+       table_desc.address = ACPI_PTR_TO_PHYSADDR(table);
+       table_desc.pointer = table;
+       table_desc.length = table->length;
+       table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
+
+       /* Must acquire the interpreter lock during this operation */
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Install the table and load it into the namespace */
+
+       ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:"));
+       status = acpi_tb_add_table(&table_desc, &table_index);
+       if (ACPI_FAILURE(status)) {
+               goto unlock_and_exit;
+       }
+
+       status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
+
+       /* Invoke table handler if present */
+
+       if (acpi_gbl_table_handler) {
+               (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
+                                            acpi_gbl_table_handler_context);
+       }
+
+      unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_unload_parent_table
+ *
+ * PARAMETERS:  object              - Handle to any namespace object owned by
+ *                                    the table to be unloaded
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Via any namespace object within an SSDT or OEMx table, unloads
+ *              the table and deletes all namespace objects associated with
+ *              that table. Unloading of the DSDT is not allowed.
+ *              Note: Mainly intended to support hotplug removal of SSDTs.
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_parent_table(acpi_handle object)
+{
+       struct acpi_namespace_node *node =
+           ACPI_CAST_PTR(struct acpi_namespace_node, object);
+       acpi_status status = AE_NOT_EXIST;
+       acpi_owner_id owner_id;
+       u32 i;
+
+       ACPI_FUNCTION_TRACE(acpi_unload_parent_table);
+
+       /* Parameter validation */
+
+       if (!object) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /*
+        * The node owner_id is currently the same as the parent table ID.
+        * However, this could change in the future.
+        */
+       owner_id = node->owner_id;
+       if (!owner_id) {
+
+               /* owner_id==0 means DSDT is the owner. DSDT cannot be unloaded */
+
+               return_ACPI_STATUS(AE_TYPE);
+       }
+
+       /* Must acquire the interpreter lock during this operation */
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Find the table in the global table list */
+
+       for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+               if (owner_id != acpi_gbl_root_table_list.tables[i].owner_id) {
+                       continue;
+               }
+
+               /*
+                * Allow unload of SSDT and OEMx tables only. Do not allow unload
+                * of the DSDT. No other types of tables should get here, since
+                * only these types can contain AML and thus are the only types
+                * that can create namespace objects.
+                */
+               if (ACPI_COMPARE_NAME
+                   (acpi_gbl_root_table_list.tables[i].signature.ascii,
+                    ACPI_SIG_DSDT)) {
+                       status = AE_TYPE;
+                       break;
+               }
+
+               /* Ensure the table is actually loaded */
+
+               if (!acpi_tb_is_table_loaded(i)) {
+                       status = AE_NOT_EXIST;
+                       break;
+               }
+
+               /* Invoke table handler if present */
+
+               if (acpi_gbl_table_handler) {
+                       (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+                                                    acpi_gbl_root_table_list.
+                                                    tables[i].pointer,
+                                                    acpi_gbl_table_handler_context);
+               }
+
+               /*
+                * Delete all namespace objects owned by this table. Note that
+                * these objects can appear anywhere in the namespace by virtue
+                * of the AML "Scope" operator. Thus, we need to track ownership
+                * by an ID, not simply a position within the hierarchy.
+                */
+               status = acpi_tb_delete_namespace_by_owner(i);
+               if (ACPI_FAILURE(status)) {
+                       break;
+               }
+
+               status = acpi_tb_release_owner_id(i);
+               acpi_tb_set_table_loaded_flag(i, FALSE);
+               break;
+       }
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_parent_table)
+
+static int __init acpi_no_auto_ssdt_setup(char *s) {
+
+        printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
+
+        no_auto_ssdt = 1;
+
+        return 1;
+}
+
+__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
index 4258f647ca3d63cb20cf9ce2d0b01701f6d00e11..74e720800037e2499cb734dcd344bd50c2496626 100644 (file)
@@ -57,7 +57,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
  *
  * FUNCTION:    acpi_tb_validate_rsdp
  *
- * PARAMETERS:  Rsdp                - Pointer to unvalidated RSDP
+ * PARAMETERS:  rsdp                - Pointer to unvalidated RSDP
  *
  * RETURN:      Status
  *
@@ -107,10 +107,10 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
  *
  * RETURN:      Status, RSDP physical address
  *
- * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
+ * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor
  *              pointer structure.  If it is found, set *RSDP to point to it.
  *
- * NOTE1:       The RSDP must be either in the first 1_k of the Extended
+ * NOTE1:       The RSDP must be either in the first 1K of the Extended
  *              BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
  *              Only a 32-bit physical address is necessary.
  *
@@ -152,7 +152,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
        if (physical_address > 0x400) {
                /*
                 * 1b) Search EBDA paragraphs (EBDA is required to be a
-                *     minimum of 1_k length)
+                *     minimum of 1K length)
                 */
                table_ptr = acpi_os_map_memory((acpi_physical_address)
                                               physical_address,
@@ -216,7 +216,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
 
        /* A valid RSDP was not found */
 
-       ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
+       ACPI_BIOS_ERROR((AE_INFO, "A valid RSDP was not found"));
        return_ACPI_STATUS(AE_NOT_FOUND);
 }
 
@@ -225,7 +225,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
  * FUNCTION:    acpi_tb_scan_memory_for_rsdp
  *
  * PARAMETERS:  start_address       - Starting pointer for search
- *              Length              - Maximum length to search
+ *              length              - Maximum length to search
  *
  * RETURN:      Pointer to the RSDP if found, otherwise NULL.
  *
index 67932aebe6dd36bc6c54458b2134ecb570dc82aa..64880306133d9156b5862812b12ff018ef48a59d 100644 (file)
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("utaddress")
  * FUNCTION:    acpi_ut_add_address_range
  *
  * PARAMETERS:  space_id            - Address space ID
- *              Address             - op_region start address
- *              Length              - op_region length
+ *              address             - op_region start address
+ *              length              - op_region length
  *              region_node         - op_region namespace node
  *
  * RETURN:      Status
@@ -186,9 +186,9 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
  * FUNCTION:    acpi_ut_check_address_range
  *
  * PARAMETERS:  space_id            - Address space ID
- *              Address             - Start address
- *              Length              - Length of address range
- *              Warn                - TRUE if warning on overlap desired
+ *              address             - Start address
+ *              length              - Length of address range
+ *              warn                - TRUE if warning on overlap desired
  *
  * RETURN:      Count of the number of conflicts detected. Zero is always
  *              returned for Space IDs other than Memory or I/O.
index 9982d2ea66fbbc382a4b6daaf64d1bdcf647b678..ed29d474095e9b2b64565f4c7934b848a3ed7a8f 100644 (file)
@@ -189,7 +189,7 @@ acpi_status acpi_ut_delete_caches(void)
  *
  * FUNCTION:    acpi_ut_validate_buffer
  *
- * PARAMETERS:  Buffer              - Buffer descriptor to be validated
+ * PARAMETERS:  buffer              - Buffer descriptor to be validated
  *
  * RETURN:      Status
  *
@@ -227,7 +227,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
  *
  * FUNCTION:    acpi_ut_initialize_buffer
  *
- * PARAMETERS:  Buffer              - Buffer to be validated
+ * PARAMETERS:  buffer              - Buffer to be validated
  *              required_length     - Length needed
  *
  * RETURN:      Status
@@ -308,10 +308,10 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
  *
  * FUNCTION:    acpi_ut_allocate
  *
- * PARAMETERS:  Size                - Size of the allocation
- *              Component           - Component type of caller
- *              Module              - Source file name of caller
- *              Line                - Line number of caller
+ * PARAMETERS:  size                - Size of the allocation
+ *              component           - Component type of caller
+ *              module              - Source file name of caller
+ *              line                - Line number of caller
  *
  * RETURN:      Address of the allocated memory on success, NULL on failure.
  *
@@ -352,10 +352,10 @@ void *acpi_ut_allocate(acpi_size size,
  *
  * FUNCTION:    acpi_ut_allocate_zeroed
  *
- * PARAMETERS:  Size                - Size of the allocation
- *              Component           - Component type of caller
- *              Module              - Source file name of caller
- *              Line                - Line number of caller
+ * PARAMETERS:  size                - Size of the allocation
+ *              component           - Component type of caller
+ *              module              - Source file name of caller
+ *              line                - Line number of caller
  *
  * RETURN:      Address of the allocated memory on success, NULL on failure.
  *
index 3317c0a406ee539381e5f80f93983c6653f8697f..294692ae76e918e25ab56e43b36b86d26950ea00 100644 (file)
@@ -317,7 +317,7 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
  * FUNCTION:    acpi_ut_copy_ipackage_to_epackage
  *
  * PARAMETERS:  internal_object     - Pointer to the object we are returning
- *              Buffer              - Where the object is returned
+ *              buffer              - Where the object is returned
  *              space_used          - Where the object length is returned
  *
  * RETURN:      Status
index a0998a886318dba8f286f93e393fc2994848d2b5..e810894149ae349a6e87f0372b86f16689ac4e1d 100644 (file)
@@ -145,7 +145,7 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Format              - Printf format field
+ *              format              - Printf format field
  *              ...                 - Optional printf arguments
  *
  * RETURN:      None
@@ -217,7 +217,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print)
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Format              - Printf format field
+ *              format              - Printf format field
  *              ...                 - Optional printf arguments
  *
  * RETURN:      None
@@ -286,7 +286,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Pointer             - Pointer to display
+ *              pointer             - Pointer to display
  *
  * RETURN:      None
  *
@@ -315,7 +315,7 @@ acpi_ut_trace_ptr(u32 line_number,
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              String              - Additional string to display
+ *              string              - Additional string to display
  *
  * RETURN:      None
  *
@@ -346,7 +346,7 @@ acpi_ut_trace_str(u32 line_number,
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Integer             - Integer to display
+ *              integer             - Integer to display
  *
  * RETURN:      None
  *
@@ -408,7 +408,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit)
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Status              - Exit status code
+ *              status              - Exit status code
  *
  * RETURN:      None
  *
@@ -449,7 +449,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Value               - Value to be printed with exit msg
+ *              value               - Value to be printed with exit msg
  *
  * RETURN:      None
  *
@@ -481,7 +481,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
  *              function_name       - Caller's procedure name
  *              module_name         - Caller's module name
  *              component_id        - Caller's component ID
- *              Ptr                 - Pointer to display
+ *              ptr                 - Pointer to display
  *
  * RETURN:      None
  *
@@ -508,10 +508,10 @@ acpi_ut_ptr_exit(u32 line_number,
  *
  * FUNCTION:    acpi_ut_dump_buffer
  *
- * PARAMETERS:  Buffer              - Buffer to dump
- *              Count               - Amount to dump, in bytes
- *              Display             - BYTE, WORD, DWORD, or QWORD display
- *              component_iD        - Caller's component ID
+ * PARAMETERS:  buffer              - Buffer to dump
+ *              count               - Amount to dump, in bytes
+ *              display             - BYTE, WORD, DWORD, or QWORD display
+ *              component_ID        - Caller's component ID
  *
  * RETURN:      None
  *
@@ -625,10 +625,10 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
  *
  * FUNCTION:    acpi_ut_dump_buffer
  *
- * PARAMETERS:  Buffer              - Buffer to dump
- *              Count               - Amount to dump, in bytes
- *              Display             - BYTE, WORD, DWORD, or QWORD display
- *              component_iD        - Caller's component ID
+ * PARAMETERS:  buffer              - Buffer to dump
+ *              count               - Amount to dump, in bytes
+ *              display             - BYTE, WORD, DWORD, or QWORD display
+ *              component_ID        - Caller's component ID
  *
  * RETURN:      None
  *
index 684849949bf3e64550e2a0cbb6e0f42d4525933b..60a158472d820ac7e8aeba53c77634674a8ee747 100644 (file)
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utdecode")
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_format_exception
- *
- * PARAMETERS:  Status       - The acpi_status code to be formatted
- *
- * RETURN:      A string containing the exception text. A valid pointer is
- *              always returned.
- *
- * DESCRIPTION: This function translates an ACPI exception into an ASCII string
- *              It is here instead of utxface.c so it is always present.
- *
- ******************************************************************************/
-const char *acpi_format_exception(acpi_status status)
-{
-       const char *exception = NULL;
-
-       ACPI_FUNCTION_ENTRY();
-
-       exception = acpi_ut_validate_exception(status);
-       if (!exception) {
-
-               /* Exception code was not recognized */
-
-               ACPI_ERROR((AE_INFO,
-                           "Unknown exception code: 0x%8.8X", status));
-
-               exception = "UNKNOWN_STATUS_CODE";
-       }
-
-       return (ACPI_CAST_PTR(const char, exception));
-}
-
-ACPI_EXPORT_SYMBOL(acpi_format_exception)
-
 /*
  * Properties of the ACPI Object Types, both internal and external.
  * The table is indexed by values of acpi_object_type
@@ -126,8 +91,8 @@ const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
  *
  * FUNCTION:    acpi_ut_hex_to_ascii_char
  *
- * PARAMETERS:  Integer             - Contains the hex digit
- *              Position            - bit position of the digit within the
+ * PARAMETERS:  integer             - Contains the hex digit
+ *              position            - bit position of the digit within the
  *                                    integer (multiple of 4)
  *
  * RETURN:      The converted Ascii character
@@ -164,16 +129,17 @@ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
 /* Region type decoding */
 
 const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
-       "SystemMemory",
-       "SystemIO",
-       "PCI_Config",
-       "EmbeddedControl",
-       "SMBus",
-       "SystemCMOS",
-       "PCIBARTarget",
-       "IPMI",
-       "GeneralPurposeIo",
-       "GenericSerialBus"
+       "SystemMemory",         /* 0x00 */
+       "SystemIO",             /* 0x01 */
+       "PCI_Config",           /* 0x02 */
+       "EmbeddedControl",      /* 0x03 */
+       "SMBus",                /* 0x04 */
+       "SystemCMOS",           /* 0x05 */
+       "PCIBARTarget",         /* 0x06 */
+       "IPMI",                 /* 0x07 */
+       "GeneralPurposeIo",     /* 0x08 */
+       "GenericSerialBus",     /* 0x09 */
+       "PCC"                   /* 0x0A */
 };
 
 char *acpi_ut_get_region_name(u8 space_id)
@@ -228,7 +194,7 @@ char *acpi_ut_get_event_name(u32 event_id)
  *
  * FUNCTION:    acpi_ut_get_type_name
  *
- * PARAMETERS:  Type                - An ACPI object type
+ * PARAMETERS:  type                - An ACPI object type
  *
  * RETURN:      Decoded ACPI object type name
  *
@@ -306,7 +272,7 @@ char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
  *
  * FUNCTION:    acpi_ut_get_node_name
  *
- * PARAMETERS:  Object               - A namespace node
+ * PARAMETERS:  object               - A namespace node
  *
  * RETURN:      ASCII name of the node
  *
@@ -351,7 +317,7 @@ char *acpi_ut_get_node_name(void *object)
  *
  * FUNCTION:    acpi_ut_get_descriptor_name
  *
- * PARAMETERS:  Object               - An ACPI object
+ * PARAMETERS:  object               - An ACPI object
  *
  * RETURN:      Decoded name of the descriptor type
  *
@@ -401,7 +367,7 @@ char *acpi_ut_get_descriptor_name(void *object)
  *
  * FUNCTION:    acpi_ut_get_reference_name
  *
- * PARAMETERS:  Object               - An ACPI reference object
+ * PARAMETERS:  object               - An ACPI reference object
  *
  * RETURN:      Decoded name of the type of reference
  *
@@ -532,7 +498,7 @@ const char *acpi_ut_get_notify_name(u32 notify_value)
  *
  * FUNCTION:    acpi_ut_valid_object_type
  *
- * PARAMETERS:  Type            - Object type to be validated
+ * PARAMETERS:  type            - Object type to be validated
  *
  * RETURN:      TRUE if valid object type, FALSE otherwise
  *
index 2a6c3e183697ac68a6c6bca589116c20f8bf4520..798105443d0f9c4689d26641ff7d206a63e56f1c 100644 (file)
@@ -60,7 +60,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
  *
  * FUNCTION:    acpi_ut_delete_internal_obj
  *
- * PARAMETERS:  Object         - Object to be deleted
+ * PARAMETERS:  object         - Object to be deleted
  *
  * RETURN:      None
  *
@@ -152,7 +152,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
        case ACPI_TYPE_PROCESSOR:
        case ACPI_TYPE_THERMAL:
 
-               /* Walk the notify handler list for this object */
+               /* Walk the address handler list for this object */
 
                handler_desc = object->common_notify.handler;
                while (handler_desc) {
@@ -358,8 +358,8 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
  *
  * FUNCTION:    acpi_ut_update_ref_count
  *
- * PARAMETERS:  Object          - Object whose ref count is to be updated
- *              Action          - What to do
+ * PARAMETERS:  object          - Object whose ref count is to be updated
+ *              action          - What to do
  *
  * RETURN:      New ref count
  *
@@ -456,9 +456,9 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
  *
  * FUNCTION:    acpi_ut_update_object_reference
  *
- * PARAMETERS:  Object              - Increment ref count for this object
+ * PARAMETERS:  object              - Increment ref count for this object
  *                                    and all sub-objects
- *              Action              - Either REF_INCREMENT or REF_DECREMENT or
+ *              action              - Either REF_INCREMENT or REF_DECREMENT or
  *                                    REF_FORCE_DELETE
  *
  * RETURN:      Status
@@ -480,6 +480,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
        acpi_status status = AE_OK;
        union acpi_generic_state *state_list = NULL;
        union acpi_operand_object *next_object = NULL;
+       union acpi_operand_object *prev_object;
        union acpi_generic_state *state;
        u32 i;
 
@@ -505,12 +506,21 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
                case ACPI_TYPE_POWER:
                case ACPI_TYPE_THERMAL:
 
-                       /* Update the notify objects for these types (if present) */
-
-                       acpi_ut_update_ref_count(object->common_notify.
-                                                system_notify, action);
-                       acpi_ut_update_ref_count(object->common_notify.
-                                                device_notify, action);
+                       /*
+                        * Update the notify objects for these types (if present)
+                        * Two lists, system and device notify handlers.
+                        */
+                       for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+                               prev_object =
+                                   object->common_notify.notify_list[i];
+                               while (prev_object) {
+                                       next_object =
+                                           prev_object->notify.next[i];
+                                       acpi_ut_update_ref_count(prev_object,
+                                                                action);
+                                       prev_object = next_object;
+                               }
+                       }
                        break;
 
                case ACPI_TYPE_PACKAGE:
@@ -630,7 +640,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
  *
  * FUNCTION:    acpi_ut_add_reference
  *
- * PARAMETERS:  Object          - Object whose reference count is to be
+ * PARAMETERS:  object          - Object whose reference count is to be
  *                                incremented
  *
  * RETURN:      None
@@ -664,7 +674,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
  *
  * FUNCTION:    acpi_ut_remove_reference
  *
- * PARAMETERS:  Object         - Object whose ref count will be decremented
+ * PARAMETERS:  object         - Object whose ref count will be decremented
  *
  * RETURN:      None
  *
index 479f32b33415722742c10c7b125cebd682b4b054..a9c65fbea5f45d6313218acbd341aba25196d1b8 100644 (file)
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("uteval")
  * FUNCTION:    acpi_ut_evaluate_object
  *
  * PARAMETERS:  prefix_node         - Starting node
- *              Path                - Path to object from starting node
+ *              path                - Path to object from starting node
  *              expected_return_types - Bitmap of allowed return types
  *              return_desc         - Where a return value is stored
  *
@@ -187,7 +187,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
  *
  * PARAMETERS:  object_name         - Object name to be evaluated
  *              device_node         - Node for the device
- *              Value               - Where the value is returned
+ *              value               - Where the value is returned
  *
  * RETURN:      Status
  *
@@ -229,7 +229,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
  * FUNCTION:    acpi_ut_execute_STA
  *
  * PARAMETERS:  device_node         - Node for the device
- *              Flags               - Where the status flags are returned
+ *              flags               - Where the status flags are returned
  *
  * RETURN:      Status
  *
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
new file mode 100644 (file)
index 0000000..23b9894
--- /dev/null
@@ -0,0 +1,153 @@
+/*******************************************************************************
+ *
+ * Module Name: utexcep - Exception code support
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#define ACPI_DEFINE_EXCEPTION_TABLE
+#include <linux/export.h>
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utexcep")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_format_exception
+ *
+ * PARAMETERS:  status              - The acpi_status code to be formatted
+ *
+ * RETURN:      A string containing the exception text. A valid pointer is
+ *              always returned.
+ *
+ * DESCRIPTION: This function translates an ACPI exception into an ASCII
+ *              string. Returns "unknown status" string for invalid codes.
+ *
+ ******************************************************************************/
+const char *acpi_format_exception(acpi_status status)
+{
+       const char *exception = NULL;
+
+       ACPI_FUNCTION_ENTRY();
+
+       exception = acpi_ut_validate_exception(status);
+       if (!exception) {
+
+               /* Exception code was not recognized */
+
+               ACPI_ERROR((AE_INFO,
+                           "Unknown exception code: 0x%8.8X", status));
+
+               exception = "UNKNOWN_STATUS_CODE";
+       }
+
+       return (ACPI_CAST_PTR(const char, exception));
+}
+
+ACPI_EXPORT_SYMBOL(acpi_format_exception)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_validate_exception
+ *
+ * PARAMETERS:  status              - The acpi_status code to be formatted
+ *
+ * RETURN:      A string containing the exception text. NULL if exception is
+ *              not valid.
+ *
+ * DESCRIPTION: This function validates and translates an ACPI exception into
+ *              an ASCII string.
+ *
+ ******************************************************************************/
+const char *acpi_ut_validate_exception(acpi_status status)
+{
+       u32 sub_status;
+       const char *exception = NULL;
+
+       ACPI_FUNCTION_ENTRY();
+
+       /*
+        * Status is composed of two parts, a "type" and an actual code
+        */
+       sub_status = (status & ~AE_CODE_MASK);
+
+       switch (status & AE_CODE_MASK) {
+       case AE_CODE_ENVIRONMENTAL:
+
+               if (sub_status <= AE_CODE_ENV_MAX) {
+                       exception = acpi_gbl_exception_names_env[sub_status];
+               }
+               break;
+
+       case AE_CODE_PROGRAMMER:
+
+               if (sub_status <= AE_CODE_PGM_MAX) {
+                       exception = acpi_gbl_exception_names_pgm[sub_status];
+               }
+               break;
+
+       case AE_CODE_ACPI_TABLES:
+
+               if (sub_status <= AE_CODE_TBL_MAX) {
+                       exception = acpi_gbl_exception_names_tbl[sub_status];
+               }
+               break;
+
+       case AE_CODE_AML:
+
+               if (sub_status <= AE_CODE_AML_MAX) {
+                       exception = acpi_gbl_exception_names_aml[sub_status];
+               }
+               break;
+
+       case AE_CODE_CONTROL:
+
+               if (sub_status <= AE_CODE_CTRL_MAX) {
+                       exception = acpi_gbl_exception_names_ctrl[sub_status];
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return (ACPI_CAST_PTR(const char, exception));
+}
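As a usage note (a sketch, not taken from this patch): acpi_format_exception() is the expected way for callers to turn an acpi_status into readable text, for example when logging a failure:

	/* Illustrative only: print a failed status in human-readable form */
	static void example_report_status(acpi_status status)
	{
		if (ACPI_FAILURE(status))
			pr_err("ACPI: example operation failed: %s\n",
			       acpi_format_exception(status));
	}
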
index 90f53b42eca9fcdfe2cda7d88a47cdb076dfc02e..ed1893155f8b855172eb43e930ac144727cca697 100644 (file)
@@ -247,8 +247,9 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Init library globals.  All globals that require specific
- *              initialization should be initialized here!
+ * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
+ *              initialization should be initialized here. This allows for
+ *              a warm restart.
  *
  ******************************************************************************/
 
@@ -284,7 +285,7 @@ acpi_status acpi_ut_init_globals(void)
                acpi_gbl_owner_id_mask[i] = 0;
        }
 
-       /* Last owner_iD is never valid */
+       /* Last owner_ID is never valid */
 
        acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
 
@@ -304,8 +305,8 @@ acpi_status acpi_ut_init_globals(void)
 
        /* Global handlers */
 
-       acpi_gbl_system_notify.handler = NULL;
-       acpi_gbl_device_notify.handler = NULL;
+       acpi_gbl_global_notify[0].handler = NULL;
+       acpi_gbl_global_notify[1].handler = NULL;
        acpi_gbl_exception_handler = NULL;
        acpi_gbl_init_handler = NULL;
        acpi_gbl_table_handler = NULL;
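The acpi_gbl_global_notify[] array above replaces the old acpi_gbl_system_notify/acpi_gbl_device_notify globals, reflecting the move to per-type notify handler lists (presumably index 0 for system notifies, index 1 for device notifies, mirroring the old globals). From a driver's point of view the public registration API is unchanged; a small sketch, with the device handle assumed to be whatever the driver already holds:

	/* Illustrative only: receive both system and device notifies */
	static void example_notify(acpi_handle handle, u32 event, void *context)
	{
		pr_info("ACPI notify 0x%02x on handle %p\n", event, handle);
	}

	static acpi_status example_register(acpi_handle device)
	{
		return acpi_install_notify_handler(device, ACPI_ALL_NOTIFY,
						   example_notify, NULL);
	}
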
index c92eb1d937859d673ff1c08293ef4ad94f861328..5d84e19545752154874741bb2b29d9e82cc81c56 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Module Name: utids - support for device IDs - HID, UID, CID
+ * Module Name: utids - support for device Ids - HID, UID, CID
  *
  *****************************************************************************/
 
index 155fd786d0f2a45b6174f210b621fee6b6b7436d..b1eb7f17e11065a608be7e8095bf24b9e1d17fb7 100644 (file)
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("utlock")
  * FUNCTION:    acpi_ut_create_rw_lock
  *              acpi_ut_delete_rw_lock
  *
- * PARAMETERS:  Lock                - Pointer to a valid RW lock
+ * PARAMETERS:  lock                - Pointer to a valid RW lock
  *
  * RETURN:      Status
  *
@@ -89,7 +89,7 @@ void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
  * FUNCTION:    acpi_ut_acquire_read_lock
  *              acpi_ut_release_read_lock
  *
- * PARAMETERS:  Lock                - Pointer to a valid RW lock
+ * PARAMETERS:  lock                - Pointer to a valid RW lock
  *
  * RETURN:      Status
  *
@@ -149,7 +149,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
  * FUNCTION:    acpi_ut_acquire_write_lock
  *              acpi_ut_release_write_lock
  *
- * PARAMETERS:  Lock                - Pointer to a valid RW lock
+ * PARAMETERS:  lock                - Pointer to a valid RW lock
  *
  * RETURN:      Status
  *
index 2491a552b0e69eeba847364f3ab643505f5afb09..d88a8aaab2a64107bd3ffe2d84579aa2d3c0408f 100644 (file)
@@ -73,8 +73,8 @@ typedef union uint64_overlay {
  *
  * FUNCTION:    acpi_ut_short_divide
  *
- * PARAMETERS:  Dividend            - 64-bit dividend
- *              Divisor             - 32-bit divisor
+ * PARAMETERS:  dividend            - 64-bit dividend
+ *              divisor             - 32-bit divisor
  *              out_quotient        - Pointer to where the quotient is returned
  *              out_remainder       - Pointer to where the remainder is returned
  *
index 86f19db74e0549949c9685e391166b282523b949..33c6cf7ff4675b896df5c44920eeab2ddd8f7f41 100644 (file)
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utmisc")
 
+#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ut_validate_exception
+ * FUNCTION:    ut_convert_backslashes
  *
- * PARAMETERS:  Status       - The acpi_status code to be formatted
+ * PARAMETERS:  pathname        - File pathname string to be converted
  *
- * RETURN:      A string containing the exception text. NULL if exception is
- *              not valid.
+ * RETURN:      Modifies the input Pathname
  *
- * DESCRIPTION: This function validates and translates an ACPI exception into
- *              an ASCII string.
+ * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
+ *              the entire input file pathname string.
  *
  ******************************************************************************/
-const char *acpi_ut_validate_exception(acpi_status status)
+void ut_convert_backslashes(char *pathname)
 {
-       u32 sub_status;
-       const char *exception = NULL;
 
-       ACPI_FUNCTION_ENTRY();
-
-       /*
-        * Status is composed of two parts, a "type" and an actual code
-        */
-       sub_status = (status & ~AE_CODE_MASK);
-
-       switch (status & AE_CODE_MASK) {
-       case AE_CODE_ENVIRONMENTAL:
-
-               if (sub_status <= AE_CODE_ENV_MAX) {
-                       exception = acpi_gbl_exception_names_env[sub_status];
-               }
-               break;
-
-       case AE_CODE_PROGRAMMER:
-
-               if (sub_status <= AE_CODE_PGM_MAX) {
-                       exception = acpi_gbl_exception_names_pgm[sub_status];
-               }
-               break;
-
-       case AE_CODE_ACPI_TABLES:
-
-               if (sub_status <= AE_CODE_TBL_MAX) {
-                       exception = acpi_gbl_exception_names_tbl[sub_status];
-               }
-               break;
-
-       case AE_CODE_AML:
-
-               if (sub_status <= AE_CODE_AML_MAX) {
-                       exception = acpi_gbl_exception_names_aml[sub_status];
-               }
-               break;
-
-       case AE_CODE_CONTROL:
+       if (!pathname) {
+               return;
+       }
 
-               if (sub_status <= AE_CODE_CTRL_MAX) {
-                       exception = acpi_gbl_exception_names_ctrl[sub_status];
+       while (*pathname) {
+               if (*pathname == '\\') {
+                       *pathname = '/';
                }
-               break;
 
-       default:
-               break;
+               pathname++;
        }
-
-       return (ACPI_CAST_PTR(const char, exception));
 }
+#endif
 
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_is_pci_root_bridge
  *
- * PARAMETERS:  Id              - The HID/CID in string format
+ * PARAMETERS:  id              - The HID/CID in string format
  *
  * RETURN:      TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
  *
@@ -150,7 +112,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
  *
  * FUNCTION:    acpi_ut_is_aml_table
  *
- * PARAMETERS:  Table               - An ACPI table
+ * PARAMETERS:  table               - An ACPI table
  *
  * RETURN:      TRUE if table contains executable AML; FALSE otherwise
  *
@@ -284,7 +246,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
  *
  * FUNCTION:    acpi_ut_release_owner_id
  *
- * PARAMETERS:  owner_id_ptr        - Pointer to a previously allocated owner_iD
+ * PARAMETERS:  owner_id_ptr        - Pointer to a previously allocated owner_ID
  *
  * RETURN:      None. No error is returned because we are either exiting a
  *              control method or unloading a table. Either way, we would
@@ -307,7 +269,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
 
        *owner_id_ptr = 0;
 
-       /* Zero is not a valid owner_iD */
+       /* Zero is not a valid owner_ID */
 
        if (owner_id == 0) {
                ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
@@ -381,7 +343,7 @@ void acpi_ut_strupr(char *src_string)
  *
  * FUNCTION:    acpi_ut_print_string
  *
- * PARAMETERS:  String          - Null terminated ASCII string
+ * PARAMETERS:  string          - Null terminated ASCII string
  *              max_length      - Maximum output length
  *
  * RETURN:      None
@@ -467,7 +429,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
  *
  * FUNCTION:    acpi_ut_dword_byte_swap
  *
- * PARAMETERS:  Value           - Value to be converted
+ * PARAMETERS:  value           - Value to be converted
  *
  * RETURN:      u32 integer with bytes swapped
  *
@@ -537,9 +499,9 @@ void acpi_ut_set_integer_width(u8 revision)
  *
  * FUNCTION:    acpi_ut_display_init_pathname
  *
- * PARAMETERS:  Type                - Object type of the node
+ * PARAMETERS:  type                - Object type of the node
  *              obj_handle          - Handle whose pathname will be displayed
- *              Path                - Additional path string to be appended.
+ *              path                - Additional path string to be appended.
  *                                      (NULL if no extra path)
  *
  * RETURN:      acpi_status
@@ -604,8 +566,8 @@ acpi_ut_display_init_pathname(u8 type,
  *
  * FUNCTION:    acpi_ut_valid_acpi_char
  *
- * PARAMETERS:  Char            - The character to be examined
- *              Position        - Byte position (0-3)
+ * PARAMETERS:  char            - The character to be examined
+ *              position        - Byte position (0-3)
  *
  * RETURN:      TRUE if the character is valid, FALSE otherwise
  *
@@ -640,7 +602,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position)
  *
  * FUNCTION:    acpi_ut_valid_acpi_name
  *
- * PARAMETERS:  Name            - The name to be examined
+ * PARAMETERS:  name            - The name to be examined
  *
  * RETURN:      TRUE if the name is valid, FALSE otherwise
  *
@@ -671,7 +633,7 @@ u8 acpi_ut_valid_acpi_name(u32 name)
  *
  * FUNCTION:    acpi_ut_repair_name
  *
- * PARAMETERS:  Name            - The ACPI name to be repaired
+ * PARAMETERS:  name            - The ACPI name to be repaired
  *
  * RETURN:      Repaired version of the name
  *
@@ -705,8 +667,8 @@ acpi_name acpi_ut_repair_name(char *name)
  *
  * FUNCTION:    acpi_ut_strtoul64
  *
- * PARAMETERS:  String          - Null terminated string
- *              Base            - Radix of the string: 16 or ACPI_ANY_BASE;
+ * PARAMETERS:  string          - Null terminated string
+ *              base            - Radix of the string: 16 or ACPI_ANY_BASE;
 *                                ACPI_ANY_BASE means 'on behalf of to_integer'
  *              ret_integer     - Where the converted integer is returned
  *
@@ -755,7 +717,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
 
        if (to_integer_op) {
                /*
-                * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
+                * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
                 * We need to determine if it is decimal or hexadecimal.
                 */
                if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
@@ -878,8 +840,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
  *
  * FUNCTION:    acpi_ut_create_update_state_and_push
  *
- * PARAMETERS:  Object          - Object to be added to the new state
- *              Action          - Increment/Decrement
+ * PARAMETERS:  object          - Object to be added to the new state
+ *              action          - Increment/Decrement
  *              state_list      - List the state will be added to
  *
  * RETURN:      Status
@@ -919,7 +881,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
  * PARAMETERS:  source_object       - The package to walk
  *              target_object       - Target object (if package is being copied)
  *              walk_callback       - Called once for each package element
- *              Context             - Passed to the callback function
+ *              context             - Passed to the callback function
  *
  * RETURN:      Status
  *
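Most of the utmisc.c hunks above are comment-only renames, but ut_convert_backslashes() is new code, compiled only when ACPI_ASL_COMPILER or ACPI_EXEC_APP is defined (i.e. for the iASL/acpiexec tools rather than the kernel). A minimal standalone sketch of the same in-place conversion, with the loop duplicated so the demo builds without ACPICA:

/* Standalone sketch of the backslash-to-forward-slash conversion added
 * above; the loop is duplicated so this compiles on its own. */
#include <stdio.h>

static void convert_backslashes(char *pathname)
{
        if (!pathname)
                return;

        while (*pathname) {
                if (*pathname == '\\')
                        *pathname = '/';
                pathname++;
        }
}

int main(void)
{
        char path[] = "C:\\acpi\\tables\\dsdt.aml";

        convert_backslashes(path);
        printf("%s\n", path);           /* C:/acpi/tables/dsdt.aml */
        return 0;
}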
index 43174df3312100f0a5e9d4b58dfde1679c9a1fd3..296baa676bc59cbf60320145e304e1f0ba705757 100644 (file)
@@ -147,7 +147,7 @@ void acpi_ut_mutex_terminate(void)
  *
  * FUNCTION:    acpi_ut_create_mutex
  *
- * PARAMETERS:  mutex_iD        - ID of the mutex to be created
+ * PARAMETERS:  mutex_ID        - ID of the mutex to be created
  *
  * RETURN:      Status
  *
@@ -176,7 +176,7 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
  *
  * FUNCTION:    acpi_ut_delete_mutex
  *
- * PARAMETERS:  mutex_iD        - ID of the mutex to be deleted
+ * PARAMETERS:  mutex_ID        - ID of the mutex to be deleted
  *
  * RETURN:      Status
  *
@@ -199,7 +199,7 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
  *
  * FUNCTION:    acpi_ut_acquire_mutex
  *
- * PARAMETERS:  mutex_iD        - ID of the mutex to be acquired
+ * PARAMETERS:  mutex_ID        - ID of the mutex to be acquired
  *
  * RETURN:      Status
  *
@@ -283,7 +283,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
  *
  * FUNCTION:    acpi_ut_release_mutex
  *
- * PARAMETERS:  mutex_iD        - ID of the mutex to be released
+ * PARAMETERS:  mutex_ID        - ID of the mutex to be released
  *
  * RETURN:      Status
  *
index b112744fc9ae3018dd591b5904f94b8059ea6b10..655f0799a3910556989745626eb65635ee6fab66 100644 (file)
@@ -69,7 +69,7 @@ acpi_ut_get_element_length(u8 object_type,
  * PARAMETERS:  module_name         - Source file name of caller
  *              line_number         - Line number of caller
  *              component_id        - Component type of caller
- *              Type                - ACPI Type of the new object
+ *              type                - ACPI Type of the new object
  *
  * RETURN:      A new internal object, null on failure
  *
@@ -150,7 +150,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
  *
  * FUNCTION:    acpi_ut_create_package_object
  *
- * PARAMETERS:  Count               - Number of package elements
+ * PARAMETERS:  count               - Number of package elements
  *
  * RETURN:      Pointer to a new Package object, null on failure
  *
@@ -323,11 +323,11 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
  *
  * FUNCTION:    acpi_ut_valid_internal_object
  *
- * PARAMETERS:  Object              - Object to be validated
+ * PARAMETERS:  object              - Object to be validated
  *
  * RETURN:      TRUE if object is valid, FALSE otherwise
  *
- * DESCRIPTION: Validate a pointer to be a union acpi_operand_object
+ * DESCRIPTION: Validate a pointer to be of type union acpi_operand_object
  *
  ******************************************************************************/
 
@@ -348,7 +348,7 @@ u8 acpi_ut_valid_internal_object(void *object)
        switch (ACPI_GET_DESCRIPTOR_TYPE(object)) {
        case ACPI_DESC_TYPE_OPERAND:
 
-               /* The object appears to be a valid union acpi_operand_object    */
+               /* The object appears to be a valid union acpi_operand_object */
 
                return (TRUE);
 
@@ -407,7 +407,7 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
  *
  * FUNCTION:    acpi_ut_delete_object_desc
  *
- * PARAMETERS:  Object          - An Acpi internal object to be deleted
+ * PARAMETERS:  object          - An Acpi internal object to be deleted
  *
  * RETURN:      None.
  *
@@ -419,7 +419,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
 {
        ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
 
-       /* Object must be a union acpi_operand_object    */
+       /* Object must be a union acpi_operand_object */
 
        if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
                ACPI_ERROR((AE_INFO,
index 2360cf70c18ccdcb810ccc08c2c520d3bcaf3948..34ef0bd7e4b414286fe191e1f4ab3df27f958b0e 100644 (file)
@@ -68,7 +68,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
        {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003},      /* Windows Server 2003 */
        {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2},     /* Windows XP SP2 */
        {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1},      /* Windows Server 2003 SP1 - Added 03/2006 */
-       {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},  /* Windows Vista - Added 03/2006 */
+       {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},  /* Windows vista - Added 03/2006 */
        {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008},      /* Windows Server 2008 - Added 09/2009 */
        {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1},  /* Windows Vista SP1 - Added 09/2009 */
        {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},  /* Windows Vista SP2 - Added 09/2010 */
index 9d441ea703052545c2b9f98f122de42a6acfb26e..e38bef4980bce9c0be6a7741a339144c3548d390 100644 (file)
@@ -356,13 +356,13 @@ static const u8 acpi_gbl_resource_types[] = {
        ACPI_SMALL_VARIABLE_LENGTH,     /* 06 start_dependent_functions */
        ACPI_FIXED_LENGTH,      /* 07 end_dependent_functions */
        ACPI_FIXED_LENGTH,      /* 08 IO */
-       ACPI_FIXED_LENGTH,      /* 09 fixed_iO */
-       ACPI_FIXED_LENGTH,      /* 0_a fixed_dMA */
+       ACPI_FIXED_LENGTH,      /* 09 fixed_IO */
+       ACPI_FIXED_LENGTH,      /* 0A fixed_DMA */
        0,
        0,
        0,
-       ACPI_VARIABLE_LENGTH,   /* 0_e vendor_short */
-       ACPI_FIXED_LENGTH,      /* 0_f end_tag */
+       ACPI_VARIABLE_LENGTH,   /* 0E vendor_short */
+       ACPI_FIXED_LENGTH,      /* 0F end_tag */
 
        /* Large descriptors */
 
@@ -375,16 +375,16 @@ static const u8 acpi_gbl_resource_types[] = {
        ACPI_FIXED_LENGTH,      /* 06 memory32_fixed */
        ACPI_VARIABLE_LENGTH,   /* 07 Dword* address */
        ACPI_VARIABLE_LENGTH,   /* 08 Word* address */
-       ACPI_VARIABLE_LENGTH,   /* 09 extended_iRQ */
-       ACPI_VARIABLE_LENGTH,   /* 0_a Qword* address */
-       ACPI_FIXED_LENGTH,      /* 0_b Extended* address */
-       ACPI_VARIABLE_LENGTH,   /* 0_c Gpio* */
+       ACPI_VARIABLE_LENGTH,   /* 09 extended_IRQ */
+       ACPI_VARIABLE_LENGTH,   /* 0A Qword* address */
+       ACPI_FIXED_LENGTH,      /* 0B Extended* address */
+       ACPI_VARIABLE_LENGTH,   /* 0C Gpio* */
        0,
-       ACPI_VARIABLE_LENGTH    /* 0_e *serial_bus */
+       ACPI_VARIABLE_LENGTH    /* 0E *serial_bus */
 };
 
 /*
- * For the i_aSL compiler/disassembler, we don't want any error messages
+ * For the iASL compiler/disassembler, we don't want any error messages
  * because the disassembler uses the resource validation code to determine
  * if Buffer objects are actually Resource Templates.
  */
@@ -398,11 +398,11 @@ static const u8 acpi_gbl_resource_types[] = {
  *
  * FUNCTION:    acpi_ut_walk_aml_resources
  *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource template
+ * PARAMETERS:  aml             - Pointer to the raw AML resource template
  *              aml_length      - Length of the entire template
  *              user_function   - Called once for each descriptor found. If
  *                                NULL, a pointer to the end_tag is returned
- *              Context         - Passed to user_function
+ *              context         - Passed to user_function
  *
  * RETURN:      Status
  *
@@ -513,7 +513,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
  *
  * FUNCTION:    acpi_ut_validate_resource
  *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
  *              return_index    - Where the resource index is returned. NULL
  *                                if the index is not required.
  *
@@ -664,7 +664,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
  *
  * FUNCTION:    acpi_ut_get_resource_type
  *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
  *
  * RETURN:      The Resource Type with no extraneous bits (except the
  *              Large/Small descriptor bit -- this is left alone)
@@ -698,7 +698,7 @@ u8 acpi_ut_get_resource_type(void *aml)
  *
  * FUNCTION:    acpi_ut_get_resource_length
  *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
  *
  * RETURN:      Byte Length
  *
@@ -738,7 +738,7 @@ u16 acpi_ut_get_resource_length(void *aml)
  *
  * FUNCTION:    acpi_ut_get_resource_header_length
  *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
  *
  * RETURN:      Length of the AML header (depends on large/small descriptor)
  *
@@ -763,7 +763,7 @@ u8 acpi_ut_get_resource_header_length(void *aml)
  *
  * FUNCTION:    acpi_ut_get_descriptor_length
  *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
  *
  * RETURN:      Byte length
  *
index 4267477c2797b2558becff9b23683d24a3cfadd1..a1c98826007328c8a53c851a10477d6ae37d50c2 100644 (file)
@@ -51,8 +51,8 @@ ACPI_MODULE_NAME("utstate")
  *
  * FUNCTION:    acpi_ut_create_pkg_state_and_push
  *
- * PARAMETERS:  Object          - Object to be added to the new state
- *              Action          - Increment/Decrement
+ * PARAMETERS:  object          - Object to be added to the new state
+ *              action          - Increment/Decrement
  *              state_list      - List the state will be added to
  *
  * RETURN:      Status
@@ -85,7 +85,7 @@ acpi_ut_create_pkg_state_and_push(void *internal_object,
  * FUNCTION:    acpi_ut_push_generic_state
  *
  * PARAMETERS:  list_head           - Head of the state stack
- *              State               - State object to push
+ *              state               - State object to push
  *
  * RETURN:      None
  *
@@ -214,8 +214,8 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
  *
  * FUNCTION:    acpi_ut_create_update_state
  *
- * PARAMETERS:  Object          - Initial Object to be installed in the state
- *              Action          - Update action to be performed
+ * PARAMETERS:  object          - Initial Object to be installed in the state
+ *              action          - Update action to be performed
  *
  * RETURN:      New state object, null on failure
  *
@@ -252,8 +252,8 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
  *
  * FUNCTION:    acpi_ut_create_pkg_state
  *
- * PARAMETERS:  Object          - Initial Object to be installed in the state
- *              Action          - Update action to be performed
+ * PARAMETERS:  object          - Initial Object to be installed in the state
+ *              action          - Update action to be performed
  *
  * RETURN:      New state object, null on failure
  *
@@ -325,7 +325,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
  *
  * FUNCTION:    acpi_ut_delete_generic_state
  *
- * PARAMETERS:  State               - The state object to be deleted
+ * PARAMETERS:  state               - The state object to be deleted
  *
  * RETURN:      None
  *
index afa94f51ff0b441911eb536b5b946b356fab95c1..534179f1177bf49239dc3561d7f29ac5422a6594 100644 (file)
@@ -131,7 +131,7 @@ acpi_status __init acpi_initialize_subsystem(void)
  *
  * FUNCTION:    acpi_enable_subsystem
  *
- * PARAMETERS:  Flags           - Init/enable Options
+ * PARAMETERS:  flags           - Init/enable Options
  *
  * RETURN:      Status
  *
@@ -234,7 +234,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
  *
  * FUNCTION:    acpi_initialize_objects
  *
- * PARAMETERS:  Flags           - Init/enable Options
+ * PARAMETERS:  flags           - Init/enable Options
  *
  * RETURN:      Status
  *
@@ -409,7 +409,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
  * PARAMETERS:  out_buffer      - A buffer to receive the resources for the
  *                                device
  *
- * RETURN:      Status          - the status of the call
+ * RETURN:      status          - the status of the call
  *
  * DESCRIPTION: This function is called to get information about the current
  *              state of the ACPI subsystem.  It will return system information
@@ -480,8 +480,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_system_info)
  *
  * FUNCTION:    acpi_install_initialization_handler
  *
- * PARAMETERS:  Handler             - Callback procedure
- *              Function            - Not (currently) used, see below
+ * PARAMETERS:  handler             - Callback procedure
+ *              function            - Not (currently) used, see below
  *
  * RETURN:      Status
  *
@@ -618,7 +618,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_interface)
  *
  * FUNCTION:    acpi_install_interface_handler
  *
- * PARAMETERS:  Handler             - The _OSI interface handler to install
+ * PARAMETERS:  handler             - The _OSI interface handler to install
  *                                    NULL means "remove existing handler"
  *
  * RETURN:      Status
@@ -651,9 +651,9 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
  * FUNCTION:    acpi_check_address_range
  *
  * PARAMETERS:  space_id            - Address space ID
- *              Address             - Start address
- *              Length              - Length
- *              Warn                - TRUE if warning on overlap desired
+ *              address             - Start address
+ *              length              - Length
+ *              warn                - TRUE if warning on overlap desired
  *
  * RETURN:      Count of the number of conflicts detected.
  *
index 52b568af18199570446431a81638814b3cc6c99a..6d63cc39b9aea3cbf43a0203a05a560e8c3a4edc 100644 (file)
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("utxferror")
  * This module is used for the in-kernel ACPICA as well as the ACPICA
  * tools/applications.
  *
- * For the i_aSL compiler case, the output is redirected to stderr so that
+ * For the iASL compiler case, the output is redirected to stderr so that
  * any of the various ACPI errors and warnings do not appear in the output
  * files, for either the compiler or disassembler portions of the tool.
  */
@@ -70,7 +70,7 @@ extern FILE *acpi_gbl_output_file;
 
 #else
 /*
- * non-i_aSL case - no redirection, nothing to do
+ * non-iASL case - no redirection, nothing to do
  */
 #define ACPI_MSG_REDIRECT_BEGIN
 #define ACPI_MSG_REDIRECT_END
@@ -82,6 +82,8 @@ extern FILE *acpi_gbl_output_file;
 #define ACPI_MSG_EXCEPTION      "ACPI Exception: "
 #define ACPI_MSG_WARNING        "ACPI Warning: "
 #define ACPI_MSG_INFO           "ACPI: "
+#define ACPI_MSG_BIOS_ERROR     "ACPI BIOS Bug: Error: "
+#define ACPI_MSG_BIOS_WARNING   "ACPI BIOS Bug: Warning: "
 /*
  * Common message suffix
  */
@@ -93,7 +95,7 @@ extern FILE *acpi_gbl_output_file;
  *
  * PARAMETERS:  module_name         - Caller's module name (for error output)
  *              line_number         - Caller's line number (for error output)
- *              Format              - Printf format string + additional args
+ *              format              - Printf format string + additional args
  *
  * RETURN:      None
  *
@@ -124,8 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
  *
  * PARAMETERS:  module_name         - Caller's module name (for error output)
  *              line_number         - Caller's line number (for error output)
- *              Status              - Status to be formatted
- *              Format              - Printf format string + additional args
+ *              status              - Status to be formatted
+ *              format              - Printf format string + additional args
  *
  * RETURN:      None
  *
@@ -159,7 +161,7 @@ ACPI_EXPORT_SYMBOL(acpi_exception)
  *
  * PARAMETERS:  module_name         - Caller's module name (for error output)
  *              line_number         - Caller's line number (for error output)
- *              Format              - Printf format string + additional args
+ *              format              - Printf format string + additional args
  *
  * RETURN:      None
  *
@@ -190,7 +192,7 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
  *
  * PARAMETERS:  module_name         - Caller's module name (for error output)
  *              line_number         - Caller's line number (for error output)
- *              Format              - Printf format string + additional args
+ *              format              - Printf format string + additional args
  *
  * RETURN:      None
  *
@@ -218,6 +220,72 @@ acpi_info(const char *module_name, u32 line_number, const char *format, ...)
 
 ACPI_EXPORT_SYMBOL(acpi_info)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_bios_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print "ACPI Firmware Error" message with module/line/version
+ *              info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_error(const char *module_name,
+               u32 line_number, const char *format, ...)
+{
+       va_list arg_list;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+
+       ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_bios_error)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_bios_warning
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print "ACPI Firmware Warning" message with module/line/version
+ *              info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_warning(const char *module_name,
+                 u32 line_number, const char *format, ...)
+{
+       va_list arg_list;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_BIOS_WARNING);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+
+       ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_bios_warning)
+
 /*
  * The remainder of this module contains internal error functions that may
  * be configured out.
@@ -271,9 +339,9 @@ acpi_ut_predefined_warning(const char *module_name,
  *
  * PARAMETERS:  module_name     - Caller's module name (for error output)
  *              line_number     - Caller's line number (for error output)
- *              Pathname        - Full pathname to the node
+ *              pathname        - Full pathname to the node
  *              node_flags      - From Namespace node for the method/object
- *              Format          - Printf format string + additional args
+ *              format          - Printf format string + additional args
  *
  * RETURN:      None
  *
@@ -373,9 +441,9 @@ acpi_ut_namespace_error(const char *module_name,
  *
  * PARAMETERS:  module_name         - Caller's module name (for error output)
  *              line_number         - Caller's line number (for error output)
- *              Message             - Error message to use on failure
+ *              message             - Error message to use on failure
  *              prefix_node         - Prefix relative to the path
- *              Path                - Path to the node (optional)
+ *              path                - Path to the node (optional)
  *              method_status       - Execution status
  *
  * RETURN:      None
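acpi_bios_error() and acpi_bios_warning() are the only functional additions in this file; they mirror acpi_error()/acpi_warning() but prefix the message with "ACPI BIOS Bug:" so firmware-owned problems are clearly attributed. A hedged usage sketch matching the prototypes added above — the module name, variables and condition are made up, and real ACPICA call sites normally go through a wrapper macro that fills in the module/line arguments automatically:

/* Hedged sketch: reporting a firmware-owned problem with the new helper.
 * "tbfadt", fadt_length and minimum_length are illustrative only. */
if (fadt_length < minimum_length)
        acpi_bios_warning("tbfadt", __LINE__,
                          "FADT length %u is smaller than expected %u",
                          fadt_length, minimum_length);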
index 1427d191d15af43b85838e5f61ffb2599c96b2ee..0a40a851b3548fe71810e9b893fd01a2d5badd71 100644 (file)
@@ -58,8 +58,8 @@ acpi_ut_get_mutex_object(acpi_handle handle,
  *
  * FUNCTION:    acpi_ut_get_mutex_object
  *
- * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
- *              Pathname            - Mutex pathname (optional)
+ * PARAMETERS:  handle              - Mutex or prefix handle (optional)
+ *              pathname            - Mutex pathname (optional)
  *              ret_obj             - Where the mutex object is returned
  *
  * RETURN:      Status
@@ -118,9 +118,9 @@ acpi_ut_get_mutex_object(acpi_handle handle,
  *
  * FUNCTION:    acpi_acquire_mutex
  *
- * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
- *              Pathname            - Mutex pathname (optional)
- *              Timeout             - Max time to wait for the lock (millisec)
+ * PARAMETERS:  handle              - Mutex or prefix handle (optional)
+ *              pathname            - Mutex pathname (optional)
+ *              timeout             - Max time to wait for the lock (millisec)
  *
  * RETURN:      Status
  *
@@ -155,8 +155,8 @@ acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
  *
  * FUNCTION:    acpi_release_mutex
  *
- * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
- *              Pathname            - Mutex pathname (optional)
+ * PARAMETERS:  handle              - Mutex or prefix handle (optional)
+ *              pathname            - Mutex pathname (optional)
  *
  * RETURN:      Status
  *
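The comment renames above cover acpi_acquire_mutex() and acpi_release_mutex(), the external interfaces for grabbing an AML-declared mutex either by handle or by pathname, with a millisecond timeout on acquire (the acquire prototype is visible in the unchanged hunk line). A hedged usage sketch — the pathname, timeout and error handling below are illustrative only:

/* Hedged sketch: serializing against AML through the interfaces documented
 * above.  "\\_SB.PCI0.LPCB.MUT0" is a made-up mutex pathname. */
acpi_status status;

status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.LPCB.MUT0", 1000);   /* 1 s */
if (ACPI_FAILURE(status))
        return -ENODEV;

/* ... access the resource shared with AML ... */

acpi_release_mutex(NULL, "\\_SB.PCI0.LPCB.MUT0");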
index 5577762daee1d7d22a8dbf49a27cf02ead2e54a4..00a783661d0b5548b65882f312faa4a116ff5762 100644 (file)
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               return acpi_os_map_generic_address(&entry->register_region);
+               return apei_map_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               acpi_os_unmap_generic_address(&entry->register_region);
+               apei_unmap_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -586,6 +586,11 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
        }
        *access_bit_width = 1UL << (access_size_code + 2);
 
+       /* Fixup common BIOS bug */
+       if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
+           *access_bit_width < 32)
+               *access_bit_width = 32;
+
        if ((bit_width + bit_offset) > *access_bit_width) {
                pr_warning(FW_BUG APEI_PFX
                           "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
@@ -606,6 +611,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
        return 0;
 }
 
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+       int rc;
+       u32 access_bit_width;
+       u64 address;
+
+       rc = apei_check_gar(reg, &address, &access_bit_width);
+       if (rc)
+               return rc;
+       return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
 /* read GAR in interrupt (including NMI) or process context */
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {
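The "Fixup common BIOS bug" hunk above widens the access width for a GAR that declares a 32-bit, dword-aligned register but carries an access size code smaller than dword. A standalone sketch of that arithmetic (access_bit_width = 1 << (access_size_code + 2), then the widening test), runnable outside the kernel:

/* Standalone sketch of the GAR width fixup added to apei_check_gar():
 * a byte-access (size code 1) GAR that actually describes an aligned
 * 32-bit register is widened to 32-bit access. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  bit_width = 32, bit_offset = 0, access_size_code = 1;
        uint64_t paddr = 0xfed40040;                            /* dword aligned */
        uint32_t access_bit_width = 1UL << (access_size_code + 2);      /* 8 */

        if (bit_width == 32 && bit_offset == 0 && (paddr & 0x03) == 0 &&
            access_bit_width < 32)
                access_bit_width = 32;

        printf("access_bit_width=%u\n", access_bit_width);      /* 32 */
        return 0;
}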
index cca240a33038fe148ca5e9324214ba988c62c6c2..f220d642136ed94a1844411ee532e777be6f847e 100644 (file)
@@ -7,6 +7,8 @@
 #define APEI_INTERNAL_H
 
 #include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
 
 struct apei_exec_context;
 
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP       1
 
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+       acpi_os_unmap_generic_address(reg);
+}
+
 int apei_read(u64 *val, struct acpi_generic_address *reg);
 int apei_write(u64 val, struct acpi_generic_address *reg);
 
index 9b3cac0abecc33c672c884ece470dfb494d1836c..1599566ed1fe077113b10200d22d0db097fd25fd 100644 (file)
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        if (!ghes)
                return ERR_PTR(-ENOMEM);
        ghes->generic = generic;
-       rc = acpi_os_map_generic_address(&generic->error_status_address);
+       rc = apei_map_generic_address(&generic->error_status_address);
        if (rc)
                goto err_free;
        error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        return ghes;
 
 err_unmap:
-       acpi_os_unmap_generic_address(&generic->error_status_address);
+       apei_unmap_generic_address(&generic->error_status_address);
 err_free:
        kfree(ghes);
        return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
 static void ghes_fini(struct ghes *ghes)
 {
        kfree(ghes->estatus);
-       acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+       apei_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
 enum {
index 86933ca8b4724372505530de753332907f488ebe..5662d64e67339fcf1c7e6c097d44294a7a747ecb 100644 (file)
@@ -250,6 +250,13 @@ static int acpi_battery_get_property(struct power_supply *psy,
                else
                        val->intval = battery->capacity_now * 1000;
                break;
+       case POWER_SUPPLY_PROP_CAPACITY:
+               if (battery->capacity_now && battery->full_charge_capacity)
+                       val->intval = battery->capacity_now * 100/
+                                       battery->full_charge_capacity;
+               else
+                       val->intval = 0;
+               break;
        case POWER_SUPPLY_PROP_MODEL_NAME:
                val->strval = battery->model_number;
                break;
@@ -276,6 +283,7 @@ static enum power_supply_property charge_battery_props[] = {
        POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
        POWER_SUPPLY_PROP_CHARGE_FULL,
        POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_MODEL_NAME,
        POWER_SUPPLY_PROP_MANUFACTURER,
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -292,6 +300,7 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
        POWER_SUPPLY_PROP_ENERGY_FULL,
        POWER_SUPPLY_PROP_ENERGY_NOW,
+       POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_MODEL_NAME,
        POWER_SUPPLY_PROP_MANUFACTURER,
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -643,11 +652,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
 
 static void acpi_battery_refresh(struct acpi_battery *battery)
 {
+       int power_unit;
+
        if (!battery->bat.dev)
                return;
 
+       power_unit = battery->power_unit;
+
        acpi_battery_get_info(battery);
-       /* The battery may have changed its reporting units. */
+
+       if (power_unit == battery->power_unit)
+               return;
+
+       /* The battery has changed its reporting units. */
        sysfs_remove_battery(battery);
        sysfs_add_battery(battery);
 }
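The new POWER_SUPPLY_PROP_CAPACITY case derives a percentage from capacity_now and full_charge_capacity with integer arithmetic, guarding against zero values just as the hunk does. A tiny standalone sketch of the same computation:

/* Standalone sketch of the capacity percentage added above. */
#include <stdio.h>

static int capacity_percent(int capacity_now, int full_charge_capacity)
{
        if (!capacity_now || !full_charge_capacity)
                return 0;
        return capacity_now * 100 / full_charge_capacity;
}

int main(void)
{
        printf("%d%%\n", capacity_percent(4200, 5600));         /* 75% */
        return 0;
}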
index 3d4fc7af02f7063bc02bc8098fc84e5abfdca3a5..9628652e080c590fb3f6e01dedc655584c12737b 100644 (file)
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
                                  Power Management
    -------------------------------------------------------------------------- */
 
+static const char *state_string(int state)
+{
+       switch (state) {
+       case ACPI_STATE_D0:
+               return "D0";
+       case ACPI_STATE_D1:
+               return "D1";
+       case ACPI_STATE_D2:
+               return "D2";
+       case ACPI_STATE_D3_HOT:
+               return "D3hot";
+       case ACPI_STATE_D3_COLD:
+               return "D3";
+       default:
+               return "(unknown)";
+       }
+}
+
 static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
-       int result = 0;
-       acpi_status status = 0;
-       unsigned long long psc = 0;
+       int result = ACPI_STATE_UNKNOWN;
 
        if (!device || !state)
                return -EINVAL;
 
-       *state = ACPI_STATE_UNKNOWN;
-
-       if (device->flags.power_manageable) {
-               /*
-                * Get the device's power state either directly (via _PSC) or
-                * indirectly (via power resources).
-                */
-               if (device->power.flags.power_resources) {
-                       result = acpi_power_get_inferred_state(device, state);
-                       if (result)
-                               return result;
-               } else if (device->power.flags.explicit_get) {
-                       status = acpi_evaluate_integer(device->handle, "_PSC",
-                                                      NULL, &psc);
-                       if (ACPI_FAILURE(status))
-                               return -ENODEV;
-                       *state = (int)psc;
-               }
-       } else {
+       if (!device->flags.power_manageable) {
                /* TBD: Non-recursive algorithm for walking up hierarchy. */
                *state = device->parent ?
                        device->parent->power.state : ACPI_STATE_D0;
+               goto out;
+       }
+
+       /*
+        * Get the device's power state either directly (via _PSC) or
+        * indirectly (via power resources).
+        */
+       if (device->power.flags.explicit_get) {
+               unsigned long long psc;
+               acpi_status status = acpi_evaluate_integer(device->handle,
+                                                          "_PSC", NULL, &psc);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+
+               result = psc;
+       }
+       /* The test below covers ACPI_STATE_UNKNOWN too. */
+       if (result <= ACPI_STATE_D2) {
+               ; /* Do nothing. */
+       } else if (device->power.flags.power_resources) {
+               int error = acpi_power_get_inferred_state(device, &result);
+               if (error)
+                       return error;
+       } else if (result == ACPI_STATE_D3_HOT) {
+               result = ACPI_STATE_D3;
        }
+       *state = result;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-                         device->pnp.bus_id, *state));
+ out:
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+                         device->pnp.bus_id, state_string(*state)));
 
        return 0;
 }
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
        /* Make sure this is a valid target state */
 
        if (state == device->power.state) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
-                                 state));
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+                                 state_string(state)));
                return 0;
        }
 
        if (!device->power.states[state].flags.valid) {
-               printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+               printk(KERN_WARNING PREFIX "Device does not support %s\n",
+                      state_string(state));
                return -ENODEV;
        }
        if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
       end:
        if (result)
                printk(KERN_WARNING PREFIX
-                             "Device [%s] failed to transition to D%d\n",
-                             device->pnp.bus_id, state);
+                             "Device [%s] failed to transition to %s\n",
+                             device->pnp.bus_id, state_string(state));
        else {
                device->power.state = state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Device [%s] transitioned to D%d\n",
-                                 device->pnp.bus_id, state));
+                                 "Device [%s] transitioned to %s\n",
+                                 device->pnp.bus_id, state_string(state)));
        }
 
        return result;
index c3881b2eb8b2c5a0ac9b353aa7d7bdd699001ba0..9eaf708f588553ee78b10bb6571e55859fb1db3c 100644 (file)
@@ -891,7 +891,7 @@ static void acpi_os_execute_deferred(struct work_struct *work)
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 
        if (dpc->wait)
-               acpi_os_wait_events_complete(NULL);
+               acpi_os_wait_events_complete();
 
        dpc->function(dpc->context);
        kfree(dpc);
@@ -987,7 +987,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
        return __acpi_os_execute(0, function, context, 1);
 }
 
-void acpi_os_wait_events_complete(void *context)
+void acpi_os_wait_events_complete(void)
 {
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
index 0500f719f63e2aa3d7e8c711e809f84961eb8f4d..dd6d6a3c6780d7e787c1c3427b808618d8c64fa4 100644 (file)
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
         * We know a device's inferred power state when all the resources
         * required for a given D-state are 'on'.
         */
-       for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+       for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
                list = &device->power.states[i].resources;
                if (list->count < 1)
                        continue;
index c850de4c9a146883a7d91f16c8cd8ddc3b694940..eff722278ff539535bec85d5f5f6f4f2b04b79bf 100644 (file)
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always return 0 for CPU0's handle.
+                * Ignores apic_id and always returns 0 for the processor
+                * handle with acpi id 0 if nr_cpu_ids is 1.
+                * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
                 */
-               if (acpi_id == 0)
+               if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return apic_id;
index f3decb30223fd1b376775310ae25dec63424f920..9837c9c4f00957422dd3722e9fe2e3f5de0aaf02 100644 (file)
@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
+static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+       if (acpi_idle_suspend == 1)
+               return 0;
+
        acpi_idle_bm_rld_save();
+       acpi_idle_suspend = 1;
        return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
+       if (acpi_idle_suspend == 0)
+               return 0;
+
        acpi_idle_bm_rld_restore();
+       acpi_idle_suspend = 0;
        return 0;
 }
 
@@ -304,16 +313,16 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 
        /* determine latencies from FADT */
-       pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
-       pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
+       pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
+       pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
 
        /*
         * FADT specified C2 latency must be less than or equal to
         * 100 microseconds.
         */
-       if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+       if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
+                       "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
                /* invalidate C2 */
                pr->power.states[ACPI_STATE_C2].address = 0;
        }
@@ -322,9 +331,9 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
         * FADT supplied C3 latency must be less than or equal to
         * 1000 microseconds.
         */
-       if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+       if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
+                       "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
                /* invalidate C3 */
                pr->power.states[ACPI_STATE_C3].address = 0;
        }
@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        lapic_timer_state_broadcast(pr, cx, 1);
        kt1 = ktime_get_real();
        acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                                                drv, drv->safe_state_index);
                } else {
                        local_irq_disable();
-                       acpi_safe_halt();
+                       if (!acpi_idle_suspend)
+                               acpi_safe_halt();
                        local_irq_enable();
-                       return -EINVAL;
+                       return -EBUSY;
                }
        }
 
        local_irq_disable();
 
+       if (acpi_idle_suspend) {
+               local_irq_enable();
+               cpu_relax();
+               return -EBUSY;
+       }
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
index 0af48a8554cd786620266c5449fb82798f034de1..a093dc163a42a8b677d9ccb7b27a61d0427c3f49 100644 (file)
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;
+       int last_invalid = -1;
 
 
        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
                    ((u32)(px->core_frequency * 1000) !=
                     (px->core_frequency * 1000))) {
                        printk(KERN_ERR FW_BUG PREFIX
-                              "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
-                              px->core_frequency);
-                       result = -EFAULT;
-                       kfree(pr->performance->states);
-                       goto end;
+                              "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
+                              pr->id, px->core_frequency);
+                       if (last_invalid == -1)
+                               last_invalid = i;
+               } else {
+                       if (last_invalid != -1) {
+                               /*
+                                * Copy this valid entry over last_invalid entry
+                                */
+                               memcpy(&(pr->performance->states[last_invalid]),
+                                      px, sizeof(struct acpi_processor_px));
+                               ++last_invalid;
+                       }
                }
        }
 
+       if (last_invalid == 0) {
+               printk(KERN_ERR FW_BUG PREFIX
+                      "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+               result = -EFAULT;
+               kfree(pr->performance->states);
+               pr->performance->states = NULL;
+       }
+
+       if (last_invalid > 0)
+               pr->performance->state_count = last_invalid;
+
       end:
        kfree(buffer.pointer);
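Rather than rejecting the whole _PSS package when one frequency is bogus, the rework above compacts later valid entries down over the invalid ones and trims state_count afterwards (failing only if no entry at all is valid). A standalone sketch of that compaction pattern on a plain array:

/* Standalone sketch of the _PSS compaction pattern introduced above:
 * invalid entries (frequency 0 here) are overwritten by later valid
 * ones and the count is trimmed to the number of valid states. */
#include <stdio.h>

int main(void)
{
        unsigned int freq[] = { 2400, 0, 0, 1800, 1200 };       /* two invalid */
        int count = 5, last_invalid = -1, i;

        for (i = 0; i < count; i++) {
                if (freq[i] == 0) {                     /* invalid entry */
                        if (last_invalid == -1)
                                last_invalid = i;
                } else if (last_invalid != -1) {
                        freq[last_invalid++] = freq[i]; /* copy valid over invalid */
                }
        }

        if (last_invalid > 0)
                count = last_invalid;

        for (i = 0; i < count; i++)
                printf("%u ", freq[i]);                 /* 2400 1800 1200 */
        printf("(count=%d)\n", count);
        return 0;
}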
 
index bea3ab6b524fe9a6e9cbf22a346a79b8e0091458..148556a8f0f23180cbb2f8acb8a0971aff6b414c 100644 (file)
@@ -1607,6 +1607,7 @@ static int acpi_bus_scan_fixed(void)
                                                ACPI_BUS_TYPE_POWER_BUTTON,
                                                ACPI_STA_DEFAULT,
                                                &ops);
+               device_init_wakeup(&device->dev, true);
        }
 
        if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
index 74ee4ab577b67b2dd22b1068240c4c651f1e2e3a..23a53c013f1eb6e594591189deb83c4e3f749301 100644 (file)
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
 static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -142,7 +143,7 @@ void __init acpi_old_suspend_ordering(void)
 static int acpi_pm_freeze(void)
 {
        acpi_disable_all_gpes();
-       acpi_os_wait_events_complete(NULL);
+       acpi_os_wait_events_complete();
        acpi_ec_block_transactions();
        return 0;
 }
@@ -184,6 +185,14 @@ static int acpi_pm_prepare(void)
        return error;
 }
 
+static int find_powerf_dev(struct device *dev, void *data)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       const char *hid = acpi_device_hid(device);
+
+       return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
 /**
  *     acpi_pm_finish - Instruct the platform to leave a sleep state.
  *
@@ -192,6 +201,7 @@ static int acpi_pm_prepare(void)
  */
 static void acpi_pm_finish(void)
 {
+       struct device *pwr_btn_dev;
        u32 acpi_state = acpi_target_sleep_state;
 
        acpi_ec_unblock_transactions();
@@ -209,6 +219,23 @@ static void acpi_pm_finish(void)
        acpi_set_firmware_waking_vector((acpi_physical_address) 0);
 
        acpi_target_sleep_state = ACPI_STATE_S0;
+
+       /* If we were woken with the fixed power button, provide a small
+        * hint to userspace in the form of a wakeup event on the fixed power
+        * button device (if it can be found).
+        *
+        * We delay the event generation until now, as the PM layer requires
+        * timekeeping to be running before we generate events. */
+       if (!pwr_btn_event_pending)
+               return;
+
+       pwr_btn_event_pending = false;
+       pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+                                     find_powerf_dev);
+       if (pwr_btn_dev) {
+               pm_wakeup_event(pwr_btn_dev, 0);
+               put_device(pwr_btn_dev);
+       }
 }
 
 /**
@@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
        /* ACPI 3.0 specs (P62) says that it's the responsibility
         * of the OSPM to clear the status bit [ implying that the
         * POWER_BUTTON event should not reach userspace ]
+        *
+        * However, we do generate a small hint for userspace in the form of
+        * a wakeup event. We flag this condition for now and generate the
+        * event later, as we're currently too early in resume to be able to
+        * generate wakeup events.
         */
-       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
-               acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+       if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+               acpi_event_status pwr_btn_status;
+
+               acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+               if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+                       acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+                       /* Flag for later */
+                       pwr_btn_event_pending = true;
+               }
+       }
 
        /*
         * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
         * can wake the system.  _S0W may be valid, too.
         */
        if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-           (device_may_wakeup(dev) &&
-            adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+           (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+            adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
                acpi_status status;
 
                acpi_method[3] = 'W';
index 9f66181c814e78e2fc40c88ba743b5aa062e50e9..240a24400976929c663b9b3f8ce8bafa36435d5f 100644 (file)
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
        int result = 0;
 
-       if (!strncmp(val, "enable", strlen("enable") - 1)) {
+       if (!strncmp(val, "enable", strlen("enable"))) {
                result = acpi_debug_trace(trace_method_name, trace_debug_level,
                                          trace_debug_layer, 0);
                if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
                goto exit;
        }
 
-       if (!strncmp(val, "disable", strlen("disable") - 1)) {
+       if (!strncmp(val, "disable", strlen("disable"))) {
                int name = 0;
                result = acpi_debug_trace((char *)&name, trace_debug_level,
                                          trace_debug_layer, 0);
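The two hunks above drop the stray "- 1" from the strncmp() length: comparing only strlen("enable") - 1 characters meant any value sharing the first five characters ("enabl", "enablx", ...) was treated as "enable", and likewise for "disable". A standalone sketch of the difference:

/* Standalone sketch of the off-by-one fixed above: with the "- 1" length,
 * strncmp() compares "enabl" only and mistakenly accepts "enablx". */
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *val = "enablx";

        printf("old check matches: %d\n",
               !strncmp(val, "enable", strlen("enable") - 1));  /* 1 (bug) */
        printf("new check matches: %d\n",
               !strncmp(val, "enable", strlen("enable")));      /* 0 */
        return 0;
}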
index 7dbebea1ec31302bcb3eaac60d0aa3dd8fb9f7af..8275e7b069622d67bfb3528d01e90b28a864be25 100644 (file)
@@ -550,8 +550,6 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
        return 0;
 }
 
-static const char enabled[] = "kernel";
-static const char disabled[] = "user";
 static int thermal_get_mode(struct thermal_zone_device *thermal,
                                enum thermal_device_mode *mode)
 {
@@ -588,8 +586,8 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
        if (enable != tz->tz_enabled) {
                tz->tz_enabled = enable;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "%s ACPI thermal control\n",
-                       tz->tz_enabled ? enabled : disabled));
+                       "%s kernel ACPI thermal control\n",
+                       tz->tz_enabled ? "Enable" : "Disable"));
                acpi_thermal_check(tz);
        }
        return 0;
@@ -845,7 +843,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
 
        if (tz->trips.passive.flags.valid)
                tz->thermal_zone =
-                       thermal_zone_device_register("acpitz", trips, tz,
+                       thermal_zone_device_register("acpitz", trips, 0, tz,
                                                     &acpi_thermal_zone_ops,
                                                     tz->trips.passive.tc1,
                                                     tz->trips.passive.tc2,
@@ -853,7 +851,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
                                                     tz->polling_frequency*100);
        else
                tz->thermal_zone =
-                       thermal_zone_device_register("acpitz", trips, tz,
+                       thermal_zone_device_register("acpitz", trips, 0, tz,
                                                     &acpi_thermal_zone_ops,
                                                     0, 0, 0,
                                                     tz->polling_frequency*100);
index 9577b6fa26507cad1db1f2ddf7d6f114abfdf513..1e0a9e17c31d805a201e8dbcbf7933efe8eec111 100644 (file)
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
        union acpi_object arg0 = { ACPI_TYPE_INTEGER };
        struct acpi_object_list args = { 1, &arg0 };
 
+       if (!video->cap._DOS)
+               return 0;
 
        if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
                return -EINVAL;
@@ -1687,10 +1689,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
        set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
        set_bit(KEY_DISPLAY_OFF, input->keybit);
 
-       error = input_register_device(input);
-       if (error)
-               goto err_stop_video;
-
        printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
               ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
               video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1699,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
        video->pm_nb.priority = 0;
        error = register_pm_notifier(&video->pm_nb);
        if (error)
-               goto err_unregister_input_dev;
+               goto err_stop_video;
+
+       error = input_register_device(input);
+       if (error)
+               goto err_unregister_pm_notifier;
 
        return 0;
 
- err_unregister_input_dev:
-       input_unregister_device(input);
+ err_unregister_pm_notifier:
+       unregister_pm_notifier(&video->pm_nb);
  err_stop_video:
        acpi_video_bus_stop_devices(video);
  err_free_input_dev:
@@ -1743,9 +1745,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
        return 0;
 }
 
+static int __init is_i740(struct pci_dev *dev)
+{
+       if (dev->device == 0x00D1)
+               return 1;
+       if (dev->device == 0x7000)
+               return 1;
+       return 0;
+}
+
 static int __init intel_opregion_present(void)
 {
-#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+       int opregion = 0;
        struct pci_dev *dev = NULL;
        u32 address;
 
@@ -1754,13 +1765,15 @@ static int __init intel_opregion_present(void)
                        continue;
                if (dev->vendor != PCI_VENDOR_ID_INTEL)
                        continue;
+               /* We don't want to poke around undefined i740 registers */
+               if (is_i740(dev))
+                       continue;
                pci_read_config_dword(dev, 0xfc, &address);
                if (!address)
                        continue;
-               return 1;
+               opregion = 1;
        }
-#endif
-       return 0;
+       return opregion;
 }
 
 int acpi_video_register(void)
index 3239517f4d902952654212f0a462d646fbd94608..ac6a5beb28f3b99e090f358257ef161476113c32 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller source file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
 
 module_platform_driver(arasan_cf_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
index 1b1cbb571d38d72acce854b8fc6437b5323783c9..4b01ab3d2c249328c810aaa03aec450d7f8d2822 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/wait.h>
 #include <linux/async.h>
 #include <linux/pm_runtime.h>
+#include <scsi/scsi_scan.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -100,7 +101,7 @@ static void driver_deferred_probe_add(struct device *dev)
        mutex_lock(&deferred_probe_mutex);
        if (list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Added to deferred list\n");
-               list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+               list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
        }
        mutex_unlock(&deferred_probe_mutex);
 }
@@ -332,6 +333,7 @@ void wait_for_device_probe(void)
        /* wait for the known devices to complete their probing */
        wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
        async_synchronize_full();
+       scsi_complete_async_scans();
 }
 EXPORT_SYMBOL_GPL(wait_for_device_probe);
 
index e0fb5b0435a350abaa5a23c30e76f4e5d691001a..9cb845e49334c78d27a958066cf33dfc665c4ac1 100644 (file)
@@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        dpm_wait_for_children(dev, async);
 
        if (async_error)
-               return 0;
+               goto Complete;
 
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
-               return 0;
+               goto Complete;
        }
 
        device_lock(dev);
@@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+
+ Complete:
        complete_all(&dev->power.completion);
 
        if (error) {
index 0bcda488f11cd45e6252e113ffd025b058309e99..c89aa01fb1de8262838998dc68f778d5d7473d91 100644 (file)
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
                map->lock = regmap_lock_mutex;
                map->unlock = regmap_unlock_mutex;
        }
-       map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
        map->format.pad_bytes = config->pad_bits / 8;
        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
-       map->format.buf_size += map->format.pad_bytes;
+       map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+                       config->val_bits + config->pad_bits, 8);
        map->reg_shift = config->pad_bits % 8;
        if (config->reg_stride)
                map->reg_stride = config->reg_stride;
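
The new buf_size arithmetic rounds the combined register, value and pad widths up as a single quantity, whereas the old code truncated each term separately and could under-size the work buffer. A minimal user-space sketch of the difference (the bit widths are hypothetical, chosen only to expose the rounding behaviour):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical config: 7-bit register, 7-bit value, 2 pad bits */
	int reg_bits = 7, val_bits = 7, pad_bits = 2;

	int old_size = (reg_bits + val_bits) / 8 + pad_bits / 8;        /* truncates to 1 byte */
	int new_size = DIV_ROUND_UP(reg_bits + val_bits + pad_bits, 8); /* rounds up to 2 bytes */

	printf("old=%d new=%d\n", old_size, new_size);
	return 0;
}
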
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
 
        ret = regcache_init(map, config);
        if (ret < 0)
-               goto err_free_workbuf;
+               goto err_debugfs;
 
        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
 
 err_cache:
        regcache_exit(map);
-err_free_workbuf:
+err_debugfs:
+       regmap_debugfs_exit(map);
        kfree(map->work_buf);
 err_map:
        kfree(map);
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
 
 /**
  * regmap_exit(): Free a previously allocated register map
index a058842f14fdf54b92b495f01c34a02ed2b74bcc..61ce4054b3c33b3ed70e0de39d0e19a9f5167cf0 100644 (file)
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
                bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
                break;
        case 0x4331:
-               /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
+       case 43431:
+               /* Ext PA lines must be enabled for tx on BCM4331 */
+               bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
                break;
        case 43224:
                if (bus->chipinfo.rev == 0) {
index 9a96f14c8f474fba41442bbdcbe3bc910a31efc6..c32ebd537abe3a3e5f8f5e777f119c8768c9ea38 100644 (file)
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
                          bool enable)
 {
-       struct pci_dev *pdev = pc->core->bus->host_pci;
+       struct pci_dev *pdev;
        u32 coremask, tmp;
        int err = 0;
 
-       if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+       if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
                /* This bcma device is not on a PCI host-bus. So the IRQs are
                 * not routed through the PCI core.
                 * So we must not enable routing through the PCI core. */
                goto out;
        }
 
+       pdev = pc->core->bus->host_pci;
+
        err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
        if (err)
                goto out;
index c7f93359acb09affe99a398f45af7974ccdc30e3..f16f42d36071371414574305d6f057f9240f3bfd 100644 (file)
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
        if (!sprom)
                return -ENOMEM;
 
-       if (bus->chipinfo.id == 0x4331)
+       if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
 
        pr_debug("SPROM offset 0x%x\n", offset);
        bcma_sprom_read(bus, offset, sprom);
 
-       if (bus->chipinfo.id == 0x4331)
+       if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
 
        err = bcma_sprom_valid(sprom);
index b5c5ff53cb57f74e89cc59bad8ca6e29df5e1db5..fcb956bb4b4c525875f961e7c2398b20c35723f4 100644 (file)
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
                first_word = 0;
                spin_lock_irq(&b->bm_lock);
        }
-
        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
-       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+        * ==> e = 32767, el = 32768, last_page = 2,
+        * and now last_word = 0.
+        * We do not want to touch last_page in this case,
+        * as we did not allocate it, it is not present in bitmap->bm_pages.
+        */
+       if (last_word)
+               bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
 
        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
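
The last_word == 0 case described in the comment can be reproduced with a rough stand-alone sketch; LN2_BPL and MLPP() below are assumed stand-ins (64-bit longs, 4 KiB pages, hence 512 longs per page), not the driver's actual definitions:

#include <stdio.h>

#define LN2_BPL 6                  /* assumed: log2(64 bits per long) */
#define MLPP(x) ((x) & (512 - 1))  /* assumed: word index modulo longs-per-page */

int main(void)
{
	unsigned long e = 32767, el = e + 1;           /* bm_bits = 32768 */
	unsigned long last_word = MLPP(el >> LN2_BPL); /* 32768 / 64 = 512 -> wraps to 0 */

	printf("last_word = %lu\n", last_word);        /* 0, so last_page is left untouched */
	return 0;
}
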
index 9c5c84946b056792fa45d99051bd5c28750db7e1..8e93a6ac9bb65e3497f53c92723a67429ffdf946 100644 (file)
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+               if (req->rq_state & RQ_LOCAL_ABORTED) {
+                       _req_may_be_done(req, m);
+                       break;
+               }
 
                __drbd_chk_io_error(mdev, false);
 
        goto_queue_for_net_read:
 
+               D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
                /* no point in retrying if there is no good remote data,
                 * or we have no connection. */
                if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+       int congested = 0;
+
+       /* If I don't even have good local storage, we can not reasonably try
+        * to pull ahead of the peer. We also need the local reference to make
+        * sure mdev->act_log is there.
+        * Note: caller has to make sure that net_conf is there.
+        */
+       if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+               return;
+
+       if (mdev->net_conf->cong_fill &&
+           atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+               dev_info(DEV, "Congestion-fill threshold reached\n");
+               congested = 1;
+       }
+
+       if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+               dev_info(DEV, "Congestion-extents threshold reached\n");
+               congested = 1;
+       }
+
+       if (congested) {
+               queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+               if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                       _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+               else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+       }
+       put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
                _req_mod(req, queue_for_send_oos);
 
        if (remote &&
-           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-               int congested = 0;
-
-               if (mdev->net_conf->cong_fill &&
-                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-                       dev_info(DEV, "Congestion-fill threshold reached\n");
-                       congested = 1;
-               }
-
-               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-                       dev_info(DEV, "Congestion-extents threshold reached\n");
-                       congested = 1;
-               }
-
-               if (congested) {
-                       queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-                       else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-               }
-       }
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+               maybe_pull_ahead(mdev);
 
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
index cce7df367b793d111ef875bd5cd30a6f5e08782c..553f43a90953cab40f421658ecb37fa9c20c8c54 100644 (file)
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
+       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
index bbca966f8f66a20b04f62ac01ff41cd5ef18c037..3bba65510d23afdf39070f7d23552e8e15e27af2 100644 (file)
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
        struct gendisk *disk;
        int err;
 
+       err = -ENOMEM;
        lo = kzalloc(sizeof(*lo), GFP_KERNEL);
-       if (!lo) {
-               err = -ENOMEM;
+       if (!lo)
                goto out;
-       }
 
-       err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
-       if (err < 0)
+       if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
                goto out_free_dev;
 
        if (i >= 0) {
index 264bc77dcb911c7030c787d3ad87cd79b654ce81..a8fddeb3d638ebcdf9a0191beda816d8f1338ccd 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;
 
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }
 
 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *     The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
-       u32 group_allocated;
        struct driver_data *dd = dev_to_disk(dev)->private_data;
        int size = 0;
+
+       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "thermal_shutdown\n");
+       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+               size += sprintf(buf, "%s", "write_protect\n");
+       else
+               size += sprintf(buf, "%s", "online\n");
+
+       return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
+{
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       u32 group_allocated;
+       int size = *offset;
        int n;
 
-       size += sprintf(&buf[size], "Hardware\n--------\n");
-       size += sprintf(&buf[size], "S ACTive      : [ 0x");
+       if (!len || size)
+               return 0;
+
+       if (size < 0)
+               return -EINVAL;
+
+       size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                         readl(dd->port->s_active[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Command Issue : [ 0x");
+       size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                        readl(dd->port->cmd_issue[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "Completed     : [ 0x");
+       size += sprintf(&buf[size], "H/ Completed     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--)
                size += sprintf(&buf[size], "%08X ",
                                readl(dd->port->completed[n]));
 
        size += sprintf(&buf[size], "]\n");
-       size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->port->mmio + PORT_IRQ_STAT));
-       size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+       size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
                                readl(dd->mmio + HOST_IRQ_STAT));
        size += sprintf(&buf[size], "\n");
 
-       size += sprintf(&buf[size], "Local\n-----\n");
-       size += sprintf(&buf[size], "Allocated    : [ 0x");
+       size += sprintf(&buf[size], "L/ Allocated     : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       size += sprintf(&buf[size], "Commands in Q: [ 0x");
+       size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
 
        for (n = dd->slot_groups-1; n >= 0; n--) {
                if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
        }
        size += sprintf(&buf[size], "]\n");
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static ssize_t mtip_hw_show_status(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+                                 size_t len, loff_t *offset)
 {
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       struct driver_data *dd =  (struct driver_data *)f->private_data;
+       char buf[MTIP_DFS_MAX_BUF_SIZE];
+       int size = *offset;
 
-       if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "thermal_shutdown\n");
-       else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-               size += sprintf(buf, "%s", "write_protect\n");
-       else
-               size += sprintf(buf, "%s", "online\n");
-
-       return size;
-}
+       if (!len || size)
+               return 0;
 
-static ssize_t mtip_hw_show_flags(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct driver_data *dd = dev_to_disk(dev)->private_data;
-       int size = 0;
+       if (size < 0)
+               return -EINVAL;
 
-       size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
                                                        dd->port->flags);
-       size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+       size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
                                                        dd->dd_flag);
 
-       return size;
+       *offset = size <= len ? size : len;
+       size = copy_to_user(ubuf, buf, *offset);
+       if (size)
+               return -EFAULT;
+
+       return *offset;
 }
 
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_registers,
+       .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+       .owner  = THIS_MODULE,
+       .open   = simple_open,
+       .read   = mtip_hw_read_flags,
+       .llseek = no_llseek,
+};
 
 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'registers' sysfs entry\n");
        if (sysfs_create_file(kobj, &dev_attr_status.attr))
                dev_warn(&dd->pdev->dev,
                        "Error creating 'status' sysfs entry\n");
-       if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-               dev_warn(&dd->pdev->dev,
-                       "Error creating 'flags' sysfs entry\n");
        return 0;
 }
 
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
        if (!kobj || !dd)
                return -EINVAL;
 
-       sysfs_remove_file(kobj, &dev_attr_registers.attr);
        sysfs_remove_file(kobj, &dev_attr_status.attr);
-       sysfs_remove_file(kobj, &dev_attr_flags.attr);
 
        return 0;
 }
 
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+       if (!dfs_parent)
+               return -1;
+
+       dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+       if (IS_ERR_OR_NULL(dd->dfs_node)) {
+               dev_warn(&dd->pdev->dev,
+                       "Error creating node %s under debugfs\n",
+                                               dd->disk->disk_name);
+               dd->dfs_node = NULL;
+               return -1;
+       }
+
+       debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_flags_fops);
+       debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+                                                       &mtip_regs_fops);
+
+       return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+       debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
                mtip_hw_sysfs_init(dd, kobj);
                kobject_put(kobj);
        }
+       mtip_hw_debugfs_init(dd);
 
        if (dd->mtip_svc_handler) {
                set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
        return rv;
 
 kthread_run_error:
+       mtip_hw_debugfs_exit(dd);
+
        /* Delete our gendisk. This also removes the device from /dev */
        del_gendisk(dd->disk);
 
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
                        kobject_put(kobj);
                }
        }
+       mtip_hw_debugfs_exit(dd);
 
        /*
         * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
        }
        mtip_major = error;
 
+       if (!dfs_parent) {
+               dfs_parent = debugfs_create_dir("rssd", NULL);
+               if (IS_ERR_OR_NULL(dfs_parent)) {
+                       printk(KERN_WARNING "Error creating debugfs parent\n");
+                       dfs_parent = NULL;
+               }
+       }
+
        /* Register our PCI operations. */
        error = pci_register_driver(&mtip_pci_driver);
-       if (error)
+       if (error) {
+               debugfs_remove(dfs_parent);
                unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+       }
 
        return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+       debugfs_remove_recursive(dfs_parent);
+
        /* Release the allocated major block device number. */
        unregister_blkdev(mtip_major, MTIP_DRV_NAME);
 
index b2c88da26b2a7b7f94ff77b6f1a1d2047f6b45f3..f51fc23d17bb0e0025c74ac9f495007dcbc8b784 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>
 
 /* Offset of Subsystem Device ID in PCI configuration space */
 #define PCI_SUBSYSTEM_DEVICEID 0x2E
  #define dbg_printk(format, arg...)
 #endif
 
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)
 
 enum {
@@ -447,6 +448,8 @@ struct driver_data {
        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+       struct dentry *dfs_node;
 };
 
 #endif
index 65665c9c42c62ba5805324df96292e560f16b04a..8f428a8ab003d8c7a029036eea6a1666a96d7dd3 100644 (file)
@@ -499,7 +499,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
                         / sizeof (*ondisk))
                return -EINVAL;
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
-                               snap_count * sizeof (*ondisk),
+                               snap_count * sizeof(u64),
                                gfp_flags);
        if (!header->snapc)
                return -ENOMEM;
@@ -977,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
        op = (void *)(replyhead + 1);
        rc = le32_to_cpu(replyhead->result);
        bytes = le64_to_cpu(op->extent.length);
-       read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+       read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
 
        dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
 
index aa2712060bfbcd153e446f5adcf80a9d61377e75..9a72277a31df0cb131f879bb8a1503a65a57b7cc 100644 (file)
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
        }
 }
 
+struct mm_plug_cb {
+       struct blk_plug_cb cb;
+       struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+       struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+       spin_lock_irq(&mmcb->card->lock);
+       activate(mmcb->card);
+       spin_unlock_irq(&mmcb->card->lock);
+       kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+       struct blk_plug *plug = current->plug;
+       struct mm_plug_cb *mmcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+               if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+                       return 1;
+       }
+       /* Not currently on the callback list */
+       mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+       if (!mmcb)
+               return 0;
+
+       mmcb->card = card;
+       mmcb->cb.callback = mm_unplug;
+       list_add(&mmcb->cb.list, &plug->cb_list);
+       return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
index 773cf27dc23fc595d6fca48ea8ec6c4992438d18..9ad3b5ec1dc1c521085db47a7928cc8cf1179701 100644 (file)
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
+               dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
index 60eed4bdd2e4528ae3c3b8871cd65f85d3c34952..e4fb3374dcd2aaa6d0f834cd9394564a3c022a38 100644 (file)
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
        return free;
 }
 
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
 {
+       if (info->shadow[id].req.u.rw.id != id)
+               return -EINVAL;
+       if (info->shadow[id].request == NULL)
+               return -EINVAL;
        info->shadow[id].req.u.rw.id  = info->shadow_free;
        info->shadow[id].request = NULL;
        info->shadow_free = id;
+       return 0;
 }
 
+static const char *op_name(int op)
+{
+       static const char *const names[] = {
+               [BLKIF_OP_READ] = "read",
+               [BLKIF_OP_WRITE] = "write",
+               [BLKIF_OP_WRITE_BARRIER] = "barrier",
+               [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+               [BLKIF_OP_DISCARD] = "discard" };
+
+       if (op < 0 || op >= ARRAY_SIZE(names))
+               return "unknown";
+
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
        unsigned int end = minor + nr;
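
op_name() above combines designated initializers, a range check and a NULL check so that any operation code, including holes in the table, maps to a printable string. The same pattern in stand-alone form (the opcodes and names here are invented for illustration, not the blkif ones):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical sparse opcode set with a gap at 2 */
enum { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1, DEMO_OP_FLUSH = 3 };

static const char *demo_op_name(int op)
{
	static const char *const names[] = {
		[DEMO_OP_READ]  = "read",
		[DEMO_OP_WRITE] = "write",
		[DEMO_OP_FLUSH] = "flush",
	};

	if (op < 0 || op >= (int)ARRAY_SIZE(names))
		return "unknown";   /* out of range */
	if (!names[op])
		return "reserved";  /* hole in the table */
	return names[op];
}

int main(void)
{
	/* prints: read reserved unknown */
	printf("%s %s %s\n", demo_op_name(DEMO_OP_READ),
	       demo_op_name(2), demo_op_name(42));
	return 0;
}
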
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE -
+                * look in get_id_from_freelist).
+                */
+               if (id >= BLK_RING_SIZE) {
+                       WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       /* We can't safely get the 'struct request' as
+                        * the id is busted. */
+                       continue;
+               }
                req  = info->shadow[id].request;
 
                if (bret->operation != BLKIF_OP_DISCARD)
                        blkif_completion(&info->shadow[id]);
 
-               add_id_to_freelist(info, id);
+               if (add_id_to_freelist(info, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+                            info->gd->disk_name, op_name(bret->operation), id);
+                       continue;
+               }
 
                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
-                               printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-                                          info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                          info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-                               printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
-                               printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-                                      info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-                                      "barrier" :  "flush disk cache",
-                                      info->gd->disk_name);
+                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+                                      info->gd->disk_name, op_name(bret->operation));
                                error = -EOPNOTSUPP;
                        }
                        if (unlikely(error)) {
index ad591bd240ec3d5fed2fd2444a4d26313c76eb12..10308cd8a7ed2276f146c86c752383de5517464a 100644 (file)
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
 
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3002) },
+       { USB_DEVICE(0x0CF3, 0xE019) },
        { USB_DEVICE(0x13d3, 0x3304) },
        { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0489, 0xE03D) },
@@ -77,6 +78,7 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x0CF3, 0xE004) },
+       { USB_DEVICE(0x0930, 0x0219) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
@@ -101,6 +103,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
index 94f2d65131c441d213365936e14e513e3248097d..27068d1493808ec3f2acda115fd51773cbc4cfc5 100644 (file)
@@ -136,7 +136,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv);
 
 void btmrvl_interrupt(struct btmrvl_private *priv);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
 int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
index 681ca9d18e125e39cbec92071560ce3d0da4269a..dc304def8400ca4d38c67c989d1560cea7711303 100644 (file)
@@ -44,23 +44,33 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
 }
 EXPORT_SYMBOL_GPL(btmrvl_interrupt);
 
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
 {
        struct hci_event_hdr *hdr = (void *) skb->data;
        struct hci_ev_cmd_complete *ec;
-       u16 opcode, ocf;
+       u16 opcode, ocf, ogf;
 
        if (hdr->evt == HCI_EV_CMD_COMPLETE) {
                ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
                opcode = __le16_to_cpu(ec->opcode);
                ocf = hci_opcode_ocf(opcode);
+               ogf = hci_opcode_ogf(opcode);
+
                if (ocf == BT_CMD_MODULE_CFG_REQ &&
                                        priv->btmrvl_dev.sendcmdflag) {
                        priv->btmrvl_dev.sendcmdflag = false;
                        priv->adapter->cmd_complete = true;
                        wake_up_interruptible(&priv->adapter->cmd_wait_q);
                }
+
+               if (ogf == OGF) {
+                       BT_DBG("vendor event skipped: ogf 0x%4.4x", ogf);
+                       kfree_skb(skb);
+                       return false;
+               }
        }
+
+       return true;
 }
 EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
 
index a853244e7fd7b59b1689c0281a1d44534b1e708c..0cd61d9f07cdb1d1d70c8dfc0d2e52c239903402 100644 (file)
@@ -562,10 +562,12 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
                skb_put(skb, buf_len);
                skb_pull(skb, SDIO_HEADER_LEN);
 
-               if (type == HCI_EVENT_PKT)
-                       btmrvl_check_evtpkt(priv, skb);
+               if (type == HCI_EVENT_PKT) {
+                       if (btmrvl_check_evtpkt(priv, skb))
+                               hci_recv_frame(skb);
+               } else
+                       hci_recv_frame(skb);
 
-               hci_recv_frame(skb);
                hdev->stat.byte_rx += buf_len;
                break;
 
index c9463af8e564e8707bab12374e5324b0739d5994..83ebb241bfcc8bbd1f279875141f58325bbd09c3 100644 (file)
@@ -125,6 +125,7 @@ static struct usb_device_id blacklist_table[] = {
 
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,6 +140,7 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
index 764f70c5e690259dea53ea87e6ffa72946b8e7a3..0a41852794177636f62c91010430d85891f1425b 100644 (file)
@@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_B43_HB),
        ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+       ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
index c0091753a0d191c0268f8093f3ccc89fc4dd3280..8e2d9140f300291ae506741d1c7bb4135b728599 100644 (file)
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB          0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB         0x0069
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG          0x0042
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB          0x0044
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
index f518b99f53f5b995a3c8f9cd5d8ff616a62d3855..731c9046cf7bf0dbd06d26be41ea1e1b25b87ac8 100644 (file)
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
        u32 *data = buf;
 
        /* data ready? */
-       if (readl(trng->base + TRNG_ODATA) & 1) {
+       if (readl(trng->base + TRNG_ISR) & 1) {
                *data = readl(trng->base + TRNG_ODATA);
+               /*
+                * Ensure the data-ready bit is only set again AFTER the next
+                * data word is ready: it may have been set between checking
+                * ISR and reading ODATA, and we must not re-read the same
+                * word.
+                */
+               readl(trng->base + TRNG_ISR);
                return 4;
        } else
                return 0;
index 687b00d67c8a77a88ad4e135605c1ed11a319efb..9a1eb0cfa95f3f0a00a7334f3b276bf623d88cf6 100644 (file)
@@ -850,18 +850,21 @@ static void clk_change_rate(struct clk *clk)
 {
        struct clk *child;
        unsigned long old_rate;
+       unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;
 
        old_rate = clk->rate;
 
+       if (clk->parent)
+               best_parent_rate = clk->parent->rate;
+
        if (clk->ops->set_rate)
-               clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
+               clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
 
        if (clk->ops->recalc_rate)
-               clk->rate = clk->ops->recalc_rate(clk->hw,
-                               clk->parent->rate);
+               clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
-               clk->rate = clk->parent->rate;
+               clk->rate = best_parent_rate;
 
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -999,7 +1002,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (!clk->parents)
                clk->parents =
-                       kmalloc((sizeof(struct clk*) * clk->num_parents),
+                       kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);
 
        if (!clk->parents)
@@ -1064,21 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 
        old_parent = clk->parent;
 
-       /* find index of new parent clock using cached parent ptrs */
-       for (i = 0; i < clk->num_parents; i++)
-               if (clk->parents[i] == parent)
-                       break;
+       if (!clk->parents)
+               clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+                                                               GFP_KERNEL);
 
        /*
-        * find index of new parent clock using string name comparison
-        * also try to cache the parent to avoid future calls to __clk_lookup
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
         */
-       if (i == clk->num_parents)
-               for (i = 0; i < clk->num_parents; i++)
-                       if (!strcmp(clk->parent_names[i], parent->name)) {
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
-                               break;
-                       }
+                       break;
+               }
+       }
 
        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
index f7be225f544cb36c4c733583dfb6d168f53d9a21..db2391c054ee0a51805368d0a369267635375cf1 100644 (file)
@@ -71,7 +71,7 @@ static void __init clk_misc_init(void)
        __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
 }
 
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
@@ -80,31 +80,31 @@ static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "80070000.serial", },
 };
 
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
        { .dev_id = "imx23-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
 };
 
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx23-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
 };
 
-static struct clk_lookup ssp_lookups[] __initdata = {
+static struct clk_lookup ssp_lookups[] = {
        { .dev_id = "imx23-mmc.0", },
        { .dev_id = "imx23-mmc.1", },
        { .dev_id = "80010000.ssp", },
        { .dev_id = "80034000.ssp", },
 };
 
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
        { .dev_id = "imx23-fb", },
        { .dev_id = "80030000.lcdif", },
 };
 
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
        { .dev_id = "imx23-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
 };
index 2826a2606a29fca4f1474ce76fe87fc88fb968ea..7fad6c8c13d222fc9a528191835d141d06ffcec5 100644 (file)
@@ -120,7 +120,7 @@ static void __init clk_misc_init(void)
        writel_relaxed(val, FRAC0);
 }
 
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
@@ -135,71 +135,71 @@ static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "80074000.serial", },
 };
 
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
        { .dev_id = "imx28-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
 };
 
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx28-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
 };
 
-static struct clk_lookup ssp0_lookups[] __initdata = {
+static struct clk_lookup ssp0_lookups[] = {
        { .dev_id = "imx28-mmc.0", },
        { .dev_id = "80010000.ssp", },
 };
 
-static struct clk_lookup ssp1_lookups[] __initdata = {
+static struct clk_lookup ssp1_lookups[] = {
        { .dev_id = "imx28-mmc.1", },
        { .dev_id = "80012000.ssp", },
 };
 
-static struct clk_lookup ssp2_lookups[] __initdata = {
+static struct clk_lookup ssp2_lookups[] = {
        { .dev_id = "imx28-mmc.2", },
        { .dev_id = "80014000.ssp", },
 };
 
-static struct clk_lookup ssp3_lookups[] __initdata = {
+static struct clk_lookup ssp3_lookups[] = {
        { .dev_id = "imx28-mmc.3", },
        { .dev_id = "80016000.ssp", },
 };
 
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
        { .dev_id = "imx28-fb", },
        { .dev_id = "80030000.lcdif", },
 };
 
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
        { .dev_id = "imx28-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
 };
 
-static struct clk_lookup fec_lookups[] __initdata = {
+static struct clk_lookup fec_lookups[] = {
        { .dev_id = "imx28-fec.0", },
        { .dev_id = "imx28-fec.1", },
        { .dev_id = "800f0000.ethernet", },
        { .dev_id = "800f4000.ethernet", },
 };
 
-static struct clk_lookup can0_lookups[] __initdata = {
+static struct clk_lookup can0_lookups[] = {
        { .dev_id = "flexcan.0", },
        { .dev_id = "80032000.can", },
 };
 
-static struct clk_lookup can1_lookups[] __initdata = {
+static struct clk_lookup can1_lookups[] = {
        { .dev_id = "flexcan.1", },
        { .dev_id = "80034000.can", },
 };
 
-static struct clk_lookup saif0_lookups[] __initdata = {
+static struct clk_lookup saif0_lookups[] = {
        { .dev_id = "mxs-saif.0", },
        { .dev_id = "80042000.saif", },
 };
 
-static struct clk_lookup saif1_lookups[] __initdata = {
+static struct clk_lookup saif1_lookups[] = {
        { .dev_id = "mxs-saif.1", },
        { .dev_id = "80046000.saif", },
 };
@@ -245,8 +245,8 @@ int __init mx28_clocks_init(void)
        clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
        clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
        clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
-       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
-       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+       clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+       clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
        clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
        clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
        clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
index af34074e702b8930167c7291b0aa9ae041c93dcb..6756e7c3bc07d496f9136e98f15ce46dad0bc82a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4dbdb3fe18e02bd042867a3ae7c72aa77fbaa017..958aa3ad1d6023bbe39a70c594b8e45099ac1a8d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b471c9762a9763471ebd8cefb1d64ee164a894d5..1afc18c4effcc150f6bda6dede54e134e9ab4d68 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index dcd4bdf4b0d99f796747d47559d8eedbc1fdb1c3..5f1b6badeb15be0a8fa8a9285f90c0bfe9c9b55f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 376d4e5ff32609985e53f6fdc7f6b3d63331e895..7cd63788d546d885e70ef2b543c3f1ef93dffe43 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 3321c46a071c5641169e94e34e01e15602a93f04..931737677dfab553032f8a720893c719c4f0a639 100644 (file)
@@ -2,7 +2,7 @@
  * Clock framework definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 42b68df9aeef20f2857373a06f235fb58de8f5e6..0fcec2aae19cc032dfbfb8475319044420a22501 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -345,31 +345,30 @@ static struct frac_rate_tbl gen_rtbl[] = {
 /* clock parents */
 static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
 static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
-static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
        "osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
-       "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
 static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
 static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
        "i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
 static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll3_clk", };
 static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll2_clk", };
 static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
-       "ras_pll2_clk", "ras_synth0_clk", };
+       "ras_pll2_clk", "ras_syn0_clk", };
 static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
-       "ras_pll2_clk", "ras_synth0_clk", };
-static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
-static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
-static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+       "ras_pll2_clk", "ras_syn0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
        "ras_plclk0_clk", };
-static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
-static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
 
 void __init spear1310_clk_init(void)
 {
@@ -390,9 +389,9 @@ void __init spear1310_clk_init(void)
                        25000000);
        clk_register_clkdev(clk, "osc_25m_clk", NULL);
 
-       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
-                       CLK_IS_ROOT, 125000000);
-       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+       clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+                       125000000);
+       clk_register_clkdev(clk, "gmii_pad_clk", NULL);
 
        clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
                        CLK_IS_ROOT, 12288000);
@@ -406,34 +405,34 @@ void __init spear1310_clk_init(void)
 
        /* clock derived from 24 or 25 MHz osc clk */
        /* vco-pll */
-       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+       clk_register_clkdev(clk, "vco1_mclk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
                        0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+       clk_register_clkdev(clk, "vco2_mclk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
                        0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
                        SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+       clk_register_clkdev(clk, "vco3_mclk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
                        0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco3_clk", NULL);
@@ -473,7 +472,7 @@ void __init spear1310_clk_init(void)
        /* peripherals */
        clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
                        128);
-       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+       clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -500,177 +499,176 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, "apb_clk", NULL);
 
        /* gpt clocks */
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt0_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* others */
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "e0000000.serial");
 
-       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+       clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
-       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b3000000.sdhci");
 
-       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+       clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b2800000.cf");
        clk_register_clkdev(clk, NULL, "arasan_xd");
 
-       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "c3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+       clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+       clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
                        ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
                        SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "c3");
 
        /* gmac */
-       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
-                       gmac_phy_input_parents,
+       clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
                        ARRAY_SIZE(gmac_phy_input_parents), 0,
                        SPEAR1310_GMAC_CLK_CFG,
                        SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
                        SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+       clk_register_clkdev(clk, "phy_input_mclk", NULL);
 
-       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
-                       "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
-                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+       clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+                       0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+                       ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "phy_syn_clk", NULL);
+       clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+       clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
                        ARRAY_SIZE(gmac_phy_parents), 0,
                        SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
                        SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.0");
 
        /* clcd */
-       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+       clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
                        ARRAY_SIZE(clcd_synth_parents), 0,
                        SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
                        SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
-       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+       clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
                        SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
                        ARRAY_SIZE(clcd_rtbl), &_lock);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+       clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
                        ARRAY_SIZE(clcd_pixel_parents), 0,
                        SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
                        SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "clcd_clk", NULL);
 
        /* i2s */
-       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+       clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
                        ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
                        SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_src_clk", NULL);
 
-       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
                        SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
                        ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
        clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+       clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
                        ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
                        SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
                        &_lock);
        clk_register_clkdev(clk, "i2s_ref_clk", NULL);
 
-       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
                        SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
 
-       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
                        "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
                        &i2s_sclk_masks, i2s_sclk_rtbl,
                        ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
        clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
-       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
 
        /* clock derived from ahb clk */
        clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -747,13 +745,13 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "sysram1_clk", NULL);
 
-       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+       clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
                        0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
                        ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "adc_synth_clk", NULL);
-       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "adc_syn_clk", NULL);
+       clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "adc_clk");
@@ -790,37 +788,37 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0300000.kbd");
 
        /* RAS clks */
-       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
-                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
-                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+                       ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
                        SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
-                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
-                       0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+                       ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+                       SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
                        SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
 
-       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_clk", NULL);
 
-       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn1_clk", NULL);
 
-       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_clk", NULL);
 
-       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
                        SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn3_clk", NULL);
 
        clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
                        SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
@@ -847,7 +845,7 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "ras_pll3_clk", NULL);
 
-       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+       clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
                        SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "ras_tx125_clk", NULL);
@@ -912,7 +910,7 @@ void __init spear1310_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c700000.eth");
 
-       clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+       clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
                        smii_rgmii_phy_parents,
                        ARRAY_SIZE(smii_rgmii_phy_parents), 0,
                        SPEAR1310_RAS_CTRL_REG1,
@@ -922,184 +920,184 @@ void __init spear1310_clk_init(void)
        clk_register_clkdev(clk, NULL, "stmmacphy.2");
        clk_register_clkdev(clk, NULL, "stmmacphy.4");
 
-       clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+       clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
                        ARRAY_SIZE(rmii_phy_parents), 0,
                        SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
                        SPEAR1310_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.3");
 
-       clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c800000.serial");
 
-       clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5c900000.serial");
 
-       clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5ca00000.serial");
 
-       clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart4_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cb00000.serial");
 
-       clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
                        0, &_lock);
-       clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart5_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cc00000.serial");
 
-       clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cd00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5ce00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5cf00000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c4_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d000000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c5_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d100000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c6_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d200000.i2c");
 
-       clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+       clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
                        ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+       clk_register_clkdev(clk, "i2c7_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d300000.i2c");
 
-       clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+       clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
                        ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+       clk_register_clkdev(clk, "ssp1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "5d400000.spi");
 
-       clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+       clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
                        ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "pci_mux_clk", NULL);
+       clk_register_clkdev(clk, "pci_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+       clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "pci");
 
-       clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+       clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
                        ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+       clk_register_clkdev(clk, "tdm1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
 
-       clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+       clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
                        ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
                        SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+       clk_register_clkdev(clk, "tdm2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
                        SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
index f130919d5bf86b2391f490eaf65c6d9f574c97f1..2352cee7f6455ed95c8b22e6d91824bad15fd1e4 100644
@@ -4,7 +4,7 @@
  * SPEAr1340 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -369,27 +369,25 @@ static struct frac_rate_tbl gen_rtbl[] = {
 
 /* clock parents */
 static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
-static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
-       "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
-static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
+       "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
 static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
 static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
-       "uart0_synth_gate_clk", };
+       "uart0_syn_gclk", };
 static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
-       "uart1_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+       "uart1_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
        "osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
-       "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
 static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
 static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
        "i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
-static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
-};
-static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
 
 static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
        "pll3_clk", };
@@ -415,9 +413,9 @@ void __init spear1340_clk_init(void)
                        25000000);
        clk_register_clkdev(clk, "osc_25m_clk", NULL);
 
-       clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
-                       CLK_IS_ROOT, 125000000);
-       clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+       clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+                       125000000);
+       clk_register_clkdev(clk, "gmii_pad_clk", NULL);
 
        clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
                        CLK_IS_ROOT, 12288000);
@@ -431,35 +429,35 @@ void __init spear1340_clk_init(void)
 
        /* clock derived from 24 or 25 MHz osc clk */
        /* vco-pll */
-       clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco1_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
-                       0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco1_mclk", NULL);
+       clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
+                       SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco2_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
-                       0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco2_mclk", NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
+                       SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
-       clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+       clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
                        ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
                        SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "vco3_mux_clk", NULL);
-       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
-                       0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+       clk_register_clkdev(clk, "vco3_mclk", NULL);
+       clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
+                       SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
                        ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco3_clk", NULL);
        clk_register_clkdev(clk1, "pll3_clk", NULL);
@@ -498,7 +496,7 @@ void __init spear1340_clk_init(void)
        /* peripherals */
        clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
                        128);
-       clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+       clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -509,23 +507,23 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, "ddr_clk", NULL);
 
        /* clock derived from pll1 clk */
-       clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+       clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
                        SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
                        ARRAY_SIZE(sys_synth_rtbl), &_lock);
-       clk_register_clkdev(clk, "sys_synth_clk", NULL);
+       clk_register_clkdev(clk, "sys_syn_clk", NULL);
 
-       clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+       clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
                        SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
                        ARRAY_SIZE(amba_synth_rtbl), &_lock);
-       clk_register_clkdev(clk, "amba_synth_clk", NULL);
+       clk_register_clkdev(clk, "amba_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+       clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
                        ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
                        SPEAR1340_SCLK_SRC_SEL_SHIFT,
                        SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
        clk_register_clkdev(clk, "sys_clk", NULL);
 
-       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+       clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
                        2);
        clk_register_clkdev(clk, "cpu_clk", NULL);
 
@@ -548,194 +546,193 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, "apb_clk", NULL);
 
        /* gpt clocks */
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt0_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
                        ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* others */
-       clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+       clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart0_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "uart0_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "e0000000.serial");
 
-       clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+       clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart1_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "uart1_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+       clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
                        ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
-                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+       clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b4100000.serial");
 
-       clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+       clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
                        "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
                        aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
-       clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+       clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b3000000.sdhci");
 
-       clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+       clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+                       0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b2800000.cf");
        clk_register_clkdev(clk, NULL, "arasan_xd");
 
-       clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
-                       "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
-                       aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "c3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+       clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
+                       SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
+                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "c3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+       clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
                        ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
                        SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "c3_mux_clk", NULL);
+       clk_register_clkdev(clk, "c3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "c3");
 
        /* gmac */
-       clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
-                       gmac_phy_input_parents,
+       clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
                        ARRAY_SIZE(gmac_phy_input_parents), 0,
                        SPEAR1340_GMAC_CLK_CFG,
                        SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
                        SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+       clk_register_clkdev(clk, "phy_input_mclk", NULL);
 
-       clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
-                       "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
-                       NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+       clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+                       0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+                       ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+       clk_register_clkdev(clk, "phy_syn_clk", NULL);
+       clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+       clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
                        ARRAY_SIZE(gmac_phy_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
                        SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "stmmacphy.0");
 
        /* clcd */
-       clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+       clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
                        ARRAY_SIZE(clcd_synth_parents), 0,
                        SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
                        SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
 
-       clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+       clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
                        SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
                        ARRAY_SIZE(clcd_rtbl), &_lock);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+       clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
                        ARRAY_SIZE(clcd_pixel_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
                        SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, "clcd_clk", NULL);
 
        /* i2s */
-       clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+       clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
                        ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
                        SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_src_clk", NULL);
 
-       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+       clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
                        SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
                        ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
        clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+       clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
                        ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
                        SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
                        &_lock);
        clk_register_clkdev(clk, "i2s_ref_clk", NULL);
 
-       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+       clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
 
-       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
-                       "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
-                       &i2s_sclk_masks, i2s_sclk_rtbl,
-                       ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+       clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
+                       0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
+                       i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
+                       &clk1);
        clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
-       clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+       clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
 
        /* clock derived from ahb clk */
        clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -744,7 +741,7 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0280000.i2c");
 
        clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
-                       SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+                       SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "b4000000.i2c");
 
@@ -800,13 +797,13 @@ void __init spear1340_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, "sysram1_clk", NULL);
 
-       clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+       clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
                        0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
                        ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "adc_synth_clk", NULL);
-       clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "adc_syn_clk", NULL);
+       clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
 
-       clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+       clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "adc_clk");
@@ -843,39 +840,39 @@ void __init spear1340_clk_init(void)
        clk_register_clkdev(clk, NULL, "e0300000.kbd");
 
        /* RAS clks */
-       clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
-                       gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
-                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+                       ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
                        SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
-                       gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
-                       0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+       clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+                       ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+                       SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
                        SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
 
-       clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn0_clk", NULL);
 
-       clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+       clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn1_clk", NULL);
 
-       clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn2_clk", NULL);
 
-       clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+       clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
                        SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
                        &_lock);
-       clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+       clk_register_clkdev(clk, "gen_syn3_clk", NULL);
 
-       clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+       clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "mali");
@@ -890,74 +887,74 @@ void __init spear1340_clk_init(void)
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_cec.1");
 
-       clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+       clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
                        ARRAY_SIZE(spdif_out_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
                        SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+       clk_register_clkdev(clk, "spdif_out_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+       clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "spdif-out");
 
-       clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+       clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
                        ARRAY_SIZE(spdif_in_parents), 0,
                        SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
                        SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+       clk_register_clkdev(clk, "spdif_in_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+       clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spdif-in");
 
-       clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+       clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
                        SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "acp_clk");
 
-       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+       clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "plgpio");
 
-       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "video_dec");
 
-       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "video_enc");
 
-       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+       clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_vip");
 
-       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.0");
 
-       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.1");
 
-       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.2");
 
-       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "spear_camif.3");
 
-       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+       clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
                        SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
                        &_lock);
        clk_register_clkdev(clk, NULL, "pwm");
index 440bb3e4c971262f04b7e5cad6ed875c903e48bc..c3157454bb3fc907bb787a7abc9ee5259cf7dce5 100644 (file)
@@ -2,7 +2,7 @@
  * SPEAr3xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
 };
 
 /* clock parents */
-static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
 };
-static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
-static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
+static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
 static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
 static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
        "pll2_clk", };
@@ -137,7 +137,7 @@ static void __init spear300_clk_init(void)
 {
        struct clk *clk;
 
-       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
                        1, 1);
        clk_register_clkdev(clk, NULL, "60000000.clcd");
 
@@ -219,15 +219,11 @@ static void __init spear310_clk_init(void)
        #define SPEAR320_UARTX_PCLK_VAL_SYNTH1          0x0
        #define SPEAR320_UARTX_PCLK_VAL_APB             0x1
 
-static const char *i2s_ref_parents[] = { "ras_pll2_clk",
-       "ras_gen2_synth_gate_clk", };
-static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
-       "ras_gen3_synth_gate_clk",
-};
+static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
+static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
 static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
-       "ras_gen0_synth_gate_clk", };
-static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
-};
+       "ras_syn0_gclk", };
+static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
 
 static void __init spear320_clk_init(void)
 {
@@ -237,7 +233,7 @@ static void __init spear320_clk_init(void)
                        CLK_IS_ROOT, 125000000);
        clk_register_clkdev(clk, "smii_125m_pad", NULL);
 
-       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+       clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
                        1, 1);
        clk_register_clkdev(clk, NULL, "90000000.clcd");
 
@@ -363,9 +359,9 @@ void __init spear3xx_clk_init(void)
        clk_register_clkdev(clk, NULL, "fc900000.rtc");
 
        /* clock derived from 24 MHz osc clk */
-       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+       clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
                        48000000);
-       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+       clk_register_clkdev(clk, "pll3_clk", NULL);
 
        clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
                        1);
@@ -392,98 +388,98 @@ void __init spear3xx_clk_init(void)
                        HCLK_RATIO_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ahb_clk", NULL);
 
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+                       UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+       clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
                        ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
                        UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart0_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
+                       UART_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0000000.serial");
 
-       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
-                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "firda_synth_clk", NULL);
-       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+       clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
+                       FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_syn_clk", NULL);
+       clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+       clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
                        ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
                        FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+       clk_register_clkdev(clk, "firda_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
                        PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "firda");
 
        /* gpt clocks */
-       clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
        clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
                        ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
                        GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+       clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
                        ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
                        GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
-                       gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+       clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
+                       ARRAY_SIZE(gpt_rtbl), &_lock);
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
                        ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
                        GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
        /* general synths clocks */
-       clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
-                       "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen0_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
-
-       clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
-                       "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen1_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
-
-       clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+       clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
+                       0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "gen0_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
+
+       clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
+                       0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "gen1_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
+
+       clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
                        ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
                        GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
                        &_lock);
-       clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+       clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
 
-       clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
-                       "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+       clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
+                       "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
                        ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen2_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "gen2_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
 
-       clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
-                       "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+       clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
+                       "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
                        ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "gen3_synth_clk", NULL);
-       clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "gen3_syn_clk", NULL);
+       clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
 
        /* clock derived from pll3 clk */
-       clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBH_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "usbh_clk", NULL);
 
        clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
@@ -494,8 +490,8 @@ void __init spear3xx_clk_init(void)
                        1);
        clk_register_clkdev(clk, "usbh.1_clk", NULL);
 
-       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "designware_udc");
 
        /* clock derived from ahb clk */
@@ -579,29 +575,25 @@ void __init spear3xx_clk_init(void)
                        RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "ras_pll2_clk", NULL);
 
-       clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
                        RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
-                       "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT0_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
-                       "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT1_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
-                       "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT2_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
-
-       clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
-                       "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
-                       RAS_SYNT3_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+       clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
+
+       clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
+                       RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
+       clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
 
        if (of_machine_is_compatible("st,spear300"))
                spear300_clk_init();
index f9a20b382304e9b7d0b209902ab648802cce499e..a98d0866f5416b4dab5d4073362f37c91825d873 100644 (file)
@@ -2,7 +2,7 @@
  * SPEAr6xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -97,13 +97,12 @@ static struct aux_rate_tbl aux_rtbl[] = {
        {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
 };
 
-static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
-};
-static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
-static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
+static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
 static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
        "pll2_clk", };
 
@@ -136,9 +135,9 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, NULL, "rtc-spear");
 
        /* clock derived from 30 MHz osc clk */
-       clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+       clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
                        48000000);
-       clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+       clk_register_clkdev(clk, "pll3_clk", NULL);
 
        clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
                        0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
@@ -146,9 +145,9 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, "vco1_clk", NULL);
        clk_register_clkdev(clk1, "pll1_clk", NULL);
 
-       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
-                       "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
-                       ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+       clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
+                       0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+                       &_lock, &clk1, NULL);
        clk_register_clkdev(clk, "vco2_clk", NULL);
        clk_register_clkdev(clk1, "pll2_clk", NULL);
 
@@ -165,111 +164,111 @@ void __init spear6xx_clk_init(void)
                        HCLK_RATIO_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ahb_clk", NULL);
 
-       clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
-                       "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "uart_synth_clk", NULL);
-       clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+       clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+                       UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "uart_syn_clk", NULL);
+       clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+       clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
                        ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
                        UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "uart_mux_clk", NULL);
+       clk_register_clkdev(clk, "uart_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
+                       UART0_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0000000.serial");
 
-       clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
-                       PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
+                       UART1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "d0080000.serial");
 
-       clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
-                       "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "firda_synth_clk", NULL);
-       clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+       clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
+                       0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "firda_syn_clk", NULL);
+       clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+       clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
                        ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
                        FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "firda_mux_clk", NULL);
+       clk_register_clkdev(clk, "firda_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+       clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
                        PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "firda");
 
-       clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
-                       "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
-                       ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
-       clk_register_clkdev(clk, "clcd_synth_clk", NULL);
-       clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+       clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
+                       0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+                       &_lock, &clk1);
+       clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+       clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
 
-       clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+       clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
                        ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
                        CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+       clk_register_clkdev(clk, "clcd_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+       clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
                        PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "clcd");
 
        /* gpt clocks */
-       clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+       clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+       clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
                        ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
                        GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt0");
 
-       clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+       clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
                        ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
                        GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt1_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
                        PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt1");
 
-       clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+       clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+       clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
                        ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
                        GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt2_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
                        PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt2");
 
-       clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+       clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
                        gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
-       clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+       clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
 
-       clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+       clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
                        ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
                        GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
-       clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+       clk_register_clkdev(clk, "gpt3_mclk", NULL);
 
-       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+       clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
                        PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "gpt3");
 
        /* clock derived from pll3 clk */
-       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
                        PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "usbh.0_clk");
 
-       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+       clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
                        PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "usbh.1_clk");
 
-       clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
-                       PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+       clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+                       USBD_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, NULL, "designware_udc");
 
        /* clock derived from ahb clk */
@@ -278,9 +277,8 @@ void __init spear6xx_clk_init(void)
        clk_register_clkdev(clk, "ahbmult2_clk", NULL);
 
        clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
-                       ARRAY_SIZE(ddr_parents),
-                       0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
-                       &_lock);
+                       ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+                       MCTR_CLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, "ddr_clk", NULL);
 
        clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
@@ -298,7 +296,7 @@ void __init spear6xx_clk_init(void)
 
        clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
                        GMAC_CLK_ENB, 0, &_lock);
-       clk_register_clkdev(clk, NULL, "gmac");
+       clk_register_clkdev(clk, NULL, "e0800000.ethernet");
 
        clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
                        I2C_CLK_ENB, 0, &_lock);
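
A hedged sketch of the consumer side of the gmac clkdev change above, where the dev_id is now "e0800000.ethernet", the name a DT-created platform device gets from its unit address; the probe function and its error handling here are invented, not part of this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_gmac_probe(struct platform_device *pdev)
{
	/* matched against dev_id "e0800000.ethernet" with a NULL con_id */
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}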
index 8d81a1d32653d890094066461694ebd4dac81f2e..dd3e661a124d2ba9161a9872360495beef40cd76 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)    += cs5535-clockevt.o
 obj-$(CONFIG_SH_TIMER_CMT)     += sh_cmt.o
 obj-$(CONFIG_SH_TIMER_MTU2)    += sh_mtu2.o
 obj-$(CONFIG_SH_TIMER_TMU)     += sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI)     += em_sti.o
 obj-$(CONFIG_CLKBLD_I8253)     += i8253.o
 obj-$(CONFIG_CLKSRC_MMIO)      += mmio.o
 obj-$(CONFIG_DW_APB_TIMER)     += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644 (file)
index 0000000..372051d
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ *  Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+       void __iomem *base;
+       struct clk *clk;
+       struct platform_device *pdev;
+       unsigned int active[USER_NR];
+       unsigned long rate;
+       raw_spinlock_t lock;
+       struct clock_event_device ced;
+       struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+       return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+                               unsigned long value)
+{
+       iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+       int ret;
+
+       /* enable clock */
+       ret = clk_enable(p->clk);
+       if (ret) {
+               dev_err(&p->pdev->dev, "cannot enable clock\n");
+               return ret;
+       }
+
+       /* configure channel, periodic mode and maximum timeout */
+       p->rate = clk_get_rate(p->clk);
+
+       /* reset the counter */
+       em_sti_write(p, STI_SET_H, 0x40000000);
+       em_sti_write(p, STI_SET_L, 0x00000000);
+
+       /* mask and clear pending interrupts */
+       em_sti_write(p, STI_INTENCLR, 3);
+       em_sti_write(p, STI_INTFFCLR, 3);
+
+       /* enable updates of counter registers */
+       em_sti_write(p, STI_CONTROL, 1);
+
+       return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+       /* mask interrupts */
+       em_sti_write(p, STI_INTENCLR, 3);
+
+       /* stop clock */
+       clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+       cycle_t ticks;
+       unsigned long flags;
+
+       /* the STI hardware buffers the 48-bit count, but to
+        * break it out into two 32-bit accesses the registers
+        * must be accessed in a certain order.
+        * Always read STI_COUNT_H before STI_COUNT_L.
+        */
+       raw_spin_lock_irqsave(&p->lock, flags);
+       ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+       ticks |= em_sti_read(p, STI_COUNT_L);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+
+       /* mask compare A interrupt */
+       em_sti_write(p, STI_INTENCLR, 1);
+
+       /* update compare A value */
+       em_sti_write(p, STI_COMPA_H, next >> 32);
+       em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+       /* clear compare A interrupt source */
+       em_sti_write(p, STI_INTFFCLR, 1);
+
+       /* unmask compare A interrupt */
+       em_sti_write(p, STI_INTENSET, 1);
+
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+       struct em_sti_priv *p = dev_id;
+
+       p->ced.event_handler(&p->ced);
+       return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+       unsigned long flags;
+       int used_before;
+       int ret = 0;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+       used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+       if (!used_before)
+               ret = em_sti_enable(p);
+
+       if (!ret)
+               p->active[user] = 1;
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+
+       return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+       unsigned long flags;
+       int used_before, used_after;
+
+       raw_spin_lock_irqsave(&p->lock, flags);
+       used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+       p->active[user] = 0;
+       used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+       if (used_before && !used_after)
+               em_sti_disable(p);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+       return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+       return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+       int ret;
+       struct em_sti_priv *p = cs_to_em_sti(cs);
+
+       ret = em_sti_start(p, USER_CLOCKSOURCE);
+       if (!ret)
+               __clocksource_updatefreq_hz(cs, p->rate);
+       return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+       em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+       em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+       struct clocksource *cs = &p->cs;
+
+       memset(cs, 0, sizeof(*cs));
+       cs->name = dev_name(&p->pdev->dev);
+       cs->rating = 200;
+       cs->read = em_sti_clocksource_read;
+       cs->enable = em_sti_clocksource_enable;
+       cs->disable = em_sti_clocksource_disable;
+       cs->suspend = em_sti_clocksource_disable;
+       cs->resume = em_sti_clocksource_resume;
+       cs->mask = CLOCKSOURCE_MASK(48);
+       cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+       dev_info(&p->pdev->dev, "used as clock source\n");
+
+       /* Register with dummy 1 Hz value, gets updated in ->enable() */
+       clocksource_register_hz(cs, 1);
+       return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+       return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+                                   struct clock_event_device *ced)
+{
+       struct em_sti_priv *p = ced_to_em_sti(ced);
+
+       /* deal with old setting first */
+       switch (ced->mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               em_sti_stop(p, USER_CLOCKEVENT);
+               break;
+       default:
+               break;
+       }
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+               em_sti_start(p, USER_CLOCKEVENT);
+               clockevents_config(&p->ced, p->rate);
+               break;
+       case CLOCK_EVT_MODE_SHUTDOWN:
+       case CLOCK_EVT_MODE_UNUSED:
+               em_sti_stop(p, USER_CLOCKEVENT);
+               break;
+       default:
+               break;
+       }
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+                                  struct clock_event_device *ced)
+{
+       struct em_sti_priv *p = ced_to_em_sti(ced);
+       cycle_t next;
+       int safe;
+
+       next = em_sti_set_next(p, em_sti_count(p) + delta);
+       safe = em_sti_count(p) < (next - 1);
+
+       return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+       struct clock_event_device *ced = &p->ced;
+
+       memset(ced, 0, sizeof(*ced));
+       ced->name = dev_name(&p->pdev->dev);
+       ced->features = CLOCK_EVT_FEAT_ONESHOT;
+       ced->rating = 200;
+       ced->cpumask = cpumask_of(0);
+       ced->set_next_event = em_sti_clock_event_next;
+       ced->set_mode = em_sti_clock_event_mode;
+
+       dev_info(&p->pdev->dev, "used for clock events\n");
+
+       /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+       clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+       struct em_sti_priv *p;
+       struct resource *res;
+       int irq, ret;
+
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (p == NULL) {
+               dev_err(&pdev->dev, "failed to allocate driver data\n");
+               ret = -ENOMEM;
+               goto err0;
+       }
+
+       p->pdev = pdev;
+       platform_set_drvdata(pdev, p);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               ret = -EINVAL;
+               goto err0;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               ret = -EINVAL;
+               goto err0;
+       }
+
+       /* map memory, let base point to the STI instance */
+       p->base = ioremap_nocache(res->start, resource_size(res));
+       if (p->base == NULL) {
+               dev_err(&pdev->dev, "failed to remap I/O memory\n");
+               ret = -ENXIO;
+               goto err0;
+       }
+
+       /* get hold of clock */
+       p->clk = clk_get(&pdev->dev, "sclk");
+       if (IS_ERR(p->clk)) {
+               dev_err(&pdev->dev, "cannot get clock\n");
+               ret = PTR_ERR(p->clk);
+               goto err1;
+       }
+
+       if (request_irq(irq, em_sti_interrupt,
+                       IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+                       dev_name(&pdev->dev), p)) {
+               dev_err(&pdev->dev, "failed to request low IRQ\n");
+               ret = -ENOENT;
+               goto err2;
+       }
+
+       raw_spin_lock_init(&p->lock);
+       em_sti_register_clockevent(p);
+       em_sti_register_clocksource(p);
+       return 0;
+
+err2:
+       clk_put(p->clk);
+err1:
+       iounmap(p->base);
+err0:
+       kfree(p);
+       return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+       return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+       { .compatible = "renesas,em-sti", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+       .probe          = em_sti_probe,
+       .remove         = __devexit_p(em_sti_remove),
+       .driver         = {
+               .name   = "em_sti",
+               .of_match_table = em_sti_dt_ids,
+       }
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
index 32fe9ef5cc5c374d3a9dabf5c733aa17bd73342f..98b06baafcc64dd95c2a16965ea261210080c324 100644 (file)
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
        unsigned long next_match_value;
        unsigned long max_match_value;
        unsigned long rate;
-       spinlock_t lock;
+       raw_spinlock_t lock;
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
 };
 
-static DEFINE_SPINLOCK(sh_cmt_lock);
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
 
 #define CMSTR -1 /* shared register */
 #define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       spin_lock_irqsave(&sh_cmt_lock, flags);
+       raw_spin_lock_irqsave(&sh_cmt_lock, flags);
        value = sh_cmt_read(p, CMSTR);
 
        if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
                value &= ~(1 << cfg->timer_bit);
 
        sh_cmt_write(p, CMSTR, value);
-       spin_unlock_irqrestore(&sh_cmt_lock, flags);
+       raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 }
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
        __sh_cmt_set_next(p, delta);
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 }
 
 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
        int ret = 0;
        unsigned long flags;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
 
        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(p, p->max_match_value);
  out:
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 
        return ret;
 }
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
        unsigned long flags;
        unsigned long f;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
 
        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
                __sh_cmt_set_next(p, p->max_match_value);
 
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 }
 
 static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
        unsigned long value;
        int has_wrapped;
 
-       spin_lock_irqsave(&p->lock, flags);
+       raw_spin_lock_irqsave(&p->lock, flags);
        value = p->total_cycles;
        raw = sh_cmt_get_counter(p, &has_wrapped);
 
        if (unlikely(has_wrapped))
                raw += p->match_value + 1;
-       spin_unlock_irqrestore(&p->lock, flags);
+       raw_spin_unlock_irqrestore(&p->lock, flags);
 
        return value + raw;
 }
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
                p->max_match_value = (1 << p->width) - 1;
 
        p->match_value = p->max_match_value;
-       spin_lock_init(&p->lock);
+       raw_spin_lock_init(&p->lock);
 
        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);
index a2172f6904180fd9f30d63cfd3cab2448ab9c856..d9b76ca64a611327c0ea79d979482f638e3c14d3 100644 (file)
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
        struct clock_event_device ced;
 };
 
-static DEFINE_SPINLOCK(sh_mtu2_lock);
+static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
 
 #define TSTR -1 /* shared register */
 #define TCR  0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       spin_lock_irqsave(&sh_mtu2_lock, flags);
+       raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
        value = sh_mtu2_read(p, TSTR);
 
        if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
                value &= ~(1 << cfg->timer_bit);
 
        sh_mtu2_write(p, TSTR, value);
-       spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+       raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
 }
 
 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
index 97f54b634be43234d820beb1661b0493bd79637f..c1b51d49d106e90d8f4927cf174983878ad58b2a 100644 (file)
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
        struct clocksource cs;
 };
 
-static DEFINE_SPINLOCK(sh_tmu_lock);
+static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
 
 #define TSTR -1 /* shared register */
 #define TCOR  0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
        unsigned long flags, value;
 
        /* start stop register shared by multiple timer channels */
-       spin_lock_irqsave(&sh_tmu_lock, flags);
+       raw_spin_lock_irqsave(&sh_tmu_lock, flags);
        value = sh_tmu_read(p, TSTR);
 
        if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
                value &= ~(1 << cfg->timer_bit);
 
        sh_tmu_write(p, TSTR, value);
-       spin_unlock_irqrestore(&sh_tmu_lock, flags);
+       raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
 static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
 
        sh_tmu_enable(p);
 
-       /* TODO: calculate good shift from rate and counter bit width */
-
-       ced->shift = 32;
-       ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
-       ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
-       ced->min_delta_ns = 5000;
+       clockevents_config(ced, p->rate);
 
        if (periodic) {
                p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
        ced->set_mode = sh_tmu_clock_event_mode;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
-       clockevents_register_device(ced);
+
+       clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
 
        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
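
The sh_cmt, sh_mtu2 and sh_tmu changes above all apply the same rule: locks taken in clocksource/clockevent paths must remain spinning locks even on PREEMPT_RT, where plain spinlock_t can sleep, hence the switch to raw_spinlock_t. A minimal sketch of the pattern, with invented names:

#include <linux/spinlock.h>

/* guards a start/stop register shared by several timer channels */
static DEFINE_RAW_SPINLOCK(example_shared_lock);

static void example_start_stop(void)
{
	unsigned long flags;

	/* raw_ variants never sleep, so this is safe from hard-irq context */
	raw_spin_lock_irqsave(&example_shared_lock, flags);
	/* read-modify-write of the shared hardware register would go here */
	raw_spin_unlock_irqrestore(&example_shared_lock, flags);
}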
index 78a666d1e5f589892620edc200b24c4b254ec273..a76b689e553b64744f5e5782482d5bc91e257a55 100644 (file)
@@ -18,3 +18,6 @@ config CPU_IDLE_GOV_MENU
        bool
        depends on CPU_IDLE && NO_HZ
        default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+       def_bool n
index 5634f88379df9797fac371a3b3b8376df5d631db..38c8f69f30cf1ab93b403c08f192be0c13d6b598 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
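
These two build hooks only pull the helper in when a platform selects ARCH_NEEDS_CPU_IDLE_COUPLED; the DOC comment in the new coupled.c below then lists what a cpuidle driver must provide. A minimal, assumed sketch of that driver-side setup follows (the enter hook and state indices are invented, and the field name safe_state_index is an assumption; the DOC text below simply calls it safe_state):

#include <linux/cpuidle.h>
#include <linux/cpumask.h>

/*
 * Invented enter hook: the coupled core calls it on all coupled cpus at
 * roughly the same time, with interrupts disabled.
 */
static int example_coupled_enter(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/* platform power-down sequence for the shared state would go here */
	return index;
}

static void example_setup_coupled(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev)
{
	/* state 1 is the shared state; state 0 stays a plain per-cpu WFI */
	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;
	drv->states[1].enter = example_coupled_enter;

	/* every cpu in the cluster advertises the same coupled mask... */
	cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
	/* ...and the index of a non-coupled fallback state (assumed field) */
	dev->safe_state_index = 0;
}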
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644 (file)
index 0000000..2c9bf26
--- /dev/null
@@ -0,0 +1,715 @@
+/*
+ * coupled.c - helper functions to enter the same idle state on multiple cpus
+ *
+ * Copyright (c) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "cpuidle.h"
+
+/**
+ * DOC: Coupled cpuidle states
+ *
+ * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
+ * cpus cannot be independently powered down, either due to
+ * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
+ * power down), or due to HW bugs (on OMAP4460, a cpu powering up
+ * will corrupt the gic state unless the other cpu runs a work
+ * around).  Each cpu has a power state that it can enter without
+ * coordinating with the other cpu (usually Wait For Interrupt, or
+ * WFI), and one or more "coupled" power states that affect blocks
+ * shared between the cpus (L2 cache, interrupt controller, and
+ * sometimes the whole SoC).  Entering a coupled power state must
+ * be tightly controlled on both cpus.
+ *
+ * This file implements a solution, where each cpu will wait in the
+ * WFI state until all cpus are ready to enter a coupled state, at
+ * which point the coupled state function will be called on all
+ * cpus at approximately the same time.
+ *
+ * Once all cpus are ready to enter idle, they are woken by an smp
+ * cross call.  At this point, there is a chance that one of the
+ * cpus will find work to do, and choose not to enter idle.  A
+ * final pass is needed to guarantee that all cpus will call the
+ * power state enter function at the same time.  During this pass,
+ * each cpu will increment the ready counter, and continue once the
+ * ready counter matches the number of online coupled cpus.  If any
+ * cpu exits idle, the other cpus will decrement their counter and
+ * retry.
+ *
+ * requested_state stores the deepest coupled idle state each cpu
+ * is ready for.  It is assumed that the states are indexed from
+ * shallowest (highest power, lowest exit latency) to deepest
+ * (lowest power, highest exit latency).  The requested_state
+ * variable is not locked.  It is only written from the cpu that
+ * it stores (or by the on/offlining cpu if that cpu is offline),
+ * and only read after all the cpus are ready for the coupled idle
+ * state and are no longer updating it.
+ *
+ * Three counters are used.  online_count tracks the number of cpus
+ * in the coupled set that are currently or soon will be online.
+ * waiting_count tracks the number of cpus that are in the waiting
+ * loop, in the ready loop, or in the coupled idle state.  ready_count
+ * tracks the number of cpus that are in the ready loop or in the
+ * coupled idle state.  The waiting and ready counts are packed
+ * together into the single atomic variable ready_waiting_counts.
+ *
+ * To use coupled cpuidle states, a cpuidle driver must:
+ *
+ *    Set struct cpuidle_device.coupled_cpus to the mask of all
+ *    coupled cpus, usually the same as cpu_possible_mask if all cpus
+ *    are part of the same cluster.  The coupled_cpus mask must be
+ *    set in the struct cpuidle_device for each cpu.
+ *
+ *    Set struct cpuidle_device.safe_state_index to the index of a
+ *    state that is not a coupled state.  This is usually WFI.
+ *
+ *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
+ *    state that affects multiple cpus.
+ *
+ *    Provide a struct cpuidle_state.enter function for each state
+ *    that affects multiple cpus.  This function is guaranteed to be
+ *    called on all cpus at approximately the same time.  The driver
+ *    should ensure that the cpus all abort together if any cpu tries
+ *    to abort once the function is called.  The function should return
+ *    with interrupts still disabled.
+ */
+
+/**
+ * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
+ * @coupled_cpus: mask of cpus that are part of the coupled set
+ * @requested_state: array of requested states for cpus in the coupled set
+ * @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @online_count: count of cpus that are online
+ * @refcnt: reference count of cpuidle devices that are using this struct
+ * @prevent: flag to prevent coupled idle while a cpu is hotplugging
+ */
+struct cpuidle_coupled {
+       cpumask_t coupled_cpus;
+       int requested_state[NR_CPUS];
+       atomic_t ready_waiting_counts;
+       int online_count;
+       int refcnt;
+       int prevent;
+};
+
+#define WAITING_BITS 16
+#define MAX_WAITING_CPUS (1 << WAITING_BITS)
+#define WAITING_MASK (MAX_WAITING_CPUS - 1)
+#define READY_MASK (~WAITING_MASK)
+
+#define CPUIDLE_COUPLED_NOT_IDLE       (-1)
+
+static DEFINE_MUTEX(cpuidle_coupled_lock);
+static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+/*
+ * The cpuidle_coupled_poked_mask is used to avoid calling
+ * __smp_call_function_single with the per-cpu call_single_data struct already
+ * in use.  This prevents a deadlock where two cpus are waiting for each
+ * other's call_single_data struct to become available.
+ */
+static cpumask_t cpuidle_coupled_poked_mask;
+
+/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a:   atomic variable to hold the barrier
+ *
+ * No caller will return from this function until all online cpus in the
+ * same coupled group have called this function.  Once any caller has
+ * returned, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and will be reset to 0 before any cpu returns from it.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+       int n = dev->coupled->online_count;
+
+       smp_mb__before_atomic_inc();
+       atomic_inc(a);
+
+       while (atomic_read(a) < n)
+               cpu_relax();
+
+       if (atomic_inc_return(a) == n * 2) {
+               atomic_set(a, 0);
+               return;
+       }
+
+       while (atomic_read(a) > n)
+               cpu_relax();
+}
+
+/**
+ * cpuidle_state_is_coupled - check if a state is part of a coupled set
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @state: index of the target state in drv->states
+ *
+ * Returns true if the target state is coupled with cpus besides this one
+ */
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+       struct cpuidle_driver *drv, int state)
+{
+       return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+}
+
+/**
+ * cpuidle_coupled_set_ready - mark a cpu as ready
+ * @coupled: the struct coupled that contains the current cpu
+ */
+static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
+{
+       atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_set_not_ready - mark a cpu as not ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Decrements the ready counter, unless the ready (and thus the waiting) counter
+ * is equal to the number of online cpus.  Prevents a race where one cpu
+ * decrements the waiting counter and then re-increments it just before another
+ * cpu has decremented its ready counter, leading to the ready counter going
+ * down from the number of online cpus without going through the coupled idle
+ * state.
+ *
+ * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
+ * counter was equal to the number of online cpus.
+ */
+static
+inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
+{
+       int all;
+       int ret;
+
+       all = coupled->online_count | (coupled->online_count << WAITING_BITS);
+       ret = atomic_add_unless(&coupled->ready_waiting_counts,
+               -MAX_WAITING_CPUS, all);
+
+       return ret ? 0 : -EINVAL;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the ready loop.
+ */
+static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
+{
+       int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+       return r == 0;
+}
+
+/**
+ * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the ready loop
+ */
+static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
+{
+       int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+       return r == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the wait loop
+ */
+static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+       int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+       return w == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the waiting loop.
+ */
+static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+       int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+       return w == 0;
+}
+
+/**
+ * cpuidle_coupled_get_state - determine the deepest idle state
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns the deepest idle state that all coupled cpus can enter
+ */
+static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
+               struct cpuidle_coupled *coupled)
+{
+       int i;
+       int state = INT_MAX;
+
+       /*
+        * Read barrier ensures that read of requested_state is ordered after
+        * reads of ready_count.  Matches the write barrier in
+        * cpuidle_coupled_set_waiting.
+        */
+       smp_rmb();
+
+       for_each_cpu_mask(i, coupled->coupled_cpus)
+               if (cpu_online(i) && coupled->requested_state[i] < state)
+                       state = coupled->requested_state[i];
+
+       return state;
+}
+
+static void cpuidle_coupled_poked(void *info)
+{
+       int cpu = (unsigned long)info;
+       cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+}
+
+/**
+ * cpuidle_coupled_poke - wake up a cpu that may be waiting
+ * @cpu: target cpu
+ *
+ * Ensures that the target cpu exits its waiting idle state (if it is in it)
+ * and will see updates to waiting_count before it re-enters its waiting idle
+ * state.
+ *
+ * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
+ * either has or will soon have a pending IPI that will wake it out of idle,
+ * or it is currently processing the IPI and is not in idle.
+ */
+static void cpuidle_coupled_poke(int cpu)
+{
+       struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+       if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+               __smp_call_function_single(cpu, csd, 0);
+}
+
+/**
+ * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
+ * @this_cpu: the cpu that is poking the others
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Calls cpuidle_coupled_poke on all other online cpus.
+ */
+static void cpuidle_coupled_poke_others(int this_cpu,
+               struct cpuidle_coupled *coupled)
+{
+       int cpu;
+
+       for_each_cpu_mask(cpu, coupled->coupled_cpus)
+               if (cpu != this_cpu && cpu_online(cpu))
+                       cpuidle_coupled_poke(cpu);
+}
+
+/**
+ * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+ * Updates the requested idle state for the specified cpuidle device,
+ * poking all coupled cpus out of idle if necessary to let them see the new
+ * state.
+ */
+static void cpuidle_coupled_set_waiting(int cpu,
+               struct cpuidle_coupled *coupled, int next_state)
+{
+       int w;
+
+       coupled->requested_state[cpu] = next_state;
+
+       /*
+        * If this is the last cpu to enter the waiting state, poke
+        * all the other cpus out of their waiting state so they can
+        * enter a deeper state.  This can race with one of the cpus
+        * exiting the waiting state due to an interrupt and
+        * decrementing waiting_count, see comment below.
+        *
+        * The atomic_inc_return provides a write barrier to order the write
+        * to requested_state with the later write that increments ready_count.
+        */
+       w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+       if (w == coupled->online_count)
+               cpuidle_coupled_poke_others(cpu, coupled);
+}
+
+/**
+ * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Removes the requested idle state for the specified cpuidle device.
+ */
+static void cpuidle_coupled_set_not_waiting(int cpu,
+               struct cpuidle_coupled *coupled)
+{
+       /*
+        * Decrementing waiting count can race with incrementing it in
+        * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
+        * cpus will increment ready_count and then spin until they
+        * notice that this cpu has cleared its requested_state.
+        */
+       atomic_dec(&coupled->ready_waiting_counts);
+
+       coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
+}
+
+/**
+ * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Marks this cpu as no longer in the ready and waiting loops.  Decrements
+ * the waiting count first to prevent another cpu looping back in and seeing
+ * this cpu as waiting just before it exits idle.
+ */
+static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
+{
+       cpuidle_coupled_set_not_waiting(cpu, coupled);
+       atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
+ * @cpu: the current cpu
+ *
+ * Turns on interrupts and spins until any outstanding poke interrupts have
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+ * need_resched() must be tested after turning interrupts off again to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+ * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ */
+static int cpuidle_coupled_clear_pokes(int cpu)
+{
+       local_irq_enable();
+       while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+               cpu_relax();
+       local_irq_disable();
+
+       return need_resched() ? -EINTR : 0;
+}
+
+/**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @next_state: index of the requested state in drv->states
+ *
+ * Coordinate with coupled cpus to enter the target state.  This is a two
+ * stage process.  In the first stage, the cpus are operating independently,
+ * and may call into cpuidle_enter_state_coupled at completely different times.
+ * To save as much power as possible, the first cpus to call this function will
+ * go to an intermediate state (the cpuidle_device's safe state), and wait for
+ * all the other cpus to call this function.  Once all coupled cpus are idle,
+ * the second stage will start.  Each coupled cpu will spin until all cpus have
+ * guaranteed that they will enter the target state.
+ *
+ * This function must be called with interrupts disabled.  It may enable
+ * interrupts while preparing for idle, and it will always return with
+ * interrupts enabled.
+ */
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+               struct cpuidle_driver *drv, int next_state)
+{
+       int entered_state = -1;
+       struct cpuidle_coupled *coupled = dev->coupled;
+
+       if (!coupled)
+               return -EINVAL;
+
+       while (coupled->prevent) {
+               if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+                       local_irq_enable();
+                       return entered_state;
+               }
+               entered_state = cpuidle_enter_state(dev, drv,
+                       dev->safe_state_index);
+       }
+
+       /* Read barrier ensures online_count is read after prevent is cleared */
+       smp_rmb();
+
+       cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+
+retry:
+       /*
+        * Wait for all coupled cpus to be idle, using the deepest state
+        * allowed for a single cpu.
+        */
+       while (!cpuidle_coupled_cpus_waiting(coupled)) {
+               if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+                       cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+                       goto out;
+               }
+
+               if (coupled->prevent) {
+                       cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+                       goto out;
+               }
+
+               entered_state = cpuidle_enter_state(dev, drv,
+                       dev->safe_state_index);
+       }
+
+       if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+               cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+               goto out;
+       }
+
+       /*
+        * All coupled cpus are probably idle.  There is a small chance that
+        * one of the other cpus just became active.  Increment the ready count,
+        * and spin until all coupled cpus have incremented the counter. Once a
+        * cpu has incremented the ready counter, it cannot abort idle and must
+        * spin until either all cpus have incremented the ready counter, or
+        * another cpu leaves idle and decrements the waiting counter.
+        */
+
+       cpuidle_coupled_set_ready(coupled);
+       while (!cpuidle_coupled_cpus_ready(coupled)) {
+               /* Check if any other cpus bailed out of idle. */
+               if (!cpuidle_coupled_cpus_waiting(coupled))
+                       if (!cpuidle_coupled_set_not_ready(coupled))
+                               goto retry;
+
+               cpu_relax();
+       }
+
+       /* all cpus have acked the coupled state */
+       next_state = cpuidle_coupled_get_state(dev, coupled);
+
+       entered_state = cpuidle_enter_state(dev, drv, next_state);
+
+       cpuidle_coupled_set_done(dev->cpu, coupled);
+
+out:
+       /*
+        * Normal cpuidle states are expected to return with irqs enabled.
+        * That leads to an inefficiency where a cpu receiving an interrupt
+        * that brings it out of idle will process that interrupt before
+        * exiting the idle enter function and decrementing ready_count.  All
+        * other cpus will need to spin waiting for the cpu that is processing
+        * the interrupt.  If the driver returns with interrupts disabled,
+        * all other cpus will loop back into the safe idle state instead of
+        * spinning, saving power.
+        *
+        * Calling local_irq_enable here allows coupled states to return with
+        * interrupts disabled, but won't cause problems for drivers that
+        * exit with interrupts enabled.
+        */
+       local_irq_enable();
+
+       /*
+        * Wait until all coupled cpus have exited idle.  There is no risk that
+        * a cpu exits and re-enters the ready state because this cpu has
+        * already decremented its waiting_count.
+        */
+       while (!cpuidle_coupled_no_cpus_ready(coupled))
+               cpu_relax();
+
+       return entered_state;
+}
+
+static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
+{
+       cpumask_t cpus;
+       cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+       coupled->online_count = cpumask_weight(&cpus);
+}
+
+/**
+ * cpuidle_coupled_register_device - register a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_register_device to handle coupled idle init.  Finds the
+ * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
+ * exists yet.
+ */
+int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+       int cpu;
+       struct cpuidle_device *other_dev;
+       struct call_single_data *csd;
+       struct cpuidle_coupled *coupled;
+
+       if (cpumask_empty(&dev->coupled_cpus))
+               return 0;
+
+       for_each_cpu_mask(cpu, dev->coupled_cpus) {
+               other_dev = per_cpu(cpuidle_devices, cpu);
+               if (other_dev && other_dev->coupled) {
+                       coupled = other_dev->coupled;
+                       goto have_coupled;
+               }
+       }
+
+       /* No existing coupled info found, create a new one */
+       coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
+       if (!coupled)
+               return -ENOMEM;
+
+       coupled->coupled_cpus = dev->coupled_cpus;
+
+have_coupled:
+       dev->coupled = coupled;
+       if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
+               coupled->prevent++;
+
+       cpuidle_coupled_update_online_cpus(coupled);
+
+       coupled->refcnt++;
+
+       csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+       csd->func = cpuidle_coupled_poked;
+       csd->info = (void *)(unsigned long)dev->cpu;
+
+       return 0;
+}
+
+/**
+ * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
+ * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
+ * this was the last cpu in the set.
+ */
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+       struct cpuidle_coupled *coupled = dev->coupled;
+
+       if (cpumask_empty(&dev->coupled_cpus))
+               return;
+
+       if (!--coupled->refcnt)
+               kfree(coupled);
+       dev->coupled = NULL;
+}
+
+/**
+ * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
+{
+       int cpu = get_cpu();
+
+       /* Force all cpus out of the waiting loop. */
+       coupled->prevent++;
+       cpuidle_coupled_poke_others(cpu, coupled);
+       put_cpu();
+       while (!cpuidle_coupled_no_cpus_waiting(coupled))
+               cpu_relax();
+}
+
+/**
+ * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
+{
+       int cpu = get_cpu();
+
+       /*
+        * Write barrier ensures readers see the new online_count when they
+        * see prevent == 0.
+        */
+       smp_wmb();
+       coupled->prevent--;
+       /* Force cpus out of the prevent loop. */
+       cpuidle_coupled_poke_others(cpu, coupled);
+       put_cpu();
+}
+
+/**
+ * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
+ * @nb: notifier block
+ * @action: hotplug transition
+ * @hcpu: target cpu number
+ *
+ * Called when a cpu is brought online or taken offline via hotplug.  Updates
+ * the coupled cpu set appropriately.
+ */
+static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
+               unsigned long action, void *hcpu)
+{
+       int cpu = (unsigned long)hcpu;
+       struct cpuidle_device *dev;
+
+       mutex_lock(&cpuidle_lock);
+
+       dev = per_cpu(cpuidle_devices, cpu);
+       if (!dev->coupled)
+               goto out;
+
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_UP_PREPARE:
+       case CPU_DOWN_PREPARE:
+               cpuidle_coupled_prevent_idle(dev->coupled);
+               break;
+       case CPU_ONLINE:
+       case CPU_DEAD:
+               cpuidle_coupled_update_online_cpus(dev->coupled);
+               /* Fall through */
+       case CPU_UP_CANCELED:
+       case CPU_DOWN_FAILED:
+               cpuidle_coupled_allow_idle(dev->coupled);
+               break;
+       }
+
+out:
+       mutex_unlock(&cpuidle_lock);
+       return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_coupled_cpu_notifier = {
+       .notifier_call = cpuidle_coupled_cpu_notify,
+};
+
+static int __init cpuidle_coupled_init(void)
+{
+       return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+}
+core_initcall(cpuidle_coupled_init);
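
To make the driver-facing contract in the DOC comment of coupled.c concrete, here is a minimal sketch of how a platform driver might wire up one coupled state.  It is an illustration only, not code from this series: every example_* identifier and example_cluster_power_down() are hypothetical, a two-cpu ARM cluster is assumed, and the platform is assumed to select the new ARCH_NEEDS_CPU_IDLE_COUPLED symbol so that coupled.o is built.

/* Illustrative sketch only: the example_* names are not part of this patch. */
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/proc-fns.h>

static DEFINE_PER_CPU(struct cpuidle_device, example_idle_dev);
static atomic_t example_abort_barrier;

/* Hypothetical SoC hook for the last-man cluster power down. */
static void example_cluster_power_down(void)
{
}

/* Per-cpu safe state: plain WFI, no cross-cpu coordination needed. */
static int example_enter_wfi(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();
	return index;
}

/* Coupled state: called on all cpus at approximately the same time. */
static int example_enter_coupled(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/* Rendezvous before touching blocks shared between the cpus. */
	cpuidle_coupled_parallel_barrier(dev, &example_abort_barrier);

	if (dev->cpu == 0)
		example_cluster_power_down();
	else
		cpu_do_idle();

	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.state_count = 2,
	.states = {
		[0] = {
			.enter = example_enter_wfi,
			.exit_latency = 1,
			.target_residency = 1,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.name = "WFI",
			.desc = "ARM WFI",
		},
		[1] = {
			.enter = example_enter_coupled,
			.exit_latency = 5000,
			.target_residency = 10000,
			.flags = CPUIDLE_FLAG_TIME_VALID |
				 CPUIDLE_FLAG_COUPLED,
			.name = "C2",
			.desc = "coupled cluster power down",
		},
	},
};

static int __init example_idle_init(void)
{
	int cpu, ret;

	ret = cpuidle_register_driver(&example_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(example_idle_dev, cpu);

		dev->cpu = cpu;
		dev->safe_state_index = 0;	/* WFI above, not coupled */
		/* both cpus in the cluster are part of the coupled set */
		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);

		ret = cpuidle_register_device(dev);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(example_idle_init);

The points that matter mirror the DOC comment: coupled_cpus and safe_state_index are set per device, only the shared state carries CPUIDLE_FLAG_COUPLED, and the coupled enter callback may use cpuidle_coupled_parallel_barrier() to keep the cpus in step before touching shared blocks.
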
index d90519cec8807da1952825da4e9b3dd9f540bad2..bb4e827434ce4b2af71ae30b46c84918a3c4375b 100644 (file)
@@ -91,6 +91,34 @@ int cpuidle_play_dead(void)
        return -ENODEV;
 }
 
+/**
+ * cpuidle_enter_state - enter the state and update stats
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ * @next_state: index into drv->states of the state to enter
+ */
+int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+               int next_state)
+{
+       int entered_state;
+
+       entered_state = cpuidle_enter_ops(dev, drv, next_state);
+
+       if (entered_state >= 0) {
+               /* Update cpuidle counters */
+               /* This can be moved to within driver enter routine
+                * but that results in multiple copies of same code.
+                */
+               dev->states_usage[entered_state].time +=
+                               (unsigned long long)dev->last_residency;
+               dev->states_usage[entered_state].usage++;
+       } else {
+               dev->last_residency = 0;
+       }
+
+       return entered_state;
+}
+
 /**
  * cpuidle_idle_call - the main idle loop
  *
@@ -113,15 +141,6 @@ int cpuidle_idle_call(void)
        if (!dev || !dev->enabled)
                return -EBUSY;
 
-#if 0
-       /* shows regressions, re-enable for 2.6.29 */
-       /*
-        * run any timers that can be run now, at this point
-        * before calculating the idle duration etc.
-        */
-       hrtimer_peek_ahead_timers();
-#endif
-
        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
@@ -132,23 +151,15 @@ int cpuidle_idle_call(void)
        trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
-       entered_state = cpuidle_enter_ops(dev, drv, next_state);
+       if (cpuidle_state_is_coupled(dev, drv, next_state))
+               entered_state = cpuidle_enter_state_coupled(dev, drv,
+                                                           next_state);
+       else
+               entered_state = cpuidle_enter_state(dev, drv, next_state);
 
        trace_power_end_rcuidle(dev->cpu);
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
-       if (entered_state >= 0) {
-               /* Update cpuidle counters */
-               /* This can be moved to within driver enter routine
-                * but that results in multiple copies of same code.
-                */
-               dev->states_usage[entered_state].time +=
-                               (unsigned long long)dev->last_residency;
-               dev->states_usage[entered_state].usage++;
-       } else {
-               dev->last_residency = 0;
-       }
-
        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);
@@ -283,6 +294,9 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
        int ret, i;
        struct cpuidle_driver *drv = cpuidle_get_driver();
 
+       if (!dev)
+               return -EINVAL;
+
        if (dev->enabled)
                return 0;
        if (!drv || !cpuidle_curr_governor)
@@ -367,8 +381,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
 
-       if (!dev)
-               return -EINVAL;
        if (!try_module_get(cpuidle_driver->owner))
                return -EINVAL;
 
@@ -376,13 +388,25 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 
        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
-       if ((ret = cpuidle_add_sysfs(cpu_dev))) {
-               module_put(cpuidle_driver->owner);
-               return ret;
-       }
+       ret = cpuidle_add_sysfs(cpu_dev);
+       if (ret)
+               goto err_sysfs;
+
+       ret = cpuidle_coupled_register_device(dev);
+       if (ret)
+               goto err_coupled;
 
        dev->registered = 1;
        return 0;
+
+err_coupled:
+       cpuidle_remove_sysfs(cpu_dev);
+       wait_for_completion(&dev->kobj_unregister);
+err_sysfs:
+       list_del(&dev->device_list);
+       per_cpu(cpuidle_devices, dev->cpu) = NULL;
+       module_put(cpuidle_driver->owner);
+       return ret;
 }
 
 /**
@@ -393,6 +417,9 @@ int cpuidle_register_device(struct cpuidle_device *dev)
 {
        int ret;
 
+       if (!dev)
+               return -EINVAL;
+
        mutex_lock(&cpuidle_lock);
 
        if ((ret = __cpuidle_register_device(dev))) {
@@ -432,6 +459,8 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
 
+       cpuidle_coupled_unregister_device(dev);
+
        cpuidle_resume_and_unlock();
 
        module_put(cpuidle_driver->owner);
index 7db186685c275a78176217e6a97209eb9ae47949..76e7f696ad8c62862e5ae1ca555955f755f5c33e 100644 (file)
@@ -14,6 +14,8 @@ extern struct list_head cpuidle_detected_devices;
 extern struct mutex cpuidle_lock;
 extern spinlock_t cpuidle_driver_lock;
 extern int cpuidle_disabled(void);
+extern int cpuidle_enter_state(struct cpuidle_device *dev,
+               struct cpuidle_driver *drv, int next_state);
 
 /* idle loop */
 extern void cpuidle_install_idle_handler(void);
@@ -30,4 +32,34 @@ extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
 extern int cpuidle_add_sysfs(struct device *dev);
 extern void cpuidle_remove_sysfs(struct device *dev);
 
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+               struct cpuidle_driver *drv, int state);
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+               struct cpuidle_driver *drv, int next_state);
+int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+#else
+static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+               struct cpuidle_driver *drv, int state)
+{
+       return false;
+}
+
+static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+               struct cpuidle_driver *drv, int next_state)
+{
+       return -1;
+}
+
+static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+       return 0;
+}
+
+static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+}
+#endif
+
 #endif /* __DRIVER_CPUIDLE_H */
index e23dc82d43acbb726c0825e3f259c53803cd3c58..7212961575770df8c17d00d666bd11a79553a0b2 100644 (file)
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
index fb4f4990f5ebf9f6c8e1f97c9704f8b03642bd18..1dc2a4ad0026d21c43720941a2670cf0518c60e0 100644 (file)
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
 
        init_completion(&sdmac->done);
 
-       sdmac->buf_tail = 0;
-
        return 0;
 out:
 
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
        sdmac->flags = 0;
 
+       sdmac->buf_tail = 0;
+
        dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
                        sg_len, channel);
 
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 
        sdmac->status = DMA_IN_PROGRESS;
 
+       sdmac->buf_tail = 0;
+
        sdmac->flags |= IMX_DMA_SG_LOOP;
        sdmac->direction = direction;
        ret = sdma_load_context(sdmac);
index cbcc28e79be6331570af5ccd3132e12efe67aebc..e4feba6b03c00e6f2ae412a9a1104eef21fb94f4 100644 (file)
@@ -392,6 +392,8 @@ struct pl330_req {
        struct pl330_reqcfg *cfg;
        /* Pointer to first xfer in the request. */
        struct pl330_xfer *x;
+       /* Hook to attach to DMAC's list of reqs with due callback */
+       struct list_head rqd;
 };
 
 /*
@@ -461,8 +463,6 @@ struct _pl330_req {
        /* Number of bytes taken to setup MC for the req */
        u32 mc_len;
        struct pl330_req *r;
-       /* Hook to attach to DMAC's list of reqs with due callback */
-       struct list_head rqd;
 };
 
 /* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(const struct pl330_info *pi)
 {
-       struct _pl330_req *rqdone;
+       struct pl330_req *rqdone, *tmp;
        struct pl330_dmac *pl330;
        unsigned long flags;
        void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
                        if (active == -1) /* Aborted */
                                continue;
 
-                       rqdone = &thrd->req[active];
+                       /* Detach the req */
+                       rqdone = thrd->req[active].r;
+                       thrd->req[active].r = NULL;
+
                        mark_free(thrd, active);
 
                        /* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
        }
 
        /* Now that we are in no hurry, do the callbacks */
-       while (!list_empty(&pl330->req_done)) {
-               struct pl330_req *r;
-
-               rqdone = container_of(pl330->req_done.next,
-                                       struct _pl330_req, rqd);
-
-               list_del_init(&rqdone->rqd);
-
-               /* Detach the req */
-               r = rqdone->r;
-               rqdone->r = NULL;
+       list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+               list_del(&rqdone->rqd);
 
                spin_unlock_irqrestore(&pl330->lock, flags);
-               _callback(r, PL330_ERR_NONE);
+               _callback(rqdone, PL330_ERR_NONE);
                spin_lock_irqsave(&pl330->lock, flags);
        }
 
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
        /* Pick up ripe tomatoes */
        list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                if (desc->status == DONE) {
-                       if (pch->cyclic)
+                       if (!pch->cyclic)
                                dma_cookie_complete(&desc->txd);
                        list_move_tail(&desc->node, &list);
                }
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
 }
 
 /* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 {
        struct dma_pl330_desc *desc;
        unsigned long flags;
index 10f375032e9686f7c719c857ceca5dd3f9ef81ed..de5ba86e8b8998df830a0e62647378e4667489f4 100644 (file)
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
        else
                return (char *)ptr;
 
-       r = size % align;
+       r = (unsigned long)p % align;
 
        if (r == 0)
                return (char *)ptr;
index d27778f65a5dc3d8f5731cc4e6c1cff7cc6f4a1c..a499c7ed820ae62d8fc489478ef8522e756d833b 100644 (file)
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
        if (mce->bank != 8)
                return NOTIFY_DONE;
 
-#ifdef CONFIG_SMP
-       /* Only handle if it is the right mc controller */
-       if (mce->socketid != pvt->i7core_dev->socket)
-               return NOTIFY_DONE;
-#endif
-
        smp_rmb();
        if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
                smp_wmb();
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
        if (pvt->enable_scrub)
                disable_sdram_scrub_setting(mci);
 
-       mce_unregister_decode_chain(&i7_mce_dec);
-
        /* Disable EDAC polling */
        i7core_pci_ctl_release(pvt);
 
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
        /* DCLK for scrub rate setting */
        pvt->dclk_freq = get_dclk_freq();
 
-       mce_register_decode_chain(&i7_mce_dec);
-
        return 0;
 
 fail0:
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void)
 
        pci_rc = pci_register_driver(&i7core_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               mce_register_decode_chain(&i7_mce_dec);
                return 0;
+       }
 
        i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&i7core_driver);
+       mce_unregister_decode_chain(&i7_mce_dec);
 }
 
 module_init(i7core_init);
index 4c402353ba98d9aeceb5bbd8062eeec916f39d9d..0e374625f6f894a20272df947ffe2f3b7620422f 100644 (file)
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
-       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
+       mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+                           sizeof(*pdata));
        if (!mci) {
                devres_release_group(&op->dev, mpc85xx_mc_err_probe);
                return -ENOMEM;
index 4adaf4b7da993c3d6b1f25d430dbdf1aa3e7a875..36ad17e79d6183e3f21bf63f3266dc169f3941a0 100644 (file)
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                pvt->is_close_pg = false;
        }
 
-       pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+       pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
        if (IS_RDIMM_ENABLED(reg)) {
                /* FIXME: Can also be LRDIMM */
                debugf0("Memory is registered\n");
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
        debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
                __func__, mci, &sbridge_dev->pdev[0]->dev);
 
-       mce_unregister_decode_chain(&sbridge_mce_dec);
-
        /* Remove MC sysfs nodes */
        edac_mc_del_mc(mci->dev);
 
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
                goto fail0;
        }
 
-       mce_register_decode_chain(&sbridge_mce_dec);
        return 0;
 
 fail0:
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void)
 
        pci_rc = pci_register_driver(&sbridge_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               mce_register_decode_chain(&sbridge_mce_dec);
                return 0;
+       }
 
        sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&sbridge_driver);
+       mce_unregister_decode_chain(&sbridge_mce_dec);
 }
 
 module_init(sbridge_init);
index 23416e443765ec0d1da62405518866a9e7851370..a4ed30bd9a4182c26168bd3bbc81b206641739a2 100644 (file)
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
        [5] = "Charge-downstream",
        [6] = "MHL",
        [7] = "Dock-desk",
-       [7] = "Dock-card",
-       [8] = "JIG",
+       [8] = "Dock-card",
+       [9] = "JIG",
 
        NULL,
 };
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
 
        extcon_dev_unregister(info->edev);
 
+       kfree(info->edev);
        kfree(info);
 
        return 0;
index f598a700ec15e06d808bd2e00b3bbe86c91be5a2..159aeb07b3baf332a9df0660f082ccfc283fca64 100644 (file)
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
 #if defined(CONFIG_ANDROID)
        if (switch_class)
                ret = class_compat_create_link(switch_class, edev->dev,
-                                              dev);
+                                              NULL);
 #endif /* CONFIG_ANDROID */
 
        spin_lock_init(&edev->lock);
index fe7a07b473363033246afb2437531547cf6bb979..8a0dcc11c7c73e9c1abb64b7537047d58115064b 100644 (file)
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_request_irq;
 
+       platform_set_drvdata(pdev, extcon_data);
        /* Perform initial detection */
        gpio_extcon_work(&extcon_data->work.work);
 
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
        struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
 
        cancel_delayed_work_sync(&extcon_data->work);
+       free_irq(extcon_data->irq, extcon_data);
        gpio_free(extcon_data->gpio);
        extcon_dev_unregister(&extcon_data->edev);
        devm_kfree(&pdev->dev, extcon_data);
index c4067d0141f7c083ea9de58c72dc0ea3122ca300..542f0c04b6958037b3cf1c468e7705f779f8e11b 100644 (file)
@@ -136,7 +136,7 @@ config GPIO_MPC8XXX
 
 config GPIO_MSM_V1
        tristate "Qualcomm MSM GPIO v1"
-       depends on GPIOLIB && ARCH_MSM
+       depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
        help
          Say yes here to support the GPIO interface on ARM v6 based
          Qualcomm MSM chips.  Most of the pins on the MSM can be
index 9e9947cb86a3d4fe29bb018987176275d4476e2c..1077754f8289e37ca639541e8d9dd5936c858e7c 100644 (file)
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
 
        return 0;
 }
+EXPORT_SYMBOL(devm_gpio_request_one);
 
 /**
  *      devm_gpio_free - free an interrupt
index c337143b18f8f97d1fa80c61d84cfe9149b75aff..c89c4c1e668d97cf170f4fc07dc5911d83cb52d9 100644 (file)
@@ -398,10 +398,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
        writel(~0, port->base + GPIO_ISR);
 
        if (mxc_gpio_hwtype == IMX21_GPIO) {
-               /* setup one handler for all GPIO interrupts */
-               if (pdev->id == 0)
-                       irq_set_chained_handler(port->irq,
-                                               mx2_gpio_irq_handler);
+               /*
+                * Setup one handler for all GPIO interrupts. Actually setting
+                * the handler is needed only once, but doing it for every port
+                * is more robust and easier.
+                */
+               irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
        } else {
                /* setup one handler for each entry */
                irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
index c4ed1722734c9eadb190c0ccaa19a9c2eb3b2d39..4fbc208c32cfa213812f6356323a36a51c37087c 100644 (file)
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
                clk_enable(bank->dbck);
                bank->dbck_enabled = true;
+
+               __raw_writel(bank->dbck_enable_mask,
+                            bank->base + bank->regs->debounce_en);
        }
 }
 
 static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 {
        if (bank->dbck_enable_mask && bank->dbck_enabled) {
+               /*
+                * Disable debounce before cutting its clock. If debounce is
+                * enabled but the clock is not, the GPIO module seems to be unable
+                * to detect events and generate interrupts at least on OMAP3.
+                */
+               __raw_writel(0, bank->base + bank->regs->debounce_en);
+
                clk_disable(bank->dbck);
                bank->dbck_enabled = false;
        }
@@ -1081,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        bank->is_mpuio = pdata->is_mpuio;
        bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
        bank->loses_context = pdata->loses_context;
-       bank->get_context_loss_count = pdata->get_context_loss_count;
        bank->regs = pdata->regs;
 #ifdef CONFIG_OF_GPIO
        bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
        omap_gpio_chip_init(bank);
        omap_gpio_show_rev(bank);
 
+       if (bank->loses_context)
+               bank->get_context_loss_count = pdata->get_context_loss_count;
+
        pm_runtime_put(bank->dev);
 
        list_add_tail(&bank->node, &omap_gpio_list);
index 7bb00448e13d2cd30140b06c9c1ca635708d9bbd..b6453d0e44add0846373be7aed37f1f41d66b06f 100644 (file)
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
        }
 
        /* need to set base address for gpc4 */
-       exonys5_gpios_1[11].base = gpio_base1 + 0x2E0;
+       exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
 
        /* need to set base address for gpx */
        chip = &exynos5_gpios_1[21];
index 38416be8ba1186c41b90b7dfd5df070ad3b7acee..6064fb376e11638f8900c6b6cfc9282900fff5ec 100644 (file)
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
        }
        spin_lock_init(&chip->lock);
        gsta_gpio_setup(chip);
-       for (i = 0; i < GSTA_NR_GPIO; i++)
-               gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+       if (gpio_pdata)
+               for (i = 0; i < GSTA_NR_GPIO; i++)
+                       gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
 
        /* 384 was used in previous code: be compatible for other drivers */
        err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
index c1ad2884f2edb0b79f3894a9a16662e5776bda51..11f29c82253c4af5fca18b09f26923a8aa3a48a3 100644 (file)
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
        tps65910_gpio->gpio_chip.set    = tps65910_gpio_set;
        tps65910_gpio->gpio_chip.get    = tps65910_gpio_get;
        tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+       tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
        if (pdata && pdata->gpio_base)
                tps65910_gpio->gpio_chip.base = pdata->gpio_base;
        else
index 92ea5350dfe96dd6907ea235bb55b2d25faac08c..aa61ad2fcaaa6573e1de350a82d08151c0040289 100644 (file)
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
        struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
        struct wm8994 *wm8994 = wm8994_gpio->wm8994;
 
+       if (value)
+               value = WM8994_GPN_LVL;
+
        return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
-                              WM8994_GPN_DIR, 0);
+                              WM8994_GPN_DIR | WM8994_GPN_LVL, value);
 }
 
 static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
index eb92fe257a3937441a35ac01ec8dd414ee4d57d3..a8743c399e83234c976ebdb4b471542a0645c42d 100644 (file)
@@ -610,7 +610,7 @@ static bool
 drm_monitor_supports_rb(struct edid *edid)
 {
        if (edid->revision >= 4) {
-               bool ret;
+               bool ret = false;
                drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
                return ret;
        }
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
        return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+                               const struct drm_display_mode *mode)
+{
+       struct drm_display_mode *m;
+       bool ok = false;
+
+       list_for_each_entry(m, &connector->probed_modes, head) {
+               if (mode->hdisplay == m->hdisplay &&
+                   mode->vdisplay == m->vdisplay &&
+                   drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+                       return false; /* duplicated */
+               if (mode->hdisplay <= m->hdisplay &&
+                   mode->vdisplay <= m->vdisplay)
+                       ok = true;
+       }
+       return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
        struct drm_device *dev = connector->dev;
 
        for (i = 0; i < drm_num_dmt_modes; i++) {
-               if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+               if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+                   valid_inferred_mode(connector, drm_dmt_modes + i)) {
                        newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                        if (newmode) {
                                drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
                        return modes;
 
                fixup_mode_1366x768(newmode);
-               if (!mode_in_range(newmode, edid, timing)) {
+               if (!mode_in_range(newmode, edid, timing) ||
+                   !valid_inferred_mode(connector, newmode)) {
                        drm_mode_destroy(dev, newmode);
                        continue;
                }
index 420953197d0ae76c73aa5ef2cb4e5ff8271ee6f7..d6de2e07fa034ea33d796cd6ad97c5902e369924 100644 (file)
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
 };
 
 static struct drm_driver exynos_drm_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
-                                 DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+                                       DRIVER_GEM | DRIVER_PRIME,
        .load                   = exynos_drm_load,
        .unload                 = exynos_drm_unload,
        .open                   = exynos_drm_open,
index 6e9ac7bd1dcf7bb2c2ad2883ea17c53c61459cfd..23d5ad379f86743a1be36effb214d43f0ff17456 100644 (file)
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
                manager_ops->commit(manager->dev);
 }
 
-static struct drm_crtc *
-exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
-       return encoder->crtc;
-}
-
 static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
        .dpms           = exynos_drm_encoder_dpms,
        .mode_fixup     = exynos_drm_encoder_mode_fixup,
        .mode_set       = exynos_drm_encoder_mode_set,
        .prepare        = exynos_drm_encoder_prepare,
        .commit         = exynos_drm_encoder_commit,
-       .get_crtc       = exynos_drm_encoder_get_crtc,
 };
 
 static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
index f82a299553fb9278d8ebde3ede5382b521f9e525..4ccfe4328fab130ab1e7c1269ff793ac8a9ee004 100644 (file)
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+       unsigned int i;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        drm_framebuffer_cleanup(fb);
 
+       for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+               struct drm_gem_object *obj;
+
+               if (exynos_fb->exynos_gem_obj[i] == NULL)
+                       continue;
+
+               obj = &exynos_fb->exynos_gem_obj[i]->base;
+               drm_gem_object_unreference_unlocked(obj);
+       }
+
        kfree(exynos_fb);
        exynos_fb = NULL;
 }
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-ENOENT);
        }
 
-       drm_gem_object_unreference_unlocked(obj);
-
        fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-       if (IS_ERR(fb))
+       if (IS_ERR(fb)) {
+               drm_gem_object_unreference_unlocked(obj);
                return fb;
+       }
 
        exynos_fb = to_exynos_fb(fb);
        nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                        return ERR_PTR(-ENOENT);
                }
 
-               drm_gem_object_unreference_unlocked(obj);
-
                exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
        }
 
index 3ecb30d93552dbae84a7c555af2fda02ff754a1d..50823756cdea903ca4c94c95554a727c2328687d 100644 (file)
 static inline int exynos_drm_format_num_buffers(uint32_t format)
 {
        switch (format) {
-       case DRM_FORMAT_NV12M:
+       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV12MT:
                return 2;
-       case DRM_FORMAT_YUV420M:
+       case DRM_FORMAT_YUV420:
                return 3;
        default:
                return 1;
index fc91293c456041ff995c4eb2eda340df7e39a2f5..5c8b683029ea64c0938c1054aa1157c4eb94e694 100644 (file)
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
 {
-       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret = 0;
 
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       exynos_gem_obj = to_exynos_gem_obj(obj);
-
-       if (!exynos_gem_obj->base.map_list.map) {
-               ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+       if (!obj->map_list.map) {
+               ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }
 
-       *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
index 68ef010283751403135261cb1bed9f821c03c599..e2147a2ddcecab72570db6f3055aabdb18fe85ea 100644 (file)
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        switch (win_data->pixel_format) {
        case DRM_FORMAT_NV12MT:
                tiled_mode = true;
-       case DRM_FORMAT_NV12M:
+       case DRM_FORMAT_NV12:
                crcb_mode = false;
                buf_num = 2;
                break;
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
        mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
 
        /* setting graphical layers */
-
        val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
        val |= MXR_GRP_CFG_WIN_BLEND_EN;
+       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
        val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
 
        /* the same configuration for both layers */
        mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
-       val |= MXR_GRP_CFG_BLEND_PRE_MUL;
-       val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
        mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
 
+       /* setting video layers */
+       val = MXR_GRP_CFG_ALPHA_VAL(0);
+       mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
        /* configuration of Video Processor Registers */
        vp_win_reset(ctx);
        vp_default_filter(res);
index 9764045428ce345a98721cc3e521804ab9b0e608..b7e7b49d8f627b77c290bf9491c87d508b7e3377 100644 (file)
@@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
        return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
 }
 
-static int cdv_get_brightness(struct backlight_device *bd)
-{
-       struct drm_device *dev = bl_get_data(bd);
-       u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-
-       if (cdv_backlight_combination_mode(dev)) {
-               u8 lbpc;
-
-               val &= ~1;
-               pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
-               val *= lbpc;
-       }
-       return val;
-}
-
 static u32 cdv_get_max_backlight(struct drm_device *dev)
 {
        u32 max = REG_READ(BLC_PWM_CTL);
@@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
        return max;
 }
 
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+       struct drm_device *dev = bl_get_data(bd);
+       u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+       if (cdv_backlight_combination_mode(dev)) {
+               u8 lbpc;
+
+               val &= ~1;
+               pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+               val *= lbpc;
+       }
+       return (val * 100) / cdv_get_max_backlight(dev);
+
+}
+
 static int cdv_set_brightness(struct backlight_device *bd)
 {
        struct drm_device *dev = bl_get_data(bd);
@@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
        if (level < 1)
                level = 1;
 
+       level *= cdv_get_max_backlight(dev);
+       level /= 100;
+
        if (cdv_backlight_combination_mode(dev)) {
                u32 max = cdv_get_max_backlight(dev);
                u8 lbpc;
@@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
 
        cdv_backlight_device->props.brightness =
                        cdv_get_brightness(cdv_backlight_device);
-       cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
        backlight_update_status(cdv_backlight_device);
        dev_priv->backlight_device = cdv_backlight_device;
        return 0;
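
The reworked Cedarview backlight code keeps props.brightness as a 0-100 percentage: the getter divides the PWM duty-cycle reading by the hardware maximum, and the setter scales the requested level back up before programming the register. A standalone sketch of that round-trip arithmetic, with made-up values in place of real BLC_PWM reads:

    #include <stdio.h>

    int main(void)
    {
            /* Made-up stand-ins for BLC_PWM_CTL duty-cycle readings. */
            unsigned hw_max = 4648;     /* placeholder maximum duty cycle  */
            unsigned hw_val = 2324;     /* placeholder current duty cycle  */

            /* getter: raw duty cycle -> 0..100 percent, as in cdv_get_brightness() */
            unsigned percent = (hw_val * 100) / hw_max;

            /* setter: 0..100 percent -> raw duty cycle, as in cdv_set_brightness() */
            unsigned raw = (percent * hw_max) / 100;

            printf("raw %u -> %u%% -> raw %u\n", hw_val, percent, raw);
            return 0;
    }
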
index 4f186eca3a3039dc804775a80e55f461c2ff7792..c430bd424681c0b28625972a796aeb96b0524423 100644 (file)
@@ -144,6 +144,8 @@ struct opregion_asle {
 
 #define ASLE_CBLV_VALID         (1<<31)
 
+static struct psb_intel_opregion *system_opregion;
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct opregion_asle *asle = dev_priv->opregion.asle;
 
-       if (asle) {
+       if (asle && system_opregion) {
                /* Don't do this on Medfield or other non PC like devices, they
                   use the bit for something different altogether */
                psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
 #define ACPI_EV_LID            (1<<1)
 #define ACPI_EV_DOCK           (1<<2)
 
-static struct psb_intel_opregion *system_opregion;
 
 static int psb_intel_opregion_video_event(struct notifier_block *nb,
                                          unsigned long val, void *data)
@@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
                system_opregion = opregion;
                register_acpi_notifier(&psb_intel_opregion_notifier);
        }
-
-       if (opregion->asle)
-               psb_intel_opregion_enable_asle(dev);
 }
 
 void psb_intel_opregion_fini(struct drm_device *dev)
index 72dc6b9212656d67a4356149aede32c5691d4e60..4a90f8b0e16cb122ac0ebd62c6b8ddfcd9df9783 100644 (file)
@@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
 extern void psb_intel_opregion_init(struct drm_device *dev);
 extern void psb_intel_opregion_fini(struct drm_device *dev);
 extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
 
 #else
 
@@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
 {
        return 0;
 }
+
+extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
 #endif
index eff039bf92d40ae37fa1481cb5ec7006036ffae4..5971bc82b765cd87e6b32a347719ee3a5a218a64 100644 (file)
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
        psb_backlight_device->props.max_brightness = 100;
        backlight_update_status(psb_backlight_device);
        dev_priv->backlight_device = psb_backlight_device;
+
+       /* This must occur after the backlight is properly initialised */
+       psb_lid_timer_init(dev_priv);
+
        return 0;
 }
 
@@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
        return 0;
 }
 
-/* Not exactly an erratum more an irritation */
-static void psb_chip_errata(struct drm_device *dev)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-       psb_lid_timer_init(dev_priv);
-}
-
 static void psb_chip_teardown(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
        .sgx_offset = PSB_SGX_OFFSET,
        .chip_setup = psb_chip_setup,
        .chip_teardown = psb_chip_teardown,
-       .errata = psb_chip_errata,
 
        .crtc_helper = &psb_intel_helper_funcs,
        .crtc_funcs = &psb_intel_crtc_funcs,
index caba6e08693cb83119f3f22e32ba8f14e1b4fbdd..a8858a907f47b8ce2944157bbbd6fb1d3b562a10 100644 (file)
@@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 
        if (ret)
                return ret;
+       psb_intel_opregion_enable_asle(dev);
 #if 0
        /*enable runtime pm at last*/
        pm_runtime_enable(&dev->pdev->dev);
index f94792626b94fadae47303f150e7aff278d371ef..36822b924eb12974f7c73af8fe8368707ab08abf 100644 (file)
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        pci_set_master(dev->pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
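
Moving the intel_gtt_get() call ahead of MMIO setup lets the driver size the aperture handed to remove_conflicting_framebuffers() before anything else touches the hardware. The aperture size is simply the mappable GTT entry count shifted by the page size; a small standalone sketch of that calculation with an assumed entry count:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, the usual x86 configuration */

    int main(void)
    {
            /* Assumed entry count; the driver gets this from intel_gtt_get(). */
            uint64_t gtt_mappable_entries = 65536;

            uint64_t aperture_size = gtt_mappable_entries << PAGE_SHIFT;

            printf("mappable aperture: %llu MiB\n",
                   (unsigned long long)(aperture_size >> 20));
            return 0;
    }
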
index 238a521658330bbf7468b95b351148436d383602..9fe9ebe52a7ade8424472b4388b903afb5f681c0 100644 (file)
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = {
        .has_blt_ring = 1,
        .has_llc = 1,
        .has_pch_split = 1,
+       .has_force_wake = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
@@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights");
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       (((dev_priv)->info->gen >= 6) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE)) && \
-       (!IS_VALLEYVIEW((dev_priv)->dev))
+       ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
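
With has_force_wake now a per-chip capability bit, the register fast path reduces to a flag test plus a range check and no longer needs the Valleyview special case. A standalone sketch of the same predicate; the force-wake register offset below is illustrative only, not taken from this hunk:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FORCEWAKE_REG 0xA18C    /* illustrative placeholder offset */

    struct device_info {
            bool has_force_wake;    /* mirrors intel_device_info.has_force_wake */
    };

    /* Same shape as NEEDS_FORCE_WAKE(): capability flag, register range check,
     * and never for the force-wake register itself. */
    static bool needs_force_wake(const struct device_info *info, uint32_t reg)
    {
            return info->has_force_wake && reg < 0x40000 && reg != FORCEWAKE_REG;
    }

    int main(void)
    {
            struct device_info snb = { .has_force_wake = true };

            printf("%d %d\n",
                   needs_force_wake(&snb, 0x2030),     /* low register: needs it */
                   needs_force_wake(&snb, 0x140000));  /* above 0x40000: does not */
            return 0;
    }
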
index c9cfc67c2cf58acdf7871a6e81fda66d3c45dedb..b0b676abde0de1314f1181f79f1667d35f8eee67 100644 (file)
@@ -285,6 +285,7 @@ struct intel_device_info {
        u8 is_ivybridge:1;
        u8 is_valleyview:1;
        u8 has_pch_split:1;
+       u8 has_force_wake:1;
        u8 is_haswell:1;
        u8 has_fbc:1;
        u8 has_pipe_cxsr:1;
@@ -1101,6 +1102,8 @@ struct drm_i915_file_private {
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
 #include "i915_trace.h"
 
 /**
index 1417660a93ec00a0a8a24cc797acc7b7754db063..ed3224c3742389c6d9709039e1778cb806dc69a2 100644 (file)
@@ -412,7 +412,6 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
         */
 
        spin_lock_irqsave(&dev_priv->rps_lock, flags);
-       WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
@@ -510,7 +509,7 @@ out:
        return ret;
 }
 
-static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -550,6 +549,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       int pipe;
+
+       if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+               DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+                                (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+                                SDE_AUDIO_POWER_SHIFT_CPT);
+
+       if (pch_iir & SDE_AUX_MASK_CPT)
+               DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+       if (pch_iir & SDE_GMBUS_CPT)
+               DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+       if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+               DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+       if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+               DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+       if (pch_iir & SDE_FDI_MASK_CPT)
+               for_each_pipe(pipe)
+                       DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+                                        pipe_name(pipe),
+                                        I915_READ(FDI_RX_IIR(pipe)));
+}
+
 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -591,7 +619,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 
                        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-                       pch_irq_handler(dev, pch_iir);
+                       cpt_irq_handler(dev, pch_iir);
 
                        /* clear PCH hotplug event before clear CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
@@ -684,7 +712,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
        if (de_iir & DE_PCH_EVENT) {
                if (pch_iir & hotplug_mask)
                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-               pch_irq_handler(dev, pch_iir);
+               if (HAS_PCH_CPT(dev))
+                       cpt_irq_handler(dev, pch_iir);
+               else
+                       ibx_irq_handler(dev, pch_iir);
        }
 
        if (de_iir & DE_PCU_EVENT) {
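
The new cpt_irq_handler() decodes which port changed audio power by masking and shifting the PCH interrupt value with the CPT-specific constants added in the register header hunk further down. A tiny standalone example of that decode, reusing the same bit layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Bit layout copied from the i915_reg.h hunk in this patch. */
    #define SDE_AUDIO_POWER_SHIFT_CPT   29
    #define SDE_AUDIO_POWER_MASK_CPT    (7u << 29)
    #define SDE_AUDIO_POWER_B_CPT       (1u << 29)

    int main(void)
    {
            uint32_t pch_iir = SDE_AUDIO_POWER_B_CPT;   /* pretend port B fired */

            if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
                    printf("PCH audio power change on port %u\n",
                           (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                           SDE_AUDIO_POWER_SHIFT_CPT);
            return 0;
    }
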
index 2d49b9507ed05b2f0693e3c52a22b36dc028b3da..48d5e8e051cf6a1057f247457e65045e679051ab 100644 (file)
 #define MI_DISPLAY_FLIP                MI_INSTR(0x14, 2)
 #define MI_DISPLAY_FLIP_I915   MI_INSTR(0x14, 1)
 #define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+
 #define MI_SET_CONTEXT         MI_INSTR(0x18, 0)
 #define   MI_MM_SPACE_GTT              (1<<8)
 #define   MI_MM_SPACE_PHYSICAL         (0<<8)
 
 /* PCH */
 
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
 #define SDE_AUDIO_POWER_D      (1 << 27)
 #define SDE_AUDIO_POWER_C      (1 << 26)
 #define SDE_AUDIO_POWER_B      (1 << 25)
 #define SDE_TRANSA_CRC_ERR     (1 << 1)
 #define SDE_TRANSA_FIFO_UNDER  (1 << 0)
 #define SDE_TRANS_MASK         (0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT    (1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT  (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT  (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT  (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT   29
+#define SDE_AUDIO_POWER_MASK_CPT    (7 << 29)
+#define SDE_AUXD_CPT           (1 << 27)
+#define SDE_AUXC_CPT           (1 << 26)
+#define SDE_AUXB_CPT           (1 << 25)
+#define SDE_AUX_MASK_CPT       (7 << 25)
 #define SDE_PORTD_HOTPLUG_CPT  (1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT    (1 << 19)
 #define SDE_HOTPLUG_MASK_CPT   (SDE_CRT_HOTPLUG_CPT |          \
                                 SDE_PORTD_HOTPLUG_CPT |        \
                                 SDE_PORTC_HOTPLUG_CPT |        \
                                 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT          (1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT                (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT                (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT                (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT   (SDE_AUDIO_CP_REQ_C_CPT | \
+                                SDE_AUDIO_CP_REQ_B_CPT | \
+                                SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT   (SDE_AUDIO_CP_CHG_C_CPT | \
+                                SDE_AUDIO_CP_CHG_B_CPT | \
+                                SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT       (SDE_FDI_RXC_CPT | \
+                                SDE_FDI_RXB_CPT | \
+                                SDE_FDI_RXA_CPT)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
index 0ede02a99d914544d145b043b1ea066b5254bb02..a748e5cabe14e1e3ca4550db29078e0c45464682 100644 (file)
@@ -740,8 +740,11 @@ static void i915_restore_display(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
                I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
-               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+               /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+                * otherwise we get a blank eDP screen after S3 on some machines
+                */
                I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
                I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
index 9147894209061dd69c12ddc9bde72f904ebae3ac..a8538ac0299dac4252394b1fc28d9c3f58e173cc 100644 (file)
@@ -6158,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+       uint32_t plane_bit = 0;
        int ret;
 
        ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
        if (ret)
                goto err;
 
+       switch (intel_crtc->plane) {
+       case PLANE_A:
+               plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+               break;
+       case PLANE_B:
+               plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+               break;
+       case PLANE_C:
+               plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+               break;
+       default:
+               WARN_ONCE(1, "unknown plane in flip command\n");
+               ret = -ENODEV;
+               goto err;
+       }
+
        ret = intel_ring_begin(ring, 4);
        if (ret)
                goto err_unpin;
 
-       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, (obj->gtt_offset));
        intel_ring_emit(ring, (MI_NOOP));
@@ -6541,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(HDMIC) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMIC);
 
-               if (I915_READ(HDMID) & PORT_DETECTED)
+               if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMID);
 
                if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -6904,19 +6921,6 @@ static void i915_disable_vga(struct drm_device *dev)
        POSTING_READ(vga_reg);
 }
 
-static void ivb_pch_pwm_override(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /*
-        * IVB has CPU eDP backlight regs too, set things up to let the
-        * PCH regs control the backlight
-        */
-       I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
-       I915_WRITE(BLC_PWM_CPU_CTL, 0);
-       I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6933,9 +6937,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
                gen6_enable_rps(dev_priv);
                gen6_update_ring_freq(dev_priv);
        }
-
-       if (IS_IVYBRIDGE(dev))
-               ivb_pch_pwm_override(dev);
 }
 
 void intel_modeset_init(struct drm_device *dev)
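
The gen7 flip path can no longer shift the plane number straight into bit 19, because Ivybridge encodes plane C and the sprites with non-contiguous values. A standalone sketch of the mapping, using the MI_DISPLAY_FLIP_IVB_* encodings introduced in the register header hunk earlier in this patch:

    #include <stdio.h>
    #include <stdint.h>

    /* Encodings from the i915_reg.h hunk earlier in this patch. */
    #define MI_DISPLAY_FLIP_IVB_PLANE_A  (0u << 19)
    #define MI_DISPLAY_FLIP_IVB_PLANE_B  (1u << 19)
    #define MI_DISPLAY_FLIP_IVB_PLANE_C  (4u << 19)

    enum plane { PLANE_A, PLANE_B, PLANE_C };

    /* Returns 0 and fills *bit on success, -1 for an unknown plane,
     * mirroring the -ENODEV path in intel_gen7_queue_flip() above. */
    static int ivb_plane_bit(enum plane p, uint32_t *bit)
    {
            switch (p) {
            case PLANE_A: *bit = MI_DISPLAY_FLIP_IVB_PLANE_A; return 0;
            case PLANE_B: *bit = MI_DISPLAY_FLIP_IVB_PLANE_B; return 0;
            case PLANE_C: *bit = MI_DISPLAY_FLIP_IVB_PLANE_C; return 0;
            }
            return -1;
    }

    int main(void)
    {
            uint32_t bit;

            if (!ivb_plane_bit(PLANE_C, &bit))
                    printf("plane C flip bits: 0x%08x\n", (unsigned)bit);  /* 0x00200000 */
            return 0;
    }
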
index 296cfc201a81ea9a0017abfb0e97eee7eaf8dc18..c0449324143cb62f1ecd837e042f28af39682d77 100644 (file)
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
        struct drm_display_mode *panel_fixed_mode;  /* for eDP */
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
+       struct edid *edid; /* cached EDID for eDP */
+       int edid_mode_count;
 };
 
 /**
@@ -371,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        int recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
-       int try, precharge = 5;
+       int try, precharge;
 
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
@@ -391,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
 
+       if (IS_GEN6(dev))
+               precharge = 3;
+       else
+               precharge = 5;
+
        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ(ch_ctl);
@@ -1973,6 +1981,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
 
+       ironlake_edp_panel_vdd_on(intel_dp);
+
        if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
@@ -1980,6 +1990,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
        if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
+
+       ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static bool
@@ -2116,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct edid     *edid;
+       int size;
+
+       if (is_edp(intel_dp)) {
+               if (!intel_dp->edid)
+                       return NULL;
+
+               size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+               edid = kmalloc(size, GFP_KERNEL);
+               if (!edid)
+                       return NULL;
+
+               memcpy(edid, intel_dp->edid, size);
+               return edid;
+       }
 
-       ironlake_edp_panel_vdd_on(intel_dp);
        edid = drm_get_edid(connector, adapter);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
        return edid;
 }
 
@@ -2129,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int     ret;
 
-       ironlake_edp_panel_vdd_on(intel_dp);
+       if (is_edp(intel_dp)) {
+               drm_mode_connector_update_edid_property(connector,
+                                                       intel_dp->edid);
+               ret = drm_add_edid_modes(connector, intel_dp->edid);
+               drm_edid_to_eld(connector,
+                               intel_dp->edid);
+               connector->display_info.raw_edid = NULL;
+               return intel_dp->edid_mode_count;
+       }
+
        ret = intel_ddc_get_modes(connector, adapter);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
        return ret;
 }
 
@@ -2321,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        i2c_del_adapter(&intel_dp->adapter);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
+               kfree(intel_dp->edid);
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                ironlake_panel_vdd_off_sync(intel_dp);
        }
@@ -2504,11 +2537,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        break;
        }
 
+       intel_dp_i2c_init(intel_dp, intel_connector, name);
+
        /* Cache some DPCD data in the eDP case */
        if (is_edp(intel_dp)) {
                bool ret;
                struct edp_power_seq    cur, vbt;
                u32 pp_on, pp_off, pp_div;
+               struct edid *edid;
 
                pp_on = I915_READ(PCH_PP_ON_DELAYS);
                pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2576,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        intel_dp_destroy(&intel_connector->base);
                        return;
                }
-       }
 
-       intel_dp_i2c_init(intel_dp, intel_connector, name);
+               ironlake_edp_panel_vdd_on(intel_dp);
+               edid = drm_get_edid(connector, &intel_dp->adapter);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                               edid);
+                       intel_dp->edid_mode_count =
+                               drm_add_edid_modes(connector, edid);
+                       drm_edid_to_eld(connector, edid);
+                       intel_dp->edid = edid;
+               }
+               ironlake_edp_panel_vdd_off(intel_dp, false);
+       }
 
        intel_encoder->hot_plug = intel_dp_hot_plug;
 
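
The eDP changes read the panel EDID once at init time, while VDD is forced on, and answer later detect and mode queries from that cached copy, so the panel power sequencer is no longer toggled on every probe. The cached blob spans the base block plus its extensions; a small sketch of the duplication step, assuming a trimmed-down edid struct rather than the full drm_edid.h layout:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    #define EDID_LENGTH 128         /* one EDID block is 128 bytes */

    /* Simplified stand-in for struct edid: only the extension count matters here. */
    struct edid {
            uint8_t header[126];
            uint8_t extensions;     /* number of 128-byte extension blocks */
            uint8_t checksum;
    };

    /* Duplicate a cached EDID the way the eDP path above does on each query. */
    static struct edid *edid_dup(const struct edid *cached)
    {
            size_t size = (cached->extensions + 1) * EDID_LENGTH;
            struct edid *copy = malloc(size);

            if (!copy)
                    return NULL;
            memcpy(copy, cached, size);
            return copy;
    }

    int main(void)
    {
            struct edid *base = calloc(1, sizeof(*base));   /* no extensions */
            struct edid *copy;

            if (!base)
                    return 1;
            copy = edid_dup(base);
            printf("copied %zu bytes\n", (size_t)(base->extensions + 1) * EDID_LENGTH);
            free(copy);
            free(base);
            return 0;
    }
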
index b59b6d5b75833e37e204da899c4dda2f65a9bf2d..e5b84ff89ca58234b5c7e0d30fa8e97583d6357d 100644 (file)
@@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
+       int ret = 0;
        u32 head;
 
+       if (HAS_FORCE_WAKE(dev))
+               gen6_gt_force_wake_get(dev_priv);
+
        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
@@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                                I915_READ_HEAD(ring),
                                I915_READ_TAIL(ring),
                                I915_READ_START(ring));
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
+               ring->last_retired_head = -1;
        }
 
-       return 0;
+out:
+       if (HAS_FORCE_WAKE(dev))
+               gen6_gt_force_wake_put(dev_priv);
+
+       return ret;
 }
 
 static int
@@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto err_unref;
 
+       ret = i915_gem_object_set_to_gtt_domain(obj, true);
+       if (ret)
+               goto err_unpin;
+
        ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
                                         ring->size);
        if (ring->virtual_start == NULL) {
index 153b9a15469b5053507507587bd20dc411417146..1074bc5dd418e3a33b9214969dd850d0f230464d 100644 (file)
@@ -467,7 +467,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
        nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
        ret = drm_fb_helper_init(dev, &nfbdev->helper,
-                                nv_two_heads(dev) ? 2 : 1, 4);
+                                dev->mode_config.num_crtc, 4);
        if (ret) {
                kfree(nfbdev);
                return ret;
index a89240e5fb2962334e601d7f4ff6fa1363f7f061..a25cf2cb931f104e7ce69c3de13696635290d196 100644 (file)
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
 
 #include "drmP.h"
 #include "drm.h"
index 01d77d1554f4258899da5e14eb0044e3d529cc02..3904d7964a4b02e3b0e34930f3bc97a65c07add3 100644 (file)
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        }
 
        if (tiling_flags & RADEON_TILING_MACRO) {
-               if (rdev->family >= CHIP_CAYMAN)
+               if (rdev->family >= CHIP_TAHITI)
+                       tmp = rdev->config.si.tile_config;
+               else if (rdev->family >= CHIP_CAYMAN)
                        tmp = rdev->config.cayman.tile_config;
                else
                        tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        } else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
+       if ((rdev->family == CHIP_TAHITI) ||
+           (rdev->family == CHIP_PITCAIRN))
+               fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+       else if (rdev->family == CHIP_VERDE)
+               fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
        switch (radeon_crtc->crtc_id) {
        case 0:
                WREG32(AVIVO_D1VGA_CONTROL, 0);
index e7b1ec5ae8c6207adf567f4942b422b1ae2cf140..486ccdf4aacda9b66681346e744b463bea046089 100644 (file)
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
                r600_hdmi_enable(encoder);
-               if (ASIC_IS_DCE4(rdev))
+               if (ASIC_IS_DCE6(rdev))
+                       ; /* TODO (use pointers instead of if-s?) */
+               else if (ASIC_IS_DCE4(rdev))
                        evergreen_hdmi_setmode(encoder, adjusted_mode);
                else
                        r600_hdmi_setmode(encoder, adjusted_mode);
index 01550d05e2738d12d6e29fb191740ba8a5dc6637..7fb3d2e0434c71d52725633573bc3f17a34a010d 100644 (file)
@@ -1932,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
+       if (rdev->family <= CHIP_SUMO2)
+               WREG32(SMX_SAR_CTL0, 0x00010000);
+
        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
                                        POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
                                        SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
index 4e7dd2b4843d94b0b88c778aca8fa08c7f089c61..c16554122ccd0fb482aa2f03bc93e8b47af55430 100644 (file)
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
        u32                     cb_color_view[12];
        u32                     cb_color_pitch[12];
        u32                     cb_color_slice[12];
+       u32                     cb_color_slice_idx[12];
        u32                     cb_color_attrib[12];
        u32                     cb_color_cmask_slice[8];/* unused */
        u32                     cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
                track->cb_color_info[i] = 0;
                track->cb_color_view[i] = 0xFFFFFFFF;
                track->cb_color_pitch[i] = 0;
-               track->cb_color_slice[i] = 0;
+               track->cb_color_slice[i] = 0xfffffff;
+               track->cb_color_slice_idx[i] = 0;
        }
        track->cb_target_mask = 0xFFFFFFFF;
        track->cb_shader_mask = 0xFFFFFFFF;
        track->cb_dirty = true;
 
+       track->db_depth_slice = 0xffffffff;
        track->db_depth_view = 0xFFFFC000;
        track->db_depth_size = 0xFFFFFFFF;
        track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
 {
        struct evergreen_cs_track *track = p->track;
        unsigned palign, halign, tileb, slice_pt;
+       unsigned mtile_pr, mtile_ps, mtileb;
 
        tileb = 64 * surf->bpe * surf->nsamples;
-       palign = track->group_size / (8 * surf->bpe * surf->nsamples);
-       palign = MAX(8, palign);
        slice_pt = 1;
        if (tileb > surf->tsplit) {
                slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
        /* macro tile width & height */
        palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
        halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
-       surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+       mtileb = (palign / 8) * (halign / 8) * tileb;
+       mtile_pr = surf->nbx / palign;
+       mtile_ps = (mtile_pr * surf->nby) / halign;
+       surf->layer_size = mtile_ps * mtileb * slice_pt;
        surf->base_align = (palign / 8) * (halign / 8) * tileb;
        surf->palign = palign;
        surf->halign = halign;
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
 
        offset += surf.layer_size * mslice;
        if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+               /* old ddx are broken: they allocate a bo of w*h*bpp but
+                * program the slice with ALIGN(h, 8); catch this and patch
+                * the command stream.
+                */
+               if (!surf.mode) {
+                       volatile u32 *ib = p->ib.ptr;
+                       unsigned long tmp, nby, bsize, size, min = 0;
+
+                       /* find the height the ddx wants */
+                       if (surf.nby > 8) {
+                               min = surf.nby - 8;
+                       }
+                       bsize = radeon_bo_size(track->cb_color_bo[id]);
+                       tmp = track->cb_color_bo_offset[id] << 8;
+                       for (nby = surf.nby; nby > min; nby--) {
+                               size = nby * surf.nbx * surf.bpe * surf.nsamples;
+                               if ((tmp + size * mslice) <= bsize) {
+                                       break;
+                               }
+                       }
+                       if (nby > min) {
+                               surf.nby = nby;
+                               slice = ((nby * surf.nbx) / 64) - 1;
+                               if (!evergreen_surface_check(p, &surf, "cb")) {
+                                       /* check if this one works */
+                                       tmp += surf.layer_size * mslice;
+                                       if (tmp <= bsize) {
+                                               ib[track->cb_color_slice_idx[id]] = slice;
+                                               goto old_ddx_ok;
+                                       }
+                               }
+                       }
+               }
                dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
                         "offset %d, max layer %d, bo size %ld, slice %d)\n",
                         __func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
                        surf.tsplit, surf.mtilea);
                return -EINVAL;
        }
+old_ddx_ok:
 
        return 0;
 }
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR7_SLICE:
                tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
                track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+               track->cb_color_slice_idx[tmp] = idx;
                track->cb_dirty = true;
                break;
        case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR11_SLICE:
                tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
                track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+               track->cb_color_slice_idx[tmp] = idx;
                track->cb_dirty = true;
                break;
        case CB_COLOR0_ATTRIB:
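
The 2D-tiling fix sizes a layer in whole macro-tiles instead of multiplying raw width x height x bpp: it derives the macro-tile dimensions from bank geometry, pipe count and macro-tile aspect, then counts how many macro-tiles one slice spans. A standalone sketch of that arithmetic with assumed, illustrative surface parameters:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed, illustrative surface/track parameters. */
            unsigned bpe = 4, nsamples = 1;     /* bytes per element, samples */
            unsigned bankw = 1, bankh = 1;      /* bank width / height        */
            unsigned nbanks = 8, npipes = 8;    /* banks and pipes            */
            unsigned mtilea = 1;                /* macro-tile aspect          */
            unsigned nbx = 1024, nby = 768;     /* surface size in elements   */
            unsigned slice_pt = 1;              /* tile-split factor          */

            /* Same formulas as evergreen_surface_check_2d() after this patch. */
            unsigned tileb      = 64 * bpe * nsamples;
            unsigned palign     = (8 * bankw * npipes) * mtilea;
            unsigned halign     = (8 * bankh * nbanks) / mtilea;
            unsigned mtileb     = (palign / 8) * (halign / 8) * tileb;
            unsigned mtile_pr   = nbx / palign;
            unsigned mtile_ps   = (mtile_pr * nby) / halign;
            unsigned layer_size = mtile_ps * mtileb * slice_pt;

            printf("macro tile %ux%u, layer size %u bytes\n",
                   palign, halign, layer_size);
            return 0;
    }
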
index a51f880985f8baa8acb38082070aa34c6a65910a..65c54160028b2833c3a9576f57e7081506cd0260 100644 (file)
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
-               return;
-
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
index 2773039b49027917b6568dff37ca1e95ec2b9a95..b50b15c7049839c0b7a04d2dd33acff9fd19a27a 100644 (file)
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 
+#define        SMX_SAR_CTL0                                    0xA008
 #define        SMX_DC_CTL0                                     0xA020
 #define                USE_HASH_FUNCTION                               (1 << 0)
 #define                NUMBER_OF_SETS(x)                               ((x) << 1)
index 3df4efa1194206b516efeacddabefae16744b064..b7bf18e40215c3afe45766457fa9fe802f217fae 100644 (file)
@@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                rdev->config.cayman.max_pipes_per_simd = 4;
                rdev->config.cayman.max_tile_pipes = 2;
                if ((rdev->pdev->device == 0x9900) ||
-                   (rdev->pdev->device == 0x9901)) {
+                   (rdev->pdev->device == 0x9901) ||
+                   (rdev->pdev->device == 0x9905) ||
+                   (rdev->pdev->device == 0x9906) ||
+                   (rdev->pdev->device == 0x9907) ||
+                   (rdev->pdev->device == 0x9908) ||
+                   (rdev->pdev->device == 0x9909) ||
+                   (rdev->pdev->device == 0x9910) ||
+                   (rdev->pdev->device == 0x9917)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9903) ||
-                          (rdev->pdev->device == 0x9904)) {
+                          (rdev->pdev->device == 0x9904) ||
+                          (rdev->pdev->device == 0x990A) ||
+                          (rdev->pdev->device == 0x9913) ||
+                          (rdev->pdev->device == 0x9918)) {
                        rdev->config.cayman.max_simds_per_se = 4;
                        rdev->config.cayman.max_backends_per_se = 2;
-               } else if ((rdev->pdev->device == 0x9990) ||
-                          (rdev->pdev->device == 0x9991)) {
+               } else if ((rdev->pdev->device == 0x9919) ||
+                          (rdev->pdev->device == 0x9990) ||
+                          (rdev->pdev->device == 0x9991) ||
+                          (rdev->pdev->device == 0x9994) ||
+                          (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
                } else {
@@ -1290,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -1316,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
 
 int cayman_suspend(struct radeon_device *rdev)
 {
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        radeon_ib_pool_suspend(rdev);
        radeon_vm_manager_suspend(rdev);
index 45cfcea635076f17f35619d7e916aea10546bffb..bff62729381215996778ee493515bb9b6a6469f9 100644 (file)
@@ -1839,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                               NUM_CLIP_SEQ(3)));
        WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+       WREG32(VC_ENHANCE, 0);
 }
 
 
@@ -2426,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -2462,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               DRM_ERROR("radeon: audio resume failed\n");
-               return r;
-       }
-
        return r;
 }
 
@@ -2577,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
                rdev->accel_working = false;
        }
 
-       r = r600_audio_init(rdev);
-       if (r)
-               return r; /* TODO error handling */
        return 0;
 }
 
index 7c4fa77f018f7380b0481f385eb8c123b807701b..79b55916cf90c0ddfe397c007a867715cf0fea33 100644 (file)
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+       return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
                || rdev->family == CHIP_RS600
                || rdev->family == CHIP_RS690
                || rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
        int base_rate = 48000;
 
        switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
                WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
                WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
 
-               /* Some magic trigger or src sel? */
-               WREG32_P(0x5ac, 0x01, ~0x77);
+               /* Select DTO source */
+               WREG32(0x5ac, radeon_crtc->crtc_id);
        } else {
                switch (dig->dig_encoder) {
                case 0:
index 0133f5f09bd6c71cc95d2472beaacef51c3ffa2a..ca87f7afaf2374d02117ec91e479c1e5a385b8d2 100644 (file)
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                break;
+       case PACKET3_STRMOUT_BASE_UPDATE:
+               if (p->family < CHIP_RV770) {
+                       DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+                       return -EINVAL;
+               }
+               if (pkt->count != 1) {
+                       DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+                       return -EINVAL;
+               }
+               if (idx_value > 3) {
+                       DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+                       return -EINVAL;
+               }
+               {
+                       u64 offset;
+
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+                               return -EINVAL;
+                       }
+
+                       if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+                               return -EINVAL;
+                       }
+
+                       offset = radeon_get_ib_value(p, idx+1) << 8;
+                       if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+                                         offset, track->vgt_strmout_bo_offset[idx_value]);
+                               return -EINVAL;
+                       }
+
+                       if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+                               DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+                                         offset + 4, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+               }
+               break;
        case PACKET3_SURFACE_BASE_UPDATE:
                if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
                        DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
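
The new PACKET3_STRMOUT_BASE_UPDATE case checks that the relocated buffer is the tracked streamout buffer, that the packet offset equals the tracked offset, and that offset + 4 still fits inside the BO. A minimal standalone sketch of those consistency checks, with made-up offsets and sizes:

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal consistency checks in the spirit of the new
     * PACKET3_STRMOUT_BASE_UPDATE handling; offsets and sizes are made up. */
    static int check_strmout_update(uint64_t pkt_offset,
                                    uint64_t tracked_offset,
                                    uint64_t bo_size)
    {
            if (pkt_offset != tracked_offset)
                    return -1;      /* bo offset does not match the tracker */
            if (pkt_offset + 4 > bo_size)
                    return -1;      /* bo too small for the update          */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_strmout_update(0x1000, 0x1000, 0x2000)); /* ok       */
            printf("%d\n", check_strmout_update(0x1000, 0x0,    0x2000)); /* mismatch */
            return 0;
    }
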
index 226379e00ac1de31bc7dcdb1781ff64ee4457b8c..82a0a4c919c027ea77308917a0309aa9a818e4d8 100644 (file)
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
-               return;
-
        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
                WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
                       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
                       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
-                      HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
                       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
                       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
        }
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
        uint32_t offset;
        u32 hdmi;
 
-       if (ASIC_IS_DCE5(rdev))
+       if (ASIC_IS_DCE6(rdev))
                return;
 
        /* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        uint32_t offset;
 
-       if (ASIC_IS_DCE5(rdev))
+       if (ASIC_IS_DCE6(rdev))
                return;
 
        /* Called for ATOM_ENCODER_MODE_HDMI only */
index a0dbf1fe6a40815b335a06d9a7c14202aad625bf..025fd5b6c08c8c0d68bc93c990586cf2cc2b4689 100644 (file)
 #define                TC_L2_SIZE(x)                                   ((x)<<5)
 #define                L2_DISABLE_LATE_HIT                             (1<<9)
 
+#define        VC_ENHANCE                                      0x9714
 
 #define        VGT_CACHE_INVALIDATION                          0x88C4
 #define                CACHE_INVALIDATION(x)                           ((x)<<0)
 #define        PACKET3_SET_CTL_CONST                           0x6F
 #define                PACKET3_SET_CTL_CONST_OFFSET                    0x0003cff0
 #define                PACKET3_SET_CTL_CONST_END                       0x0003e200
+#define        PACKET3_STRMOUT_BASE_UPDATE                     0x72 /* r7xx */
 #define        PACKET3_SURFACE_BASE_UPDATE                     0x73
 
 
index 85dac33e3cce3c0eeca90ae3d57d066b4c771a24..fefcca55c1eb8bb76c7611cc0f38e5f04fe47765 100644 (file)
@@ -1374,9 +1374,9 @@ struct cayman_asic {
 
 struct si_asic {
        unsigned max_shader_engines;
-       unsigned max_pipes_per_simd;
        unsigned max_tile_pipes;
-       unsigned max_simds_per_se;
+       unsigned max_cu_per_sh;
+       unsigned max_sh_per_se;
        unsigned max_backends_per_se;
        unsigned max_texture_channel_caches;
        unsigned max_gprs;
@@ -1387,7 +1387,6 @@ struct si_asic {
        unsigned sc_hiz_tile_fifo_size;
        unsigned sc_earlyz_tile_fifo_size;
 
-       unsigned num_shader_engines;
        unsigned num_tile_pipes;
        unsigned num_backends_per_se;
        unsigned backend_disable_mask_per_asic;
index f0bb2b543b13d2f5ab437c00efdc1a0ea52c663a..2c4d53fd20c5c1e010e0d297d62c591e6ff618b9 100644 (file)
  *   2.13.0 - virtual memory support, streamout
  *   2.14.0 - add evergreen tiling informations
  *   2.15.0 - add max_pipes query
+ *   2.16.0 - fix evergreen 2D tiled surface calculation
+ *   2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       15
+#define KMS_DRIVER_MINOR       17
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 79db56e6c2ac2ca66d48bc6519e6406ee1a477ae..84b648a7ddd8cf697fc07fd00a1e2eed39ff46bd 100644 (file)
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
        rdev->vm_manager.enabled = false;
 
        /* mark first vm as always in use, it's the system one */
+       /* allocate enough for 2 full VM pts */
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                     rdev->vm_manager.max_pfn * 8,
+                                     rdev->vm_manager.max_pfn * 8 * 2,
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
                dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,12 +477,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 
        mutex_lock(&vm->mutex);
        if (last_pfn > vm->last_pfn) {
-               /* grow va space 32M by 32M */
-               unsigned align = ((32 << 20) >> 12) - 1;
+               /* release mutex and lock in right order */
+               mutex_unlock(&vm->mutex);
                radeon_mutex_lock(&rdev->cs_mutex);
-               radeon_vm_unbind_locked(rdev, vm);
+               mutex_lock(&vm->mutex);
+               /* and check again */
+               if (last_pfn > vm->last_pfn) {
+                       /* grow va space 32M by 32M */
+                       unsigned align = ((32 << 20) >> 12) - 1;
+                       radeon_vm_unbind_locked(rdev, vm);
+                       vm->last_pfn = (last_pfn + align) & ~align;
+               }
                radeon_mutex_unlock(&rdev->cs_mutex);
-               vm->last_pfn = (last_pfn + align) & ~align;
        }
        head = &vm->va;
        last_offset = 0;
@@ -595,8 +602,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
        if (bo_va == NULL)
                return 0;
 
-       mutex_lock(&vm->mutex);
        radeon_mutex_lock(&rdev->cs_mutex);
+       mutex_lock(&vm->mutex);
        radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
        radeon_mutex_unlock(&rdev->cs_mutex);
        list_del(&bo_va->vm_list);
@@ -627,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
-       vm->last_pfn = 0;
+       /* SI requires equal sized PTs for all VMs, so always set
+        * last_pfn to max_pfn.  cayman allows variable sized
+        * pts so we can grow then as needed.  Once we switch
+        * pts so we can grow them as needed.  Once we switch
+        */
+       if (rdev->family >= CHIP_TAHITI)
+               vm->last_pfn = rdev->vm_manager.max_pfn;
+       else
+               vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
@@ -641,9 +656,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
        struct radeon_bo_va *bo_va, *tmp;
        int r;
 
-       mutex_lock(&vm->mutex);
-
        radeon_mutex_lock(&rdev->cs_mutex);
+       mutex_lock(&vm->mutex);
        radeon_vm_unbind_locked(rdev, vm);
        radeon_mutex_unlock(&rdev->cs_mutex);
 
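
Besides taking cs_mutex before the per-VM mutex, the code above keeps Cayman's grow-by-32M policy for the VM address space, while SI now pins last_pfn to max_pfn up front. The 32 MiB granularity is expressed as a page-count mask; a standalone sketch of that round-up, assuming 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
            /* 32 MiB expressed in 4 KiB pages, minus one, used as an align mask
             * exactly as in radeon_vm_bo_add() above. */
            unsigned align = ((32 << 20) >> 12) - 1;

            unsigned last_pfn = 5000;                       /* assumed request */
            unsigned grown = (last_pfn + align) & ~align;   /* rounded up      */

            printf("align mask 0x%x: %u -> %u pages (%u MiB)\n",
                   align, last_pfn, grown, grown >> 8);
            return 0;
    }
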
index f28bd4b7ef980937c88eb54c30b5534adc5abf3b..21ec9f5653cedc94e729e521e8cf62e4b14c2884 100644 (file)
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* callback hw specific functions if any */
-       if (robj->rdev->asic->ioctl_wait_idle)
-               robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+       if (rdev->asic->ioctl_wait_idle)
+               robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
index f1016a5820d1f0b0cc3e89195f4576b836e9a269..5c58d7d90cb2a356710d9f4a2a17fe450a1aee8d 100644 (file)
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                break;
        case RADEON_INFO_MAX_PIPES:
                if (rdev->family >= CHIP_TAHITI)
-                       value = rdev->config.si.max_pipes_per_simd;
+                       value = rdev->config.si.max_cu_per_sh;
                else if (rdev->family >= CHIP_CAYMAN)
                        value = rdev->config.cayman.max_pipes_per_simd;
                else if (rdev->family >= CHIP_CEDAR)
index 08825548ee69c487909e16019ddbfca0e7cbcb6a..5b37e283ec38575a86d59a695b1f453251f9ae43 100644 (file)
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
                int i;
 
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-                       not_processed += radeon_fence_count_emitted(rdev, i);
-                       if (not_processed >= 3)
-                               break;
+                       struct radeon_ring *ring = &rdev->ring[i];
+
+                       if (ring->ready) {
+                               not_processed += radeon_fence_count_emitted(rdev, i);
+                               if (not_processed >= 3)
+                                       break;
+                       }
                }
 
                if (not_processed >= 3) { /* should upclock */
index 8ddab4c76710f1b00e3917e0415a6aad91965f9a..6bef46ace8315d7706998577fa507769b12c17a3 100644 (file)
@@ -169,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
        struct radeon_bo *bo = gem_to_radeon_bo(obj);
        int ret = 0;
 
+       ret = radeon_bo_reserve(bo, false);
+       if (unlikely(ret != 0))
+               return ERR_PTR(ret);
+
        /* pin buffer into GTT */
        ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-       if (ret)
+       if (ret) {
+               radeon_bo_unreserve(bo);
                return ERR_PTR(ret);
-
+       }
+       radeon_bo_unreserve(bo);
        return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
 }
 
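radeon_gem_prime_export() now takes the buffer reservation before pinning and releases it on both the failure and the success path. A generic sketch of that acquire / operate / release shape with the error unwinding made explicit (buf_reserve(), buf_pin() and the struct are stand-ins, not the radeon_bo API):

#include <stdbool.h>
#include <stdio.h>

struct buffer { bool reserved; bool pinned; };

/* Stand-ins for the reserve/pin primitives; they always succeed here. */
static int  buf_reserve(struct buffer *b)	{ b->reserved = true;  return 0; }
static void buf_unreserve(struct buffer *b)	{ b->reserved = false; }
static int  buf_pin(struct buffer *b)		{ b->pinned = true;    return 0; }

/* The reservation only protects the pin itself, so it is dropped again
 * on every exit path, error or success. */
static int export_buffer(struct buffer *b)
{
	int ret = buf_reserve(b);

	if (ret)
		return ret;

	ret = buf_pin(b);
	if (ret) {
		buf_unreserve(b);	/* error path: undo the reservation */
		return ret;
	}
	buf_unreserve(b);		/* success path: reservation no longer needed */
	return 0;
}

int main(void)
{
	struct buffer b = { false, false };

	printf("export: %d (pinned=%d, reserved=%d)\n",
	       export_buffer(&b), b.pinned, b.reserved);
	return 0;
}
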
index 25f9eef12c42a8caf6d05a0de3b37d5d6d6352a7..e95c5e61d4e2211a86859a0b0143cdc3139f8883 100644 (file)
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
-               return r;
-       }
-
        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing audio\n");
+               return r;
+       }
+
        return 0;
 }
 
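The rs600 change (and the identical rs690 hunk below) only reorders startup so that r600_audio_init() runs after the IB pool and the rings have been started. A small table-driven sketch of keeping such an ordering explicit (the step names and bodies are illustrative):

#include <stddef.h>
#include <stdio.h>

/* Ordered bring-up: later steps may depend on earlier ones
 * (e.g. audio only once the rings are running). */
typedef int (*init_fn)(void);

static int start_ib_pool(void) { printf("ib pool started\n");   return 0; }
static int start_rings(void)   { printf("rings started\n");     return 0; }
static int init_audio(void)    { printf("audio initialized\n"); return 0; }

static const struct { const char *name; init_fn fn; } steps[] = {
	{ "ib_pool", start_ib_pool },
	{ "rings",   start_rings },
	{ "audio",   init_audio },	/* deliberately last, mirroring the hunk */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		if (steps[i].fn()) {
			fprintf(stderr, "step %s failed\n", steps[i].name);
			return 1;
		}
	}
	return 0;
}
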
index 3277ddecfe9fbd7755ba3c6bde888ebb7df68e36..159b6a43fda06598c3f76ca28c12b337d53105e6 100644 (file)
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing audio\n");
-               return r;
-       }
-
        r = radeon_ib_pool_start(rdev);
        if (r)
                return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing audio\n");
+               return r;
+       }
+
        return 0;
 }
 
index 04ddc365a908c8110537b80dc998a95ee337d0e1..b4f51c569c369962c2afebdee3ded9b1f71679c5 100644 (file)
@@ -616,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
                                       ACK_FLUSH_CTL(3) |
                                       SYNC_FLUSH_CTL));
 
+       if (rdev->family != CHIP_RV770)
+               WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
        db_debug3 = RREG32(DB_DEBUG3);
        db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
        switch (rdev->family) {
@@ -792,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
        WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
                                          NUM_CLIP_SEQ(3)));
-
+       WREG32(VC_ENHANCE, 0);
 }
 
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -956,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio init failed\n");
+               return r;
+       }
+
        return 0;
 }
 
@@ -978,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "radeon: audio init failed\n");
-               return r;
-       }
-
        return r;
 
 }
@@ -1092,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
                rdev->accel_working = false;
        }
 
-       r = r600_audio_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "radeon: audio init failed\n");
-               return r;
-       }
-
        return 0;
 }
 
index fdc0898960119d8c6f38104c32e1960a7e0d16f9..b0adfc595d7541cdeb782c903101cf92c6d1ce37 100644 (file)
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 
+#define        SMX_SAR_CTL0                                    0xA008
 #define        SMX_DC_CTL0                                     0xA020
 #define                USE_HASH_FUNCTION                               (1 << 0)
 #define                CACHE_DEPTH(x)                                  ((x) << 1)
 #define        TCP_CNTL                                        0x9610
 #define        TCP_CHAN_STEER                                  0x9614
 
+#define        VC_ENHANCE                                      0x9714
+
 #define        VGT_CACHE_INVALIDATION                          0x88C4
 #define                CACHE_INVALIDATION(x)                           ((x)<<0)
 #define                        VC_ONLY                                         0
index 549732e56ca959b0f07d4ce32f0ff0af0e480ab1..0b0279291a73e79109dd95db5ceee4823be1da66 100644 (file)
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
 /*
  * Core functions
  */
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
-                                          u32 num_tile_pipes,
-                                          u32 num_backends_per_asic,
-                                          u32 *backend_disable_mask_per_asic,
-                                          u32 num_shader_engines)
-{
-       u32 backend_map = 0;
-       u32 enabled_backends_mask = 0;
-       u32 enabled_backends_count = 0;
-       u32 num_backends_per_se;
-       u32 cur_pipe;
-       u32 swizzle_pipe[SI_MAX_PIPES];
-       u32 cur_backend = 0;
-       u32 i;
-       bool force_no_swizzle;
-
-       /* force legal values */
-       if (num_tile_pipes < 1)
-               num_tile_pipes = 1;
-       if (num_tile_pipes > rdev->config.si.max_tile_pipes)
-               num_tile_pipes = rdev->config.si.max_tile_pipes;
-       if (num_shader_engines < 1)
-               num_shader_engines = 1;
-       if (num_shader_engines > rdev->config.si.max_shader_engines)
-               num_shader_engines = rdev->config.si.max_shader_engines;
-       if (num_backends_per_asic < num_shader_engines)
-               num_backends_per_asic = num_shader_engines;
-       if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
-               num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
-       /* make sure we have the same number of backends per se */
-       num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
-       /* set up the number of backends per se */
-       num_backends_per_se = num_backends_per_asic / num_shader_engines;
-       if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
-               num_backends_per_se = rdev->config.si.max_backends_per_se;
-               num_backends_per_asic = num_backends_per_se * num_shader_engines;
-       }
-
-       /* create enable mask and count for enabled backends */
-       for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-               if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
-                       enabled_backends_mask |= (1 << i);
-                       ++enabled_backends_count;
-               }
-               if (enabled_backends_count == num_backends_per_asic)
-                       break;
-       }
-
-       /* force the backends mask to match the current number of backends */
-       if (enabled_backends_count != num_backends_per_asic) {
-               u32 this_backend_enabled;
-               u32 shader_engine;
-               u32 backend_per_se;
-
-               enabled_backends_mask = 0;
-               enabled_backends_count = 0;
-               *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
-               for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-                       /* calc the current se */
-                       shader_engine = i / rdev->config.si.max_backends_per_se;
-                       /* calc the backend per se */
-                       backend_per_se = i % rdev->config.si.max_backends_per_se;
-                       /* default to not enabled */
-                       this_backend_enabled = 0;
-                       if ((shader_engine < num_shader_engines) &&
-                           (backend_per_se < num_backends_per_se))
-                               this_backend_enabled = 1;
-                       if (this_backend_enabled) {
-                               enabled_backends_mask |= (1 << i);
-                               *backend_disable_mask_per_asic &= ~(1 << i);
-                               ++enabled_backends_count;
-                       }
-               }
-       }
-
-
-       memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
-       switch (rdev->family) {
-       case CHIP_TAHITI:
-       case CHIP_PITCAIRN:
-       case CHIP_VERDE:
-               force_no_swizzle = true;
-               break;
-       default:
-               force_no_swizzle = false;
-               break;
-       }
-       if (force_no_swizzle) {
-               bool last_backend_enabled = false;
-
-               force_no_swizzle = false;
-               for (i = 0; i < SI_MAX_BACKENDS; ++i) {
-                       if (((enabled_backends_mask >> i) & 1) == 1) {
-                               if (last_backend_enabled)
-                                       force_no_swizzle = true;
-                               last_backend_enabled = true;
-                       } else
-                               last_backend_enabled = false;
-               }
-       }
-
-       switch (num_tile_pipes) {
-       case 1:
-       case 3:
-       case 5:
-       case 7:
-               DRM_ERROR("odd number of pipes!\n");
-               break;
-       case 2:
-               swizzle_pipe[0] = 0;
-               swizzle_pipe[1] = 1;
-               break;
-       case 4:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 1;
-                       swizzle_pipe[3] = 3;
-               }
-               break;
-       case 6:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 1;
-                       swizzle_pipe[4] = 3;
-                       swizzle_pipe[5] = 5;
-               }
-               break;
-       case 8:
-               if (force_no_swizzle) {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 1;
-                       swizzle_pipe[2] = 2;
-                       swizzle_pipe[3] = 3;
-                       swizzle_pipe[4] = 4;
-                       swizzle_pipe[5] = 5;
-                       swizzle_pipe[6] = 6;
-                       swizzle_pipe[7] = 7;
-               } else {
-                       swizzle_pipe[0] = 0;
-                       swizzle_pipe[1] = 2;
-                       swizzle_pipe[2] = 4;
-                       swizzle_pipe[3] = 6;
-                       swizzle_pipe[4] = 1;
-                       swizzle_pipe[5] = 3;
-                       swizzle_pipe[6] = 5;
-                       swizzle_pipe[7] = 7;
-               }
-               break;
-       }
-
-       for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
-               while (((1 << cur_backend) & enabled_backends_mask) == 0)
-                       cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
-               backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
-               cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-       }
-
-       return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
-                                       u32 disable_mask_per_se,
-                                       u32 max_disable_mask_per_se,
-                                       u32 num_shader_engines)
-{
-       u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
-       u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
-       if (num_shader_engines == 1)
-               return disable_mask_per_asic;
-       else if (num_shader_engines == 2)
-               return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
-       else
-               return 0xffffffff;
-}
-
 static void si_tiling_mode_table_init(struct radeon_device *rdev)
 {
        const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
 }
 
+static void si_select_se_sh(struct radeon_device *rdev,
+                           u32 se_num, u32 sh_num)
+{
+       u32 data = INSTANCE_BROADCAST_WRITES;
+
+       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+               data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+       else if (se_num == 0xffffffff)
+               data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+       else if (sh_num == 0xffffffff)
+               data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+       else
+               data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+       WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+       u32 i, mask = 0;
+
+       for (i = 0; i < bit_width; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+       return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       if (data & 1)
+               data &= INACTIVE_CUS_MASK;
+       else
+               data = 0;
+       data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+       data >>= INACTIVE_CUS_SHIFT;
+
+       mask = si_create_bitmask(cu_per_sh);
+
+       return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+                        u32 se_num, u32 sh_per_se,
+                        u32 cu_per_sh)
+{
+       int i, j, k;
+       u32 data, mask, active_cu;
+
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       si_select_se_sh(rdev, i, j);
+                       data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+                       active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+                       mask = 1;
+                       for (k = 0; k < 16; k++) {
+                               mask <<= k;
+                               if (active_cu & mask) {
+                                       data &= ~mask;
+                                       WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+                                       break;
+                               }
+                       }
+               }
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+                             u32 max_rb_num, u32 se_num,
+                             u32 sh_per_se)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_RB_BACKEND_DISABLE);
+       if (data & 1)
+               data &= BACKEND_DISABLE_MASK;
+       else
+               data = 0;
+       data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+       data >>= BACKEND_DISABLE_SHIFT;
+
+       mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+       return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+                       u32 se_num, u32 sh_per_se,
+                       u32 max_rb_num)
+{
+       int i, j;
+       u32 data, mask;
+       u32 disabled_rbs = 0;
+       u32 enabled_rbs = 0;
+
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       si_select_se_sh(rdev, i, j);
+                       data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+                       disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+               }
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+       mask = 1;
+       for (i = 0; i < max_rb_num; i++) {
+               if (!(disabled_rbs & mask))
+                       enabled_rbs |= mask;
+               mask <<= 1;
+       }
+
+       for (i = 0; i < se_num; i++) {
+               si_select_se_sh(rdev, i, 0xffffffff);
+               data = 0;
+               for (j = 0; j < sh_per_se; j++) {
+                       switch (enabled_rbs & 3) {
+                       case 1:
+                               data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 2:
+                               data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 3:
+                       default:
+                               data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+                               break;
+                       }
+                       enabled_rbs >>= 2;
+               }
+               WREG32(PA_SC_RASTER_CONFIG, data);
+       }
+       si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
 static void si_gpu_init(struct radeon_device *rdev)
 {
-       u32 cc_rb_backend_disable = 0;
-       u32 cc_gc_shader_array_config;
        u32 gb_addr_config = 0;
        u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 gb_backend_map;
-       u32 cgts_tcc_disable;
        u32 sx_debug_1;
-       u32 gc_user_shader_array_config;
-       u32 gc_user_rb_backend_disable;
-       u32 cgts_user_tcc_disable;
        u32 hdp_host_path_cntl;
        u32 tmp;
        int i, j;
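The helpers added in this hunk derive enable masks by reading per-SE/SH disable bits back from the hardware and inverting them against a width-limited mask. One detail that looks suspect on reading: the inner loop of si_setup_spi() shifts an accumulating mask with mask <<= k, which appears to test bits 0, 1, 3, 6, ... rather than each of the 16 CU bits in turn; the sketch below walks bits with a plain 1u << k instead. The register value and RB count here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Build a mask with the low bit_width bits set, like si_create_bitmask(). */
static uint32_t create_bitmask(uint32_t bit_width)
{
	uint32_t mask = 0;

	for (uint32_t i = 0; i < bit_width; i++)
		mask = (mask << 1) | 1;
	return mask;			/* e.g. bit_width = 4 -> 0xf */
}

/* "disabled" stands for per-backend disable bits read from a register;
 * the enabled set is its complement, limited to rb_per_sh bits. */
static uint32_t enabled_rbs(uint32_t disabled, uint32_t rb_per_sh)
{
	return ~disabled & create_bitmask(rb_per_sh);
}

int main(void)
{
	uint32_t disabled = 0x2;	/* pretend RB1 is fused off */
	uint32_t enabled = enabled_rbs(disabled, 4);

	printf("enabled RB mask: 0x%x\n", (unsigned)enabled);	/* 0xd */

	/* Walking individual bits: test 1u << k, not an accumulating shift. */
	for (int k = 0; k < 4; k++)
		printf("RB%d %s\n", k, (enabled & (1u << k)) ? "enabled" : "disabled");
	return 0;
}
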
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_TAHITI:
                rdev->config.si.max_shader_engines = 2;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 12;
-               rdev->config.si.max_simds_per_se = 8;
+               rdev->config.si.max_cu_per_sh = 8;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 12;
                rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x100;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_PITCAIRN:
                rdev->config.si.max_shader_engines = 2;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 8;
-               rdev->config.si.max_simds_per_se = 5;
+               rdev->config.si.max_cu_per_sh = 5;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 8;
                rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x100;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_VERDE:
        default:
                rdev->config.si.max_shader_engines = 1;
-               rdev->config.si.max_pipes_per_simd = 4;
                rdev->config.si.max_tile_pipes = 4;
-               rdev->config.si.max_simds_per_se = 2;
+               rdev->config.si.max_cu_per_sh = 2;
+               rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 4;
                rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_prim_fifo_size_backend = 0x40;
                rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
                break;
        }
 
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
-       cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
-       cgts_tcc_disable = 0xffff0000;
-       for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
-               cgts_tcc_disable &= ~(1 << (16 + i));
-       gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
-       gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
-       cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
-       rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
        rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
-       tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
-       tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
-       rdev->config.si.backend_disable_mask_per_asic =
-               si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
-                                            rdev->config.si.num_shader_engines);
-       rdev->config.si.backend_map =
-               si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-                                               rdev->config.si.num_backends_per_se *
-                                               rdev->config.si.num_shader_engines,
-                                               &rdev->config.si.backend_disable_mask_per_asic,
-                                               rdev->config.si.num_shader_engines);
-       tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
-       rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
        rdev->config.si.mem_max_burst_length_bytes = 256;
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
        rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        rdev->config.si.num_gpus = 1;
        rdev->config.si.multi_gpu_tile_size = 64;
 
-       gb_addr_config = 0;
-       switch (rdev->config.si.num_tile_pipes) {
-       case 1:
-               gb_addr_config |= NUM_PIPES(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_PIPES(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_PIPES(2);
-               break;
-       case 8:
-       default:
-               gb_addr_config |= NUM_PIPES(3);
-               break;
-       }
-
-       tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
-       gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
-       gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
-       tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
-       gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
-       switch (rdev->config.si.num_gpus) {
-       case 1:
-       default:
-               gb_addr_config |= NUM_GPUS(0);
-               break;
-       case 2:
-               gb_addr_config |= NUM_GPUS(1);
-               break;
-       case 4:
-               gb_addr_config |= NUM_GPUS(2);
-               break;
-       }
-       switch (rdev->config.si.multi_gpu_tile_size) {
-       case 16:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
-               break;
-       case 32:
-       default:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
-               break;
-       case 64:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-               break;
-       case 128:
-               gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
-               break;
-       }
+       /* fix up row size */
+       gb_addr_config &= ~ROW_SIZE_MASK;
        switch (rdev->config.si.mem_row_size_in_kb) {
        case 1:
        default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
                break;
        }
 
-       tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
-       rdev->config.si.num_tile_pipes = (1 << tmp);
-       tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
-       rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
-       tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
-       rdev->config.si.num_shader_engines = tmp + 1;
-       tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
-       rdev->config.si.num_gpus = tmp + 1;
-       tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
-       rdev->config.si.multi_gpu_tile_size = 1 << tmp;
-       tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
-       rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
-       gb_backend_map =
-               si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
-                                               rdev->config.si.num_backends_per_se *
-                                               rdev->config.si.num_shader_engines,
-                                               &rdev->config.si.backend_disable_mask_per_asic,
-                                               rdev->config.si.num_shader_engines);
-
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
         * not have bank info, so create a custom tiling dword.
         * bits 3:0   num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.tile_config |= (3 << 0);
                break;
        }
-       rdev->config.si.tile_config |=
-               ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+       if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+               rdev->config.si.tile_config |= 1 << 4;
+       else
+               rdev->config.si.tile_config |= 0 << 4;
        rdev->config.si.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.si.tile_config |=
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
-       rdev->config.si.backend_map = gb_backend_map;
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-       /* primary versions */
-       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
-       WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+       si_tiling_mode_table_init(rdev);
 
-       /* user versions */
-       WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-       WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
+       si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+                   rdev->config.si.max_sh_per_se,
+                   rdev->config.si.max_backends_per_se);
 
-       WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+       si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+                    rdev->config.si.max_sh_per_se,
+                    rdev->config.si.max_cu_per_sh);
 
-       si_tiling_mode_table_init(rdev);
 
        /* set HW defaults for 3D engine */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -2518,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(0x15DC, 0);
 
        /* empty context1-15 */
-       /* FIXME start with 1G, once using 2 level pt switch to full
+       /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
index eda938a7cb6e1ffea2c2f6f7cedac0430541ec3c..501f9d431d5785de91f3aba3cb3a8e1c3aa74ea3 100644 (file)
 #define SI_DC_GPIO_HPD_EN                        0x65b8
 #define SI_DC_GPIO_HPD_Y                         0x65bc
 
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)           (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2                  0
+#       define SI_ADDR_SURF_P4_8x16             4
+#       define SI_ADDR_SURF_P4_16x16            5
+#       define SI_ADDR_SURF_P4_16x32            6
+#       define SI_ADDR_SURF_P4_32x32            7
+#       define SI_ADDR_SURF_P8_16x16_8x16       8
+#       define SI_ADDR_SURF_P8_16x32_8x16       9
+#       define SI_ADDR_SURF_P8_32x32_8x16       10
+#       define SI_ADDR_SURF_P8_16x32_16x16      11
+#       define SI_ADDR_SURF_P8_32x32_16x16      12
+#       define SI_ADDR_SURF_P8_32x32_16x32      13
+#       define SI_ADDR_SURF_P8_32x64_32x32      14
+
 #endif
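The SI_GRPH_CONTROL additions follow the usual mask-and-shift pattern for packing several fields into one 32-bit register word. A self-contained example of composing such a value the same way (only a subset of the macros is reproduced, and the chosen field values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same shift/mask style as the SI_GRPH_* macros above (subset only). */
#define GRPH_DEPTH(x)			(((x) & 0x3) << 0)
#define GRPH_NUM_BANKS(x)		(((x) & 0x3) << 2)
#define GRPH_FORMAT(x)			(((x) & 0x7) << 8)
#define GRPH_ARRAY_MODE(x)		(((x) & 0x7) << 20)

#define GRPH_DEPTH_32BPP		2
#define ADDR_SURF_8_BANK		2
#define GRPH_FORMAT_ARGB8888		0
#define GRPH_ARRAY_2D_TILED_THIN1	4

int main(void)
{
	/* Pack an (arbitrary) 32bpp, 8-bank, ARGB8888, 2D-tiled configuration. */
	uint32_t grph_control =
		GRPH_DEPTH(GRPH_DEPTH_32BPP) |
		GRPH_NUM_BANKS(ADDR_SURF_8_BANK) |
		GRPH_FORMAT(GRPH_FORMAT_ARGB8888) |
		GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);

	printf("GRPH_CONTROL = 0x%08x\n", (unsigned)grph_control);	/* 0x0040000a */
	return 0;
}
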
index 53ea2c42dbd6f96954180fb617b13a1c4ff20313..db4067962868a07c0814c037b7c4eab2a8b52114 100644 (file)
 #ifndef SI_H
 #define SI_H
 
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+
 #define        CG_MULT_THERMAL_STATUS                                  0x714
 #define                ASIC_MAX_TEMP(x)                                ((x) << 0)
 #define                ASIC_MAX_TEMP_MASK                              0x000001ff
 #define                SOFT_RESET_IA                                   (1 << 15)
 
 #define GRBM_GFX_INDEX                                 0x802C
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SH_INDEX(x)                             ((x) << 8)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                SH_BROADCAST_WRITES                     (1 << 29)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
 
 #define GRBM_INT_CNTL                                   0x8060
 #       define RDERR_INT_ENABLE                         (1 << 0)
 #define        VGT_TF_MEMORY_BASE                              0x89B8
 
 #define CC_GC_SHADER_ARRAY_CONFIG                      0x89bc
+#define                INACTIVE_CUS_MASK                       0xFFFF0000
+#define                INACTIVE_CUS_SHIFT                      16
 #define GC_USER_SHADER_ARRAY_CONFIG                    0x89c0
 
 #define        PA_CL_ENHANCE                                   0x8A14
 #define RLC_MC_CNTL                                       0xC344
 #define RLC_UCODE_CNTL                                    0xC348
 
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
 #define VGT_EVENT_INITIATOR                             0x28a90
 #       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
 #       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
index 30d98d14b5c586bd6d53a845488c2368a7ab9bc4..dd14cd1a0033e5a91d948c08a92e81772926674e 100644 (file)
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv == NULL)
                return -ENOMEM;
 
+       idr_init(&dev_priv->object_idr);
        dev->dev_private = (void *)dev_priv;
        dev_priv->chipset = chipset;
-       idr_init(&dev->object_name_idr);
 
        return 0;
 }
index b67cfcaa661f87bffbb84d3f0ed9786efb591e5a..36f4b28c1b90a499043a42b9b2bce9894401d36e 100644 (file)
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        (*destroy)(bo);
                else
                        kfree(bo);
+               ttm_mem_global_free(mem_glob, acc_size);
                return -EINVAL;
        }
        bo->destroy = destroy;
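With this hunk ttm_bo_init() releases the memory-accounting charge (acc_size) itself when initialization fails, keeping every successful ttm_mem_global_alloc() paired with exactly one ttm_mem_global_free(); the next hunk then drops the now-redundant pre-charge from ttm_bo_create(). A reduced sketch of that pairing rule, with a toy counter standing in for the TTM global accounting:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct accounting { long charged; };

static int  acct_alloc(struct accounting *a, long sz) { a->charged += sz; return 0; }
static void acct_free(struct accounting *a, long sz)  { a->charged -= sz; }

/* The caller charged acc_size before calling in; on any failure the init
 * routine releases that charge itself so the counter cannot leak. */
static int object_init(struct accounting *a, long acc_size, bool fail)
{
	if (fail) {
		acct_free(a, acc_size);	/* balance the caller's acct_alloc() */
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct accounting acct = { 0 };
	long acc_size = 512;

	acct_alloc(&acct, acc_size);
	if (object_init(&acct, acc_size, true))
		fprintf(stderr, "init failed\n");

	printf("outstanding charge: %ld\n", acct.charged);	/* 0, nothing leaked */
	return 0;
}
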
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
                        struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo;
-       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;
 
-       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
-       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
-       if (unlikely(ret != 0))
-               return ret;
-
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
-       if (unlikely(bo == NULL)) {
-               ttm_mem_global_free(mem_glob, acc_size);
+       if (unlikely(bo == NULL))
                return -ENOMEM;
-       }
 
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                                buffer_start, interruptible,
                          persistent_swap_storage, acc_size, NULL, NULL);
index 4d02c46a9420b9043b13981d2f10f95b913f1ed5..6e52069894b35d91037474521e5ebf6e2f157e98 100644 (file)
 
 static struct drm_driver driver;
 
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
+ */
 static struct usb_device_id id_table[] = {
-       {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+       {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+        .bInterfaceSubClass = 0x00,
+        .bInterfaceProtocol = 0x00,
+        .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+                       USB_DEVICE_ID_MATCH_INT_CLASS |
+                       USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+                       USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
        {},
 };
 MODULE_DEVICE_TABLE(usb, id_table);
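The id_table now matches DisplayLink's vendor ID together with the vendor-defined interface class/subclass/protocol, so other interfaces or products from the same vendor should no longer bind to this DRM driver. The same filter written out as a plain predicate (the descriptor struct is a simplified stand-in, not the real struct usb_interface_descriptor):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in holding just the fields the match cares about. */
struct iface_desc {
	uint16_t idVendor;
	uint8_t  bInterfaceClass, bInterfaceSubClass, bInterfaceProtocol;
};

static bool udl_like_match(const struct iface_desc *d)
{
	return d->idVendor == 0x17e9 &&		/* DisplayLink VID */
	       d->bInterfaceClass == 0xff &&	/* vendor-defined class */
	       d->bInterfaceSubClass == 0x00 &&
	       d->bInterfaceProtocol == 0x00;
}

int main(void)
{
	struct iface_desc gfx   = { 0x17e9, 0xff, 0x00, 0x00 };
	struct iface_desc audio = { 0x17e9, 0x01, 0x01, 0x00 };	/* same vendor, audio-class interface */

	printf("gfx interface:   %s\n", udl_like_match(&gfx)   ? "bind" : "skip");
	printf("audio interface: %s\n", udl_like_match(&audio) ? "bind" : "skip");
	return 0;
}
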
index a8d5f09428c72a1887c26d7374b6f971bc871a1e..4c2d836a0893f36f91247a5ce38d6cfed883faa8 100644 (file)
@@ -61,7 +61,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
                        u8 length;
                        u16 key;
 
-                       key = *((u16 *) desc);
+                       key = le16_to_cpu(*((u16 *) desc));
                        desc += sizeof(u16);
                        length = *desc;
                        desc++;
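The vendor-descriptor parser now converts the 16-bit key with le16_to_cpu(), since the descriptor bytes are little-endian on the wire regardless of host byte order. A portable userspace equivalent that also sidesteps the unaligned pointer cast:

#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian u16 from a byte stream, independent of host
 * endianness and alignment (what le16_to_cpu() achieves in the kernel). */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* a key of 0x0200 followed by a length byte, as a descriptor might carry */
	const uint8_t desc[] = { 0x00, 0x02, 0x05 };

	uint16_t key    = get_le16(desc);
	uint8_t  length = desc[2];

	printf("key=0x%04x length=%u\n", key, length);	/* key=0x0200 length=5 */
	return 0;
}
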
index 1f182254e81e84beb0c4ed613c02e081ebf38258..c126182ac07eee1411e453dffdff43703302c13c 100644 (file)
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv == NULL)
                return -ENOMEM;
 
+       idr_init(&dev_priv->object_idr);
        dev->dev_private = (void *)dev_priv;
 
        dev_priv->chipset = chipset;
 
-       idr_init(&dev->object_name_idr);
-
        pci_set_master(dev->pdev);
 
        ret = drm_vblank_init(dev, 1);
index 38f9534ac513bcf51b90fcfff56d6bd6ed84c988..5b3c7d135dc91c407fc9b2bf19b2e3e912ccfed5 100644 (file)
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
        return NULL;
 }
 
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+       struct vga_switcheroo_client *client;
+
+       client = find_client_from_pci(&vgasr_priv.clients, pdev);
+       if (!client)
+               return VGA_SWITCHEROO_NOT_FOUND;
+       if (!vgasr_priv.active)
+               return VGA_SWITCHEROO_INIT;
+       return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
+
 void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 {
        struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
                vga_switchon(new_client);
 
        vga_set_default_device(new_client->pdev);
-       set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
-
        return 0;
 }
 
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
 
        active->active = false;
 
+       set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
        if (new_client->fb_info) {
                struct fb_event event;
                event.info = new_client->fb_info;
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
        if (new_client->ops->reprobe)
                new_client->ops->reprobe(new_client->pdev);
 
-       set_audio_state(active->id, VGA_SWITCHEROO_OFF);
-
        if (active->pwr_state == VGA_SWITCHEROO_ON)
                vga_switchoff(active);
 
+       set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
        new_client->active = true;
        return 0;
 }
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
        /* pwr off the device not in use */
        if (strncmp(usercmd, "OFF", 3) == 0) {
                list_for_each_entry(client, &vgasr_priv.clients, list) {
-                       if (client->active)
+                       if (client->active || client_is_audio(client))
                                continue;
+                       set_audio_state(client->id, VGA_SWITCHEROO_OFF);
                        if (client->pwr_state == VGA_SWITCHEROO_ON)
                                vga_switchoff(client);
                }
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
        /* pwr on the device not in use */
        if (strncmp(usercmd, "ON", 2) == 0) {
                list_for_each_entry(client, &vgasr_priv.clients, list) {
-                       if (client->active)
+                       if (client->active || client_is_audio(client))
                                continue;
                        if (client->pwr_state == VGA_SWITCHEROO_OFF)
                                vga_switchon(client);
+                       set_audio_state(client->id, VGA_SWITCHEROO_ON);
                }
                goto out;
        }
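Taken together, the vga_switcheroo hunks appear to tie the HDA audio client's power state to its GPU: the outgoing card's audio is switched off before the GPU is powered down, the incoming card's audio is switched on only after the GPU switch has completed, and the debugfs ON/OFF paths toggle the audio state alongside the GPU while skipping audio clients themselves. A toy model of that ordering (the client struct and helpers are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

enum pwr { PWR_OFF, PWR_ON };

struct client { const char *name; bool is_audio; enum pwr state; };

static void set_power(struct client *c, enum pwr s)
{
	c->state = s;
	printf("%s -> %s\n", c->name, s == PWR_ON ? "ON" : "OFF");
}

/* Power a GPU and its companion HDMI audio function as a pair:
 * audio goes down before the GPU and comes back up after it. */
static void power_pair(struct client *gpu, struct client *audio, enum pwr s)
{
	if (s == PWR_OFF) {
		set_power(audio, PWR_OFF);
		set_power(gpu, PWR_OFF);
	} else {
		set_power(gpu, PWR_ON);
		set_power(audio, PWR_ON);
	}
}

int main(void)
{
	struct client dgpu       = { "dGPU",       false, PWR_ON };
	struct client dgpu_audio = { "dGPU audio", true,  PWR_ON };

	power_pair(&dgpu, &dgpu_audio, PWR_OFF);	/* switching away from the dGPU */
	return 0;
}
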
index 034c80a10f1fd547ed2757503d63f74965105118..3fda8c87f02cd21e4c5eaf16d67010db203aaaf4 100644 (file)
@@ -1,20 +1,11 @@
 #
 # HID driver configuration
 #
-menuconfig HID_SUPPORT
-       bool "HID Devices"
-       depends on INPUT
-       default y
-       ---help---
-         Say Y here to get to see options for various computer-human interface
-         device drivers. This option alone does not add any kernel code.
-
-         If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+     depends on INPUT
 
 config HID
-       tristate "Generic HID support"
+       tristate "HID bus support"
        depends on INPUT
        default y
        ---help---
@@ -23,14 +14,17 @@ config HID
          most commonly used to refer to the USB-HID specification, but other
          devices (such as, but not strictly limited to, Bluetooth) are
          designed using HID specification (this involves certain keyboards,
-         mice, tablets, etc). This option compiles into kernel the generic
-         HID layer code (parser, usages, etc.), which can then be used by
-         transport-specific HID implementation (like USB or Bluetooth).
+         mice, tablets, etc). This option adds the HID bus to the kernel,
+         together with generic HID layer code. The HID devices are added and
+         removed from the HID bus by the transport-layer drivers, such as
+         usbhid (USB_HID) and hidp (BT_HIDP).
 
          For docs and specs, see http://www.usb.org/developers/hidpage/
 
          If unsure, say Y.
 
+if HID
+
 config HID_BATTERY_STRENGTH
        bool "Battery level reporting for HID devices"
        depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,22 @@ config HIDRAW
 
        If unsure, say Y.
 
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
-       depends on HID
-
 config HID_GENERIC
        tristate "Generic HID driver"
        depends on HID
-       default y
+       default HID
        ---help---
-       Support for generic HID devices.
+       Support for generic devices on the HID bus. This includes most
+       keyboards and mice, joysticks, tablets and digitizers.
 
        To compile this driver as a module, choose M here: the module
        will be called hid-generic.
 
        If unsure, say Y.
 
+menu "Special HID drivers"
+       depends on HID
+
 config HID_A4TECH
        tristate "A4 tech mice" if EXPERT
        depends on USB_HID
@@ -393,6 +386,7 @@ config HID_MULTITOUCH
          - Unitec Panels
          - XAT optical touch panels
          - Xiroku optical touch panels
+         - Zytronic touch panels
 
          If unsure, say N.
 
@@ -662,4 +656,8 @@ config HID_ZYDACRON
 
 endmenu
 
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
index fa10f847f7dbf163ba5856c296690039ba07b242..585344b6d33815632b8d477ebb1f0eb9e7f80818 100644 (file)
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 8e3a6b261477d9866ec45c83df4b2766098d281d..4c87276c8ddba2f2a6f072cb536e84f8b7a6d256 100644 (file)
@@ -1503,6 +1503,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1880,6 +1883,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
        { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
@@ -1994,6 +1998,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2088,6 +2093,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index 9373f535dfe9908c14b8ff04afbe90a981693a54..32039235cfeea6e9025c2ce6d0eed04f3294b487 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS    0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_VENDOR_ID_AVERMEDIA                0x07ca
 #define USB_DEVICE_ID_AVER_FM_MR800    0xb800
 
+#define USB_VENDOR_ID_AXENTIA          0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
+
 #define USB_VENDOR_ID_BAANTO           0x2453
 #define USB_DEVICE_ID_BAANTO_MT_190W2  0x0100
 
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
+#define USB_VENDOR_ID_MADCATZ          0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
+
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE       0x0600
 
+#define USB_VENDOR_ID_SENNHEISER       0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
+
 #define USB_VENDOR_ID_SIGMA_MICRO      0x1c4f
 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD     0x0002
 
 #define USB_VENDOR_ID_ZYDACRON 0x13EC
 #define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL  0x0006
 
+#define USB_VENDOR_ID_ZYTRONIC         0x14c8
+#define USB_DEVICE_ID_ZYTRONIC_ZXY100  0x0005
+
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 
index 132b0019365eed9057eeaaf0a3b222eece0853ad..5301006f6c15fb0246d42073c79931259db9312a 100644 (file)
@@ -301,6 +301,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+               USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        {}
 };
 
index 5e8a7ed42344d67d1c874fe3e3242766aed1b537..0f9c146fc00d7391932bcaef9df70e4fbc55541e 100644 (file)
@@ -436,27 +436,37 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
 
 static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
 {
-       struct dj_report dj_report;
+       struct dj_report *dj_report;
+       int retval;
 
-       memset(&dj_report, 0, sizeof(dj_report));
-       dj_report.report_id = REPORT_ID_DJ_SHORT;
-       dj_report.device_index = 0xFF;
-       dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
-       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+       dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+       if (!dj_report)
+               return -ENOMEM;
+       dj_report->report_id = REPORT_ID_DJ_SHORT;
+       dj_report->device_index = 0xFF;
+       dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+       retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+       kfree(dj_report);
+       return retval;
 }
 
 static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
                                          unsigned timeout)
 {
-       struct dj_report dj_report;
+       struct dj_report *dj_report;
+       int retval;
 
-       memset(&dj_report, 0, sizeof(dj_report));
-       dj_report.report_id = REPORT_ID_DJ_SHORT;
-       dj_report.device_index = 0xFF;
-       dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
-       dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
-       dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
-       return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+       dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+       if (!dj_report)
+               return -ENOMEM;
+       dj_report->report_id = REPORT_ID_DJ_SHORT;
+       dj_report->device_index = 0xFF;
+       dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+       dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+       dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+       retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+       kfree(dj_report);
+       return retval;
 }
 
 
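Both helpers move the dj_report off the kernel stack onto the heap before handing it to the output path. Note that kzalloc(sizeof(dj_report), GFP_KERNEL) sizes the allocation by the pointer variable rather than by the structure; the usual sizeof(*dj_report) spelling avoids that class of mistake, as in this userspace sketch (calloc stands in for kzalloc, and the struct layout is simplified for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Field layout simplified; only the size relationships matter here. */
struct dj_report {
	unsigned char report_id;
	unsigned char device_index;
	unsigned char report_type;
	unsigned char report_params[12];
};

int main(void)
{
	struct dj_report *r;

	/* sizeof(r) is the pointer size; sizeof(*r) is the structure size. */
	printf("sizeof(r) = %zu, sizeof(*r) = %zu\n", sizeof(r), sizeof(*r));

	r = calloc(1, sizeof(*r));	/* tracks the pointed-to type automatically */
	if (!r)
		return 1;

	r->report_id    = 0x20;		/* report id value is illustrative */
	r->device_index = 0xff;
	free(r);
	return 0;
}
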
index 7cf3ffe4b7bc26f4eeda3f2167c1f88f6e46e29b..40ac6654f1d19f5d1bf5b02dacfd4d7d5f98c14e 100644 (file)
@@ -426,8 +426,10 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                __set_bit(EV_ABS, input->evbit);
 
                input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
-               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
-               input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+                                    4, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+                                    4, 0);
                input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
 
                /* Note: Touch Y position from the device is inverted relative
index 6e3332a99976d669e008131e0851ae8788113749..76479246d4ee35ef537cf66e83fe5c9f1a6683af 100644 (file)
@@ -1048,6 +1048,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
                        USB_DEVICE_ID_XIROKU_CSR2) },
 
+       /* Zytronic panels */
+       { .driver_data = MT_CLS_SERIAL,
+               MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
+                       USB_DEVICE_ID_ZYTRONIC_ZXY100) },
+
        /* Generic MT device */
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
        { }
index 0f20fd17cf066ad6b32a868dd8069c0be83077cf..0108c5991a0417679d04a9d048d757d9e3f178f4 100644 (file)
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
        depends on USB
 
 config USB_HID
-       tristate "USB Human Interface Device (full HID) support"
+       tristate "USB HID transport layer"
        default y
        depends on USB && INPUT
        select HID
        ---help---
-         Say Y here if you want full HID support to connect USB keyboards,
+         Say Y here if you want to connect USB keyboards,
          mice, joysticks, graphic tablets, or any other HID based devices
          to your computer via USB, as well as Uninterruptible Power Supply
          (UPS) and monitor control devices.
@@ -81,4 +81,4 @@ config USB_MOUSE
 
 endmenu
 
-
+endmenu
index 0597ee604f6e1d096bf1404a6161949df73de154..903eef3d3e10034f8e36974ba4e95ee746438702 100644 (file)
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
index f082e48ab11395e3f99545d3feea2b3def29d32a..2cde9ecf7731b6a66bd745997171300b799a35a7 100644 (file)
@@ -8,7 +8,7 @@
  *
  * Based on hdaps.c driver:
  * Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
  *
  * Fan control based on smcFanControl:
  * Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
        int i;
 
        if (send_command(cmd) || send_argument(key)) {
-               pr_warn("%s: read arg fail\n", key);
+               pr_warn("%.4s: read arg fail\n", key);
                return -EIO;
        }
 
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 
        for (i = 0; i < len; i++) {
                if (__wait_status(0x05)) {
-                       pr_warn("%s: read data fail\n", key);
+                       pr_warn("%.4s: read data fail\n", key);
                        return -EIO;
                }
                buffer[i] = inb(APPLESMC_DATA_PORT);
index b9d512331ed49561331b638b78f29e5120288b39..637c51c11b44ca774057404b6e5326a4ba1de31d 100644 (file)
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
        return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
+struct tjmax {
+       char const *id;
+       int tjmax;
+};
+
+static struct tjmax __cpuinitconst tjmax_table[] = {
+       { "CPU D410", 100000 },
+       { "CPU D425", 100000 },
+       { "CPU D510", 100000 },
+       { "CPU D525", 100000 },
+       { "CPU N450", 100000 },
+       { "CPU N455", 100000 },
+       { "CPU N470", 100000 },
+       { "CPU N475", 100000 },
+       { "CPU  230", 100000 },
+       { "CPU  330", 125000 },
+};
+
 static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
                                  struct device *dev)
 {
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
        int err;
        u32 eax, edx;
        struct pci_dev *host_bridge;
+       int i;
+
+       /* explicit tjmax table entries override heuristics */
+       for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+               if (strstr(c->x86_model_id, tjmax_table[i].id))
+                       return tjmax_table[i].tjmax;
+       }
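
For context, a hedged illustration of how the new override resolves; the model string below is hypothetical, and the table values are millidegrees Celsius, matching the 90000/100000 literals used elsewhere in this function:

/*
 * Example: on a part whose c->x86_model_id reads something like
 * "Intel(R) Atom(TM) CPU D525   @ 1.80GHz", strstr() matches the
 * "CPU D525" entry and adjust_tjmax() returns 100000 (100 C in
 * millidegrees) before any of the MSR/host-bridge heuristics below
 * are consulted.
 */
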
 
        /* Early chips have no MSR for TjMax */
 
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
 
        /* Atom CPUs */
 
-       if (c->x86_model == 0x1c) {
+       if (c->x86_model == 0x1c || c->x86_model == 0x26
+           || c->x86_model == 0x27) {
                usemsr_ee = 0;
 
                host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
                        tjmax = 90000;
 
                pci_dev_put(host_bridge);
+       } else if (c->x86_model == 0x36) {
+               usemsr_ee = 0;
+               tjmax = 100000;
        }
 
        if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
         * sensors. We check this bit only, all the early CPUs
         * without thermal sensors will be filtered out.
         */
-       if (!cpu_has(c, X86_FEATURE_DTS))
+       if (!cpu_has(c, X86_FEATURE_DTHERM))
                return;
 
        if (!pdev) {
@@ -765,14 +794,14 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 };
 
 static const struct x86_cpu_id coretemp_ids[] = {
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+       { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
 
 static int __init coretemp_init(void)
 {
-       int i, err = -ENODEV;
+       int i, err;
 
        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
index 9691f664c76eb236eba72407990cc5b5aaadbf6c..e7d234b59312f7ed479c9fcc0ed309500e230c93 100644 (file)
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
                data->fan_rpm_control = true;
                break;
        default:
-               mutex_unlock(&data->update_lock);
-               return -EINVAL;
+               count = -EINVAL;
+               goto err;
        }
 
-       read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+       result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+       if (result) {
+               count = result;
+               goto err;
+       }
 
        if (data->fan_rpm_control)
                conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
                conf_reg &= ~0x80;
 
        i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
        mutex_unlock(&data->update_lock);
        return count;
 }
index e7701d99f8e8598207a18de6b6ad7f48a0796073..f1de3979181fd22517c1f068a57d4526cb0ce6e8 100644 (file)
@@ -2341,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
 
        /* Start monitoring */
        it87_write_value(data, IT87_REG_CONFIG,
-                        (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+                        (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
                         | (update_vbat ? 0x41 : 0x01));
 }
 
index a9bfd6736d9a8b46b2ac143757e695ec0b695b74..e72ba5d2a8248e04ebf4bf65662007e57b960ca3 100644 (file)
@@ -590,6 +590,6 @@ abort:
 
 module_i2c_driver(jc42_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("JC42 driver");
 MODULE_LICENSE("GPL");
index d264937c7f5e09f8fa9c2cf453d6c4e4b37bb3bf..bd75d2415432dfefada86bff1471a9cc016cc032 100644 (file)
@@ -567,6 +567,6 @@ static struct i2c_driver pem_driver = {
 
 module_i2c_driver(pem_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
 MODULE_LICENSE("GPL");
index 069b7d34d8f9216252678a056efcf5203c02e114..77476a575c4e6340b5987ff7a50e600c42c55b13 100644 (file)
@@ -292,6 +292,6 @@ static struct i2c_driver ltc4261_driver = {
 
 module_i2c_driver(ltc4261_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("LTC4261 driver");
 MODULE_LICENSE("GPL");
index 822261be84dd5ed65639ebf31e04f8ecf0edb364..019427d7a5fd8be8f3bc1fbc1550f375778f34fa 100644 (file)
@@ -692,6 +692,6 @@ static struct i2c_driver max16065_driver = {
 
 module_i2c_driver(max16065_driver);
 
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
 MODULE_DESCRIPTION("MAX16065 driver");
 MODULE_LICENSE("GPL");
index 61c9cf15fa52ecd50cb1a1b5421c8fef3666f24c..1201a15784c3a0eec329affa6f6edf1546df0b51 100644 (file)
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;
 
-               ret = hwspin_lock_register_single(hwlock, i);
+               ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
 
 reg_failed:
        while (--i >= 0)
-               hwspin_lock_unregister_single(i);
+               hwspin_lock_unregister_single(base_id + i);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
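
A tiny worked example of the ID arithmetic being fixed here, with made-up numbers and the unrelated arguments elided:

/*
 * Registering a bank with base_id = 8 and num_locks = 4 must expose the
 * global IDs 8, 9, 10, 11:
 *
 *   for (i = 0; i < 4; i++)
 *           hwspin_lock_register_single(hwlock, base_id + i);   // 8..11
 *
 * The old code passed the bank-local "i", so every bank claimed IDs 0..3,
 * and the failure path likewise unregistered another bank's locks; both
 * call sites now offset by base_id.
 */
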
index beb2491db274ade737e785931920c2ef2b10771b..a0edd98542189680073c04b933bf855d0f492aea 100644 (file)
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
          This driver can also be built as a module.  If so, the module
          will be called i2c-mux-pca954x.
 
+config I2C_MUX_PINCTRL
+       tristate "pinctrl-based I2C multiplexer"
+       depends on PINCTRL
+       help
+         If you say yes to this option, support will be included for an I2C
+         multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+         This is useful for SoCs whose I2C module's signals can be routed to
+         different sets of pins at run-time.
+
+         This driver can also be built as a module. If so, the module will be
+         called i2c-mux-pinctrl.
+
 endmenu
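
To make the help text concrete, here is a minimal, hypothetical board-code sketch using the platform-data path of the driver added below. The field names (parent_bus_num, base_bus_num, bus_count, pinctrl_states, pinctrl_state_idle) are the ones the new probe routine reads; the authoritative layout lives in <linux/i2c-mux-pinctrl.h>, so treat this as an illustration rather than a drop-in:

#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>

/* Two pin-multiplexed branches of parent bus 0, plus an optional idle state. */
static const char *board_i2c_mux_states[] = { "ddc", "pta" };

static struct i2c_mux_pinctrl_platform_data board_i2c_mux_pdata = {
        .parent_bus_num     = 0,                    /* adapter whose pins are re-routed */
        .base_bus_num       = 0,                    /* 0: let the core number the children */
        .bus_count          = 2,
        .pinctrl_states     = board_i2c_mux_states,
        .pinctrl_state_idle = "idle",               /* omit to skip the deselect callback */
};

static struct platform_device board_i2c_mux_device = {
        .name = "i2c-mux-pinctrl",
        .id   = -1,
        .dev  = { .platform_data = &board_i2c_mux_pdata, },
};

Board init code would then call platform_device_register(&board_i2c_mux_device); device-tree users instead describe the same setup with pinctrl-names and an i2c-parent phandle, as parsed by i2c_mux_pinctrl_parse_dt() below.
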
index 5826249b29ca4664f32a0f1e75e97bc70056b3c7..76da8692afff037074041747898202cbedced244 100644 (file)
@@ -4,5 +4,6 @@
 obj-$(CONFIG_I2C_MUX_GPIO)     += i2c-mux-gpio.o
 obj-$(CONFIG_I2C_MUX_PCA9541)  += i2c-mux-pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)  += i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL)  += i2c-mux-pinctrl.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644 (file)
index 0000000..46a6697
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+       struct device *dev;
+       struct i2c_mux_pinctrl_platform_data *pdata;
+       struct pinctrl *pinctrl;
+       struct pinctrl_state **states;
+       struct pinctrl_state *state_idle;
+       struct i2c_adapter *parent;
+       struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+                                 u32 chan)
+{
+       struct i2c_mux_pinctrl *mux = data;
+
+       return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+                                   u32 chan)
+{
+       struct i2c_mux_pinctrl *mux = data;
+
+       return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+                               struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       int num_names, i, ret;
+       struct device_node *adapter_np;
+       struct i2c_adapter *adapter;
+
+       if (!np)
+               return 0;
+
+       mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+       if (!mux->pdata) {
+               dev_err(mux->dev,
+                       "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+               return -ENOMEM;
+       }
+
+       num_names = of_property_count_strings(np, "pinctrl-names");
+       if (num_names < 0) {
+               dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+                       num_names);
+               return num_names;
+       }
+
+       mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+               sizeof(*mux->pdata->pinctrl_states) * num_names,
+               GFP_KERNEL);
+       if (!mux->pdata->pinctrl_states) {
+               dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_names; i++) {
+               ret = of_property_read_string_index(np, "pinctrl-names", i,
+                       &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+               if (ret < 0) {
+                       dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+                               ret);
+                       return ret;
+               }
+               if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+                           "idle")) {
+                       if (i != num_names - 1) {
+                               dev_err(mux->dev, "idle state must be last\n");
+                               return -EINVAL;
+                       }
+                       mux->pdata->pinctrl_state_idle = "idle";
+               } else {
+                       mux->pdata->bus_count++;
+               }
+       }
+
+       adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+       if (!adapter_np) {
+               dev_err(mux->dev, "Cannot parse i2c-parent\n");
+               return -ENODEV;
+       }
+       adapter = of_find_i2c_adapter_by_node(adapter_np);
+       if (!adapter) {
+               dev_err(mux->dev, "Cannot find parent bus\n");
+               return -ENODEV;
+       }
+       mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+       put_device(&adapter->dev);
+
+       return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+                                          struct platform_device *pdev)
+{
+       return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+       struct i2c_mux_pinctrl *mux;
+       int (*deselect)(struct i2c_adapter *, void *, u32);
+       int i, ret;
+
+       mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+       if (!mux) {
+               dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       platform_set_drvdata(pdev, mux);
+
+       mux->dev = &pdev->dev;
+
+       mux->pdata = pdev->dev.platform_data;
+       if (!mux->pdata) {
+               ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+               if (ret < 0)
+                       goto err;
+       }
+       if (!mux->pdata) {
+               dev_err(&pdev->dev, "Missing platform data\n");
+               ret = -ENODEV;
+               goto err;
+       }
+
+       mux->states = devm_kzalloc(&pdev->dev,
+                                  sizeof(*mux->states) * mux->pdata->bus_count,
+                                  GFP_KERNEL);
+       if (!mux->states) {
+               dev_err(&pdev->dev, "Cannot allocate states\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mux->busses = devm_kzalloc(&pdev->dev,
+                                  sizeof(*mux->busses) * mux->pdata->bus_count,
+                                  GFP_KERNEL);
+       if (!mux->busses) {
+               dev_err(&pdev->dev, "Cannot allocate busses\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+       if (IS_ERR(mux->pinctrl)) {
+               ret = PTR_ERR(mux->pinctrl);
+               dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+               goto err;
+       }
+       for (i = 0; i < mux->pdata->bus_count; i++) {
+               mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+                                               mux->pdata->pinctrl_states[i]);
+               if (IS_ERR(mux->states[i])) {
+                       ret = PTR_ERR(mux->states[i]);
+                       dev_err(&pdev->dev,
+                               "Cannot look up pinctrl state %s: %d\n",
+                               mux->pdata->pinctrl_states[i], ret);
+                       goto err;
+               }
+       }
+       if (mux->pdata->pinctrl_state_idle) {
+               mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+                                               mux->pdata->pinctrl_state_idle);
+               if (IS_ERR(mux->state_idle)) {
+                       ret = PTR_ERR(mux->state_idle);
+                       dev_err(&pdev->dev,
+                               "Cannot look up pinctrl state %s: %d\n",
+                               mux->pdata->pinctrl_state_idle, ret);
+                       goto err;
+               }
+
+               deselect = i2c_mux_pinctrl_deselect;
+       } else {
+               deselect = NULL;
+       }
+
+       mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+       if (!mux->parent) {
+               dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+                       mux->pdata->parent_bus_num);
+               ret = -ENODEV;
+               goto err;
+       }
+
+       for (i = 0; i < mux->pdata->bus_count; i++) {
+               u32 bus = mux->pdata->base_bus_num ?
+                               (mux->pdata->base_bus_num + i) : 0;
+
+               mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+                                                    mux, bus, i,
+                                                    i2c_mux_pinctrl_select,
+                                                    deselect);
+               if (!mux->busses[i]) {
+                       ret = -ENODEV;
+                       dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+                       goto err_del_adapter;
+               }
+       }
+
+       return 0;
+
+err_del_adapter:
+       for (; i > 0; i--)
+               i2c_del_mux_adapter(mux->busses[i - 1]);
+       i2c_put_adapter(mux->parent);
+err:
+       return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+       struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < mux->pdata->bus_count; i++)
+               i2c_del_mux_adapter(mux->busses[i]);
+
+       i2c_put_adapter(mux->parent);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+       { .compatible = "i2c-mux-pinctrl", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+       .driver = {
+               .name   = "i2c-mux-pinctrl",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+       },
+       .probe  = i2c_mux_pinctrl_probe,
+       .remove = __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
index 8716066a2f2b79c1ddeb1236a9b2091d7bd62f63..bcb507b0cfd4c22f4b9b988ac9c789f3e27dc3cf 100644 (file)
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
  */
 static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 {
-       unsigned long cycle_time;
+       unsigned long cycle_time = 0;
        int use_dma_info = 0;
        const u8 xfer_mode = drive->dma_mode;
 
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 
        ide_set_drivedata(drive, (void *)cycle_time);
 
-       printk("%s: %s selected (peak %dMB/s)\n", drive->name,
-               ide_xfer_verbose(xfer_mode),
-               2000 / (unsigned long)ide_get_drivedata(drive));
+       printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
+              drive->name, ide_xfer_verbose(xfer_mode),
+              2000 / (cycle_time ? cycle_time : (unsigned long) -1));
 }
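
For reference, the peak figure printed above is simple arithmetic on the programmed cycle time; the 250 ns value below is only an example:

/*
 * Two bytes move per cycle_time nanoseconds on the 16-bit IDE data path, so
 *   peak = 2 bytes / (cycle_time * 1e-9 s) = 2000 / cycle_time  MB/s
 * e.g. a 250 ns multiword-DMA cycle prints "peak 8MB/s". The
 * (cycle_time ? cycle_time : -1) guard keeps the division sane when no DMA
 * timing was ever programmed.
 */
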
 
 static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
        .dma_test_irq           = icside_dma_test_irq,
        .dma_lost_irq           = ide_dma_lost_irq,
 };
-#else
-#define icside_v6_dma_ops NULL
 #endif
 
 static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
 static const struct ide_port_info icside_v6_port_info __initdata = {
        .init_dma               = icside_dma_off_init,
        .port_ops               = &icside_v6_no_dma_port_ops,
-       .dma_ops                = &icside_v6_dma_ops,
        .host_flags             = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
        .mwdma_mask             = ATA_MWDMA2,
        .swdma_mask             = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 
        ecard_set_drvdata(ec, state);
 
+#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
        if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
                d.init_dma = icside_dma_init;
                d.port_ops = &icside_v6_port_ops;
-       } else
-               d.dma_ops = NULL;
+               d.dma_ops  = &icside_v6_dma_ops;
+       }
+#endif
 
        ret = ide_host_register(host, &d, hws);
        if (ret)
index 28e344ea514cc18ba8060c6cb6d6d2beb0a2a70b..f1e922e2479af988f7da82b838a1b337414cd8f2 100644 (file)
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
 {
        int *is_kme = priv_data;
 
-       if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
+       if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+           != IO_DATA_PATH_WIDTH_8) {
                pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
                pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
        }
index d0f59c3f87eff08c9af04bd7d13f2b91e6f3ea17..a759a4c4bb0a90614486f737a4ad2cf53312ac2e 100644 (file)
@@ -169,6 +169,38 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .enter = &intel_idle },
 };
 
+static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+       { /* MWAIT C0 */ },
+       { /* MWAIT C1 */
+               .name = "C1-IVB",
+               .desc = "MWAIT 0x00",
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       { /* MWAIT C2 */
+               .name = "C3-IVB",
+               .desc = "MWAIT 0x10",
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 59,
+               .target_residency = 156,
+               .enter = &intel_idle },
+       { /* MWAIT C3 */
+               .name = "C6-IVB",
+               .desc = "MWAIT 0x20",
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 80,
+               .target_residency = 300,
+               .enter = &intel_idle },
+       { /* MWAIT C4 */
+               .name = "C7-IVB",
+               .desc = "MWAIT 0x30",
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 87,
+               .target_residency = 300,
+               .enter = &intel_idle },
+};
+
 static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
        { /* MWAIT C0 */ },
        { /* MWAIT C1 */
@@ -347,6 +379,10 @@ static const struct idle_cpu idle_cpu_snb = {
        .state_table = snb_cstates,
 };
 
+static const struct idle_cpu idle_cpu_ivb = {
+       .state_table = ivb_cstates,
+};
+
 #define ICPU(model, cpu) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -362,6 +398,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
        ICPU(0x2f, idle_cpu_nehalem),
        ICPU(0x2a, idle_cpu_snb),
        ICPU(0x2d, idle_cpu_snb),
+       ICPU(0x3a, idle_cpu_ivb),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
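
For clarity, expanding the new table entry through the ICPU() macro defined a few lines above (no new information, just the macro spelled out):

/*
 * ICPU(0x3a, idle_cpu_ivb) ==
 *   { X86_VENDOR_INTEL, 6, 0x3a, X86_FEATURE_MWAIT, (unsigned long)&idle_cpu_ivb }
 * i.e. family-6, model-0x3a parts that advertise MWAIT are bound to the
 * ivb_cstates table added earlier in this file.
 */
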
index 56eecefcec7581743c274912446b7cec052ef9cd..2ec93da41e2c49164b5f905572fa6f314edf1968 100644 (file)
@@ -8,8 +8,7 @@ menuconfig IIO
        help
          The industrial I/O subsystem provides a unified framework for
          drivers for many different types of embedded sensors using a
-         number of different physical interfaces (i2c, spi, etc). See
-         Documentation/iio for more information.
+         number of different physical interfaces (i2c, spi, etc).
 
 if IIO
 
index 1ddd8861c71b543f449ddc5794af2db5a6f0c241..4f947e4377eff4fb6cfbec8d29fd18230e446555 100644 (file)
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
         * New channel registration method - relies on the fact a group does
         * not need to be initialized if its name is NULL.
         */
-       INIT_LIST_HEAD(&indio_dev->channel_attr_list);
        if (indio_dev->channels)
                for (i = 0; i < indio_dev->num_channels; i++) {
                        ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 static void iio_dev_release(struct device *device)
 {
        struct iio_dev *indio_dev = dev_to_iio_dev(device);
-       cdev_del(&indio_dev->chrdev);
+       if (indio_dev->chrdev.dev)
+               cdev_del(&indio_dev->chrdev);
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                iio_device_unregister_trigger_consumer(indio_dev);
        iio_device_unregister_eventset(indio_dev);
        iio_device_unregister_sysfs(indio_dev);
        iio_device_unregister_debugfs(indio_dev);
+
+       ida_simple_remove(&iio_ida, indio_dev->id);
+       kfree(indio_dev);
 }
 
 static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
                dev_set_drvdata(&dev->dev, (void *)dev);
                mutex_init(&dev->mlock);
                mutex_init(&dev->info_exist_lock);
+               INIT_LIST_HEAD(&dev->channel_attr_list);
 
                dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
                if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
 
 void iio_device_free(struct iio_dev *dev)
 {
-       if (dev) {
-               ida_simple_remove(&iio_ida, dev->id);
-               kfree(dev);
-       }
+       if (dev)
+               put_device(&dev->dev);
 }
 EXPORT_SYMBOL(iio_device_free);
 
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
        mutex_lock(&indio_dev->info_exist_lock);
        indio_dev->info = NULL;
        mutex_unlock(&indio_dev->info_exist_lock);
-       device_unregister(&indio_dev->dev);
+       device_del(&indio_dev->dev);
 }
 EXPORT_SYMBOL(iio_device_unregister);
 subsys_initcall(iio_init);
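
A short sketch of the object lifetime these hunks establish, assuming standard struct device refcounting (a summary, not text from the patch):

/*
 * iio_device_unregister(indio_dev);  // device_del(): removes the sysfs/userspace view
 * iio_device_free(indio_dev);        // put_device(): drops the final reference
 *     -> iio_dev_release()           // runs only once the refcount hits zero:
 *            cdev_del() if a chrdev was ever added,
 *            then ida_simple_remove() and kfree() of the iio_dev itself
 */
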
index 55d5642eb10ada70ec2f87419e5909ae52906323..2e826f9702c6d76cfc895c24466c068a8e042a83 100644 (file)
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 
 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
 {
-       return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+       return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                 (id->qp_type == IB_QPT_UD)) ||
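
Evaluating the old and new expressions for a hypothetical connection request whose QP type does not match shows why the operator change matters:

/*
 * event == IB_CM_REQ_RECEIVED, req_rcvd.qp_type != id->qp_type:
 *   old:  (true || false) || ...   -> first clause already true, mismatch accepted
 *   new:  (true && false) || ...   -> first clause false; the request only passes
 *                                     if one of the later SIDR/UD clauses matches
 */
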
index 55ab284e22f2c02c037ca67825d24e3d8a827208..b18870c455adde5546ec90a4bf356813bbb34432 100644 (file)
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
                struct net_device *pdev;
 
                pdev = ip_dev_find(&init_net, peer_ip);
+               if (!pdev) {
+                       err = -ENODEV;
+                       goto out;
+               }
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                                        n, pdev, 0);
                if (!ep->l2t)
index ee1c577238f7a261c4773c49372ad6ff2c304c0d..3530c41fcd1f28036f0c0a7d951e811bf77752d9 100644 (file)
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
-       props->max_qp_wr           = dev->dev->caps.max_wqes;
+       props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
        props->max_sge             = min(dev->dev->caps.max_sq_sg,
                                         dev->dev->caps.max_rq_sg);
        props->max_cq              = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
        int total_eqs = 0;
        int i, j, eq;
 
-       /* Init eq table */
-       ibdev->eq_table = NULL;
-       ibdev->eq_added = 0;
-
-       /* Legacy mode? */
-       if (dev->caps.comp_pool == 0)
+       /* Legacy mode or comp_pool is not large enough */
+       if (dev->caps.comp_pool == 0 ||
+           dev->caps.num_ports > dev->caps.comp_pool)
                return;
 
        eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
        int i;
-       int total_eqs;
+
+       /* no additional eqs were added */
+       if (!ibdev->eq_table)
+               return;
 
        /* Reset the advertised EQ number */
        ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
                mlx4_release_eq(dev, ibdev->eq_table[i]);
        }
 
-       total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
-       memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
        kfree(ibdev->eq_table);
-
-       ibdev->eq_table = NULL;
-       ibdev->eq_added = 0;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
index e62297cc77cc24396d3460498720508dd3e0fde0..ff36655d23d387953fd9a1fc1b7fb8ffa1bf1153 100644 (file)
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
 
+enum {
+       MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+       MLX4_IB_MAX_HEADROOM     = 2048
+};
+
+#define MLX4_IB_SQ_HEADROOM(shift)     ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
+#define MLX4_IB_SQ_MAX_SPARE           (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
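
Plugging in the constants defined just above gives the concrete reservation that the max_qp_wr and RQ/SQ size checks elsewhere in this series subtract:

/*
 * MLX4_IB_SQ_MAX_SPARE = MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT)
 *                      = (2048 >> 6) + 1
 *                      = 33 WQEs held back from the advertised capabilities
 */
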
+
 struct mlx4_ib_ucontext {
        struct ib_ucontext      ibucontext;
        struct mlx4_uar         uar;
index ceb33327091a8244a8d16d73766c5c2eab3840c1..8d4ed24aef931e1e89186a9a5cdfccbfaf268771 100644 (file)
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                       int is_user, int has_rq, struct mlx4_ib_qp *qp)
 {
        /* Sanity check RQ size before proceeding */
-       if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
-           cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+       if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
+           cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
                return -EINVAL;
 
        if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
        }
 
-       cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
-       cap->max_recv_sge = qp->rq.max_gs;
+       /* leave userspace return values as they were, so as not to break ABI */
+       if (is_user) {
+               cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
+               cap->max_recv_sge = qp->rq.max_gs;
+       } else {
+               cap->max_recv_wr  = qp->rq.max_post =
+                       min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
+               cap->max_recv_sge = min(qp->rq.max_gs,
+                                       min(dev->dev->caps.max_sq_sg,
+                                           dev->dev->caps.max_rq_sg));
+       }
 
        return 0;
 }
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
        int s;
 
        /* Sanity check SQ size before proceeding */
-       if (cap->max_send_wr     > dev->dev->caps.max_wqes  ||
-           cap->max_send_sge    > dev->dev->caps.max_sq_sg ||
+       if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
+           cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
            cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
            sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
                return -EINVAL;
index 85a69c958559b8a1aedf7bab27d3ad2097268656..48970af236794cea69ace19502a874071c1ee0cb 100644 (file)
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
        u32 max_inline_data;
        int max_send_sge;
        int max_recv_sge;
+       int max_srq_sge;
        int max_mr;
        u64 max_mr_size;
        u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
        u32 entry_size;
        u32 max_cnt;
        u32 max_wqe_idx;
-       u32 free_delta;
        u16 dbid;               /* qid, where to ring the doorbell. */
        u32 len;
        dma_addr_t pa;
index a411a4e3193d33dd7b113210868e1d754ea26af8..517ab20b727c51feac5314a64e717adfdedacde4 100644 (file)
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
        u32 rsvd1;
        u32 num_wqe_allocated;
        u32 num_rqe_allocated;
-       u32 free_wqe_delta;
-       u32 free_rqe_delta;
        u32 db_sq_offset;
        u32 db_rq_offset;
        u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
        u32 db_rq_offset;
        u32 db_shift;
 
-       u32 free_rqe_delta;
-       u32 rsvd2;
+       u64 rsvd2;
        u64 rsvd3;
 } __packed;
 
index 9b204b1ba3366d624bfc01213eb122179e295310..71942af4fce94695753dacbb8417ee191aaaf9a6 100644 (file)
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                break;
        case OCRDMA_SRQ_LIMIT_EVENT:
                ib_evt.element.srq = &qp->srq->ibsrq;
-               ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+               ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
                srq_event = 1;
                qp_event = 0;
                break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
                              struct ocrdma_dev_attr *attr,
                              struct ocrdma_mbx_query_config *rsp)
 {
-       int max_q_mem;
-
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_recv_sge = (rsp->max_write_send_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+       attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_inline_data =
            attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
                              sizeof(struct ocrdma_sge));
-       max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
-       /* hw can queue one less then the configured size,
-        * so publish less by one to stack.
-        */
        if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-               dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
                attr->ird = 1;
                attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
                attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
-       } else
-               dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
-       dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+       }
+       dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+       dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+               OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
 }
 
 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
        max_wqe_allocated = 1 << max_wqe_allocated;
        max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
 
-       if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
-               qp->sq.free_delta = 0;
-               qp->rq.free_delta = 1;
-       } else
-               qp->sq.free_delta = 1;
-
        qp->sq.max_cnt = max_wqe_allocated;
        qp->sq.max_wqe_idx = max_wqe_allocated - 1;
 
        if (!attrs->srq) {
                qp->rq.max_cnt = max_rqe_allocated;
                qp->rq.max_wqe_idx = max_rqe_allocated - 1;
-               qp->rq.free_delta = 1;
        }
 }
 
index a20d16eaae71ae5e1bb4da23979db0f26a287f3c..b050e629e9c3bd19d2e089fdd7792a53d49fe2b0 100644 (file)
@@ -26,7 +26,6 @@
  *******************************************************************/
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/idr.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
        sgid->raw[15] = mac_addr[5];
 }
 
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                            bool is_vlan, u16 vlan_id)
 {
        int i;
-       bool found = false;
        union ib_gid new_sgid;
-       int free_idx = OCRDMA_MAX_SGID;
        unsigned long flags;
 
        memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
                if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
                            sizeof(union ib_gid))) {
                        /* found free entry */
-                       if (!found) {
-                               free_idx = i;
-                               found = true;
-                               break;
-                       }
+                       memcpy(&dev->sgid_tbl[i], &new_sgid,
+                              sizeof(union ib_gid));
+                       spin_unlock_irqrestore(&dev->sgid_lock, flags);
+                       return true;
                } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
                                   sizeof(union ib_gid))) {
                        /* entry already present, no addition is required. */
                        spin_unlock_irqrestore(&dev->sgid_lock, flags);
-                       return;
+                       return false;
                }
        }
-       /* if entry doesn't exist and if table has some space, add entry */
-       if (found)
-               memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
-                      sizeof(union ib_gid));
        spin_unlock_irqrestore(&dev->sgid_lock, flags);
+       return false;
 }
 
 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
        ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
        struct net_device *netdev, *tmp;
        u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
 
        netdev = dev->nic_info.netdev;
 
-       ocrdma_add_default_sgid(dev);
-
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, tmp) {
                if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
                }
        }
        rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+       ocrdma_add_default_sgid(dev);
+       ocrdma_add_vlan_sgids(dev);
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
                                  unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
        struct ib_event gid_event;
        struct ocrdma_dev *dev;
        bool found = false;
+       bool updated = false;
        bool is_vlan = false;
        u16 vid = 0;
 
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
        mutex_lock(&dev->dev_lock);
        switch (event) {
        case NETDEV_UP:
-               ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+               updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
                break;
        case NETDEV_DOWN:
-               found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
-               if (found) {
-                       /* found the matching entry, notify
-                        * the consumers about it
-                        */
-                       gid_event.device = &dev->ibdev;
-                       gid_event.element.port_num = 1;
-                       gid_event.event = IB_EVENT_GID_CHANGE;
-                       ib_dispatch_event(&gid_event);
-               }
+               updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
                break;
        default:
                break;
        }
+       if (updated) {
+               /* GID table updated, notify the consumers about it */
+               gid_event.device = &dev->ibdev;
+               gid_event.element.port_num = 1;
+               gid_event.event = IB_EVENT_GID_CHANGE;
+               ib_dispatch_event(&gid_event);
+       }
        mutex_unlock(&dev->dev_lock);
        return NOTIFY_OK;
 }
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
        .notifier_call = ocrdma_inet6addr_event
 };
 
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
 
 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
                                              u8 port_num)
index 7fd80cc0f0374159d5b9de9492bc6bfc64a838e4..c75cbdfa87e7b82a3e3d6cd5d1e7be78b4dd1f19 100644 (file)
@@ -418,6 +418,9 @@ enum {
 
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK          = 0xFFFF,
+       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT        = 16,
+       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK         = 0xFFFF <<
+                               OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
 
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT       = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK        = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
                                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
        OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET     = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK       = 0xFFFF <<
-                               OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+                               OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
 
        OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET              = 16,
        OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK                = 0xFFFF <<
index e9f74d1b48f639f9d4ae6ff31cff0f0fa58bd173..2e2e7aecc9907a87432dce09b218e046a141c124 100644 (file)
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
-       if (index > OCRDMA_MAX_SGID)
+       if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;
 
        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY;
-       attr->max_sge = dev->attr.max_send_sge;
-       attr->max_sge_rd = dev->attr.max_send_sge;
+       attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+       attr->max_sge_rd = 0;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = (dev->attr.max_qp - 1);
-       attr->max_srq_sge = attr->max_sge;
+       attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
                uresp.db_shift = 16;
        }
-       uresp.free_wqe_delta = qp->sq.free_delta;
-       uresp.free_rqe_delta = qp->rq.free_delta;
 
        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
                free_cnt = (q->max_cnt - q->head) + q->tail;
        else
                free_cnt = q->tail - q->head;
-       if (q->free_delta)
-               free_cnt -= q->free_delta;
        return free_cnt;
 }
 
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
            (srq->pd->id * srq->dev->nic_info.db_page_size);
        uresp.db_page_size = srq->dev->nic_info.db_page_size;
        uresp.num_rqe_allocated = srq->rq.max_cnt;
-       uresp.free_rqe_delta = 1;
        if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
                uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                        *stop = true;
                        expand = false;
                }
-       } else
+       } else {
+               *polled = true;
                expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+       }
        return expand;
 }
 
index e6483439f25f0dc6d1a2fac2b4d15e53b9606cb5..633f03d802746908bcb4f7a58adef800b024ce2f 100644 (file)
@@ -28,7 +28,6 @@
 #ifndef __OCRDMA_VERBS_H__
 #define __OCRDMA_VERBS_H__
 
-#include <linux/version.h>
 int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
                     struct ib_send_wr **bad_wr);
 int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
index 5c1bc995e5603ced273bc4731ff67ff88eaf6f76..f10221f40803959198a3b85e21b8b57ce01ea60a 100644 (file)
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
 
                skb_frag_size_set(frag, size);
                skb->data_len += size;
-               skb->truesize += size;
+               skb->truesize += PAGE_SIZE;
        } else
                skb_put(skb, length);
 
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
+       int tailroom;
        u64 *mapping;
 
-       if (ipoib_ud_need_sg(priv->max_ib_mtu))
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                buf_size = IPOIB_UD_HEAD_SIZE;
-       else
+               tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+       } else {
                buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+               tailroom = 0;
+       }
 
-       skb = dev_alloc_skb(buf_size + 4);
+       skb = dev_alloc_skb(buf_size + tailroom + 4);
        if (unlikely(!skb))
                return NULL;
 
index 57d19d4e0a2d94a22f68d13c6f0ee44a9ab8807b..c96653b58867deb406a13913774a7e35c074dec7 100644 (file)
@@ -282,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->button_irq,
                                     NULL, as5011_button_interrupt,
-                                    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     "as5011_button", as5011);
        if (error < 0) {
                dev_err(&client->dev,
@@ -296,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
 
        error = request_threaded_irq(as5011->axis_irq, NULL,
                                     as5011_axis_interrupt,
-                                    plat_data->axis_irqflags,
+                                    plat_data->axis_irqflags | IRQF_ONESHOT,
                                     "as5011_joystick", as5011);
        if (error) {
                dev_err(&client->dev,
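
The IRQF_ONESHOT additions here and in the input-driver hunks that follow share one rationale: with a NULL primary handler, genirq needs the flag to keep the line masked until the threaded handler finishes, and recent kernels typically refuse such requests with -EINVAL otherwise. A minimal sketch of the pattern, all names hypothetical:

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
        /* slow (e.g. I2C) work runs here, in process context */
        return IRQ_HANDLED;
}

/* In probe: NULL primary handler, so IRQF_ONESHOT must accompany the trigger. */
error = request_threaded_irq(client->irq, NULL, demo_thread_fn,
                             IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                             "demo-dev", data);
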
index ee16fb67b7ae235bd9469bc50b9b0f588b99a2e4..83811e45d6339b013cc4f9cc16b90c5c66ba8af8 100644 (file)
@@ -142,6 +142,7 @@ static const struct xpad_device {
        { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
        { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
        { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+       { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
        { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
        { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
        { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+       { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
 };
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
+       { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harmonix Rock Band Guitar and Drums */
-       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
        { }
 };
 
index 64a0ca4c92f3376562fe07c8f4855ce889417b1d..0d77f6c84950f7d921e03ae0e078a3c788b72bb6 100644 (file)
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
        }
 
        error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
-                       IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_mem;
index caa218a51b5ac94bd89cade097a41cd926251dcd..7613f1cac9517c1ecf30e18f2946c872c3e25252 100644 (file)
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
 
        error = request_threaded_irq(client->irq, NULL,
                                     mpr_touchkey_interrupt,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->dev.driver->name, mpr121);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
index 0b7b2f891752061a50ab92745e67e8025c146523..ca68f2992d7292ebdebafc0d6947e201448fc623 100644 (file)
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
        msleep(QT1070_RESET_TIME);
 
        err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
-               IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+                                  IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+                                  client->dev.driver->name, data);
        if (err) {
                dev_err(&client->dev, "fail to request irq\n");
                goto err_free_mem;
index 3afea3f897182adba5c1439dc38e7a34126fa440..c355cdde8d223e0f7184c297df95d41f3048fb43 100644 (file)
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
 
                error = request_threaded_irq(chip->irqnum, NULL,
                                             tca6416_keys_isr,
-                                            IRQF_TRIGGER_FALLING,
+                                            IRQF_TRIGGER_FALLING |
+                                               IRQF_ONESHOT,
                                             "tca6416-keypad", chip);
                if (error) {
                        dev_dbg(&client->dev,
index 5f87b28b31920b92caf1644413c8cab7fe4da66a..893869b29ed9895c0f1998fbb2068ce7a41ef402 100644 (file)
@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
                client->irq = gpio_to_irq(client->irq);
 
        error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, keypad_data);
        if (error) {
                dev_dbg(&client->dev,
index a4a445fb7020bcf1df98d1a36e958c7e70d83b39..4c34f21fbe2dff4a8b823e48dd96fb4635a81d16 100644 (file)
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad press key irq\n");
                goto error_irq_press;
        }
 
-       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
-                                    dev_name(dev), kp);
+       error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+                                    IRQF_ONESHOT, dev_name(dev), kp);
        if (error < 0) {
                dev_err(kp->dev, "Could not allocate keypad release key irq\n");
                goto error_irq_release;
index 0ac75bbad4d69d61e64719d1a2550778f121f599..2e5d5e1de64787f17c2349e1a3c575144411518f 100644 (file)
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
        struct ad714x_platform_data *plat_data = dev->platform_data;
        struct ad714x_chip *ad714x;
        void *drv_mem;
+       unsigned long irqflags;
 
        struct ad714x_button_drv *bt_drv;
        struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
                alloc_idx++;
        }
 
+       irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+       irqflags |= IRQF_ONESHOT;
+
        error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
-                               plat_data->irqflags ?
-                                       plat_data->irqflags : IRQF_TRIGGER_FALLING,
-                               "ad714x_captouch", ad714x);
+                                    irqflags, "ad714x_captouch", ad714x);
        if (error) {
                dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
                goto err_unreg_dev;
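
The ad714x hunk above also pulls the flag computation into a local variable so IRQF_ONESHOT can be OR-ed in cleanly; the "?:" form with an empty middle operand is the GNU extension meaning "use the left-hand value if it is non-zero". A hedged sketch of the same fallback-then-augment pattern, with a hypothetical platform-data structure:

#include <linux/interrupt.h>

struct foo_platform_data {
	unsigned long irqflags;		/* board-specific trigger, may be 0 */
};

static unsigned long foo_irqflags(const struct foo_platform_data *pdata)
{
	/* Fall back to a falling-edge trigger if the board code left this at 0 ... */
	unsigned long flags = pdata->irqflags ?: IRQF_TRIGGER_FALLING;

	/* ... and always request one-shot behaviour for the threaded handler. */
	return flags | IRQF_ONESHOT;
}
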
index 35083c6836c351ba5110f6fe3ec0cecc98b074e9..c1313d8535c349993f0bb64ba3940ffe3e32b1d2 100644 (file)
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
        /* REVISIT:  flush the event queue? */
 
        status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
-                       IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+                                     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                     dev_name(&pdev->dev), keys);
        if (status < 0)
                goto fail2;
 
index 2cf681d98c0d2d6433a6f8d11c502935d40c202a..d528c23e194f6efcbe7efbe8651d218aa6ef7965 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI  0x0252
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO   0x0253
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS   0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+       /* MacbookPro10,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
        /* Terminating entry */
        {}
 };
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+       },
        {}
 };
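
Supporting the new MacBookPro10,1 trackpad means touching two tables in bcm5974: the usb_device_id list, so the driver binds to the new product IDs, and the per-model config table that supplies the pressure, width and coordinate ranges. For context, this is roughly how a USB input driver advertises the IDs it handles to the hotplug machinery; the vendor and product values below are placeholders, and bcm5974 itself wraps them in its BCM5974_DEVICE() macro so interface class and protocol are matched too:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID	0x05ac	/* placeholder vendor id */
#define EXAMPLE_PRODUCT_ID	0x0262	/* placeholder product id */

static const struct usb_device_id example_table[] = {
	{ USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_table);
MODULE_LICENSE("GPL");
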
 
index cad5602d3ce45a525ca70d9cc1cf7f766348c24e..8b31473a81fe9bd6056dd95363405f0e7610c4ee 100644 (file)
@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
 
                rep_data[0] = 12;
                result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
-                                         rep_data[0], &rep_data, 2,
+                                         rep_data[0], rep_data, 2,
                                          WAC_MSG_RETRIES);
 
                if (result >= 0 && rep_data[1] > 2)
@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                break;
 
                        case HID_USAGE_CONTACTMAX:
-                               wacom_retrieve_report_data(intf, features);
+                               /* leave touch_max as is if predefined */
+                               if (!features->touch_max)
+                                       wacom_retrieve_report_data(intf, features);
                                i++;
                                break;
                        }
index e2482b40da5198fdb1406e135fff827d049e5895..bd4eb42776973b211aee79e40f51b47e2f5e8d27 100644 (file)
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
                        AD7879_TMR(ts->pen_down_acc_interval);
 
        err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
-                                  IRQF_TRIGGER_FALLING,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                   dev_name(dev), ts);
        if (err) {
                dev_err(dev, "irq %d busy?\n", ts->irq);
index 42e645062c208e842b6feb5ecaf6824004c49a0c..25fd0561a17d2f1afe83a84c28864e81510acbba 100644 (file)
@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
                goto err_free_object;
 
        error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
-                       pdata->irqflags, client->dev.driver->name, data);
+                                    pdata->irqflags | IRQF_ONESHOT,
+                                    client->dev.driver->name, data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
                goto err_free_object;
index f2d03c06c2da6660c30f64c177164dafb432385f..5c487d23f11cbaedd29e65c5143ba611a90e7d78 100644 (file)
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
        input_set_drvdata(in_dev, bu21013_data);
 
        error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
-                                    IRQF_TRIGGER_FALLING | IRQF_SHARED,
+                                    IRQF_TRIGGER_FALLING | IRQF_SHARED |
+                                       IRQF_ONESHOT,
                                     DRIVER_TP, bu21013_data);
        if (error) {
                dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
index 237753ad10318b9fab24b6d559448ea41d492480..464f1bf4b61dcbf62af90196ba28608b6896e6f8 100644 (file)
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
        }
 
        err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
-                                  IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  "touch_reset_key", ts);
        if (err < 0) {
                dev_err(&client->dev,
                        "irq %d busy? error %d\n", client->irq, err);
index 3cd7a837f82b203f41f5ff1aa7cefcf5b3714594..cf299377fc4980e54c02a0bfeb25c41ae2fbc5ec 100644 (file)
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
                             MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
 
        err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
-                                  0, "mrstouch", tsdev);
+                                  IRQF_ONESHOT, "mrstouch", tsdev);
        if (err) {
                dev_err(tsdev->dev, "unable to allocate irq\n");
                goto err_free_mem;
index 72f6ba3a470937d2528207d20c53d182a0b7803f..953b4c105cad75ead98062869d7a6c0cc5237068 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
        input_set_drvdata(input, tsdata);
 
        error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
-                                    IRQF_TRIGGER_FALLING,
+                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                     client->name, tsdata);
        if (error) {
                dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
index 7e74880973591bb61daa6e3317a94e43bb8f5415..368d2c6cf780cb0e9a1ffad750bace5a5d6b6e6c 100644 (file)
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
                goto error_clk;
        }
 
-       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+       error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
                                     dev_name(dev), ts);
        if (error < 0) {
                dev_err(ts->dev, "Could not allocate ts irq\n");
index b6adeaee9cc5f0f226781f59ba79961fd9eb1e0b..5ce3fa8ce6465e049dd415b784eaafe7e0fbf7ed 100644 (file)
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
        tsc2005_stop_scan(ts);
 
        error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
-                                    IRQF_TRIGGER_RISING, "tsc2005", ts);
+                                    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                    "tsc2005", ts);
        if (error) {
                dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
                goto err_free_mem;
index d90a421e9caccbbfc38a3d2eb4dd20ef2a50c77b..625626391f2d39d3802710a1677ac49ef3181c9a 100644 (file)
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
 
+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
                return;
 
        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
-                                        (u32 *)&amd_iommu_unmap_flush);
+                                        &amd_iommu_unmap_flush);
 
        amd_iommu_stats_add(&compl_wait);
        amd_iommu_stats_add(&cnt_map_single);
@@ -547,26 +549,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
        struct amd_iommu_fault fault;
-       volatile u64 *raw;
-       int i;
 
        INC_STATS_COUNTER(pri_requests);
 
-       raw = (u64 *)(iommu->ppr_log + head);
-
-       /*
-        * Hardware bug: Interrupt may arrive before the entry is written to
-        * memory. If this happens we need to wait for the entry to arrive.
-        */
-       for (i = 0; i < LOOP_TIMEOUT; ++i) {
-               if (PPR_REQ_TYPE(raw[0]) != 0)
-                       break;
-               udelay(1);
-       }
-
        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
                pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
                return;
@@ -578,12 +566,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
        fault.tag       = PPR_TAG(raw[0]);
        fault.flags     = PPR_FLAGS(raw[0]);
 
-       /*
-        * To detect the hardware bug we need to clear the entry
-        * to back to zero.
-        */
-       raw[0] = raw[1] = 0;
-
        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 }
 
@@ -595,25 +577,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
        if (iommu->ppr_log == NULL)
                return;
 
+       /* enable ppr interrupts again */
+       writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
        spin_lock_irqsave(&iommu->lock, flags);
 
        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
        while (head != tail) {
+               volatile u64 *raw;
+               u64 entry[2];
+               int i;
 
-               /* Handle PPR entry */
-               iommu_handle_ppr_entry(iommu, head);
+               raw = (u64 *)(iommu->ppr_log + head);
+
+               /*
+                * Hardware bug: Interrupt may arrive before the entry is
+                * written to memory. If this happens we need to wait for the
+                * entry to arrive.
+                */
+               for (i = 0; i < LOOP_TIMEOUT; ++i) {
+                       if (PPR_REQ_TYPE(raw[0]) != 0)
+                               break;
+                       udelay(1);
+               }
 
-               /* Update and refresh ring-buffer state*/
+               /* Avoid memcpy function-call overhead */
+               entry[0] = raw[0];
+               entry[1] = raw[1];
+
+               /*
+                * To detect the hardware bug we need to clear the entry
+                * back to zero.
+                */
+               raw[0] = raw[1] = 0UL;
+
+               /* Update head pointer of hardware ring-buffer */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+               /*
+                * Release iommu->lock because ppr-handling might need to
+                * re-acquire it
+                */
+               spin_unlock_irqrestore(&iommu->lock, flags);
+
+               /* Handle PPR entry */
+               iommu_handle_ppr_entry(iommu, entry);
+
+               spin_lock_irqsave(&iommu->lock, flags);
+
+               /* Refresh ring-buffer information */
+               head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }
 
-       /* enable ppr interrupts again */
-       writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
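
The reworked iommu_poll_ppr_log() above moves the hardware-bug wait into the polling loop, snapshots each two-word entry, clears the slot and advances the head pointer while iommu->lock is held, and only then drops the lock so the PPR handler is free to sleep or take locks of its own; head and tail are re-read afterwards because the log may have advanced in the meantime. A condensed sketch of that snapshot-then-unlock ring-buffer pattern, using hypothetical names and a purely software ring:

#include <linux/spinlock.h>
#include <linux/types.h>

#define RING_ENTRIES	64

struct ring {
	spinlock_t lock;
	u64 log[2 * RING_ENTRIES];	/* two 64-bit words per entry */
	unsigned int head, tail;
};

static void ring_handle_entry(u64 entry[2])
{
	/* stand-in for the real consumer; it may sleep or re-take locks */
}

static void ring_poll(struct ring *r)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	while (r->head != r->tail) {
		u64 *raw = &r->log[2 * r->head];
		u64 entry[2];

		/* Snapshot and clear the slot while the lock is still held. */
		entry[0] = raw[0];
		entry[1] = raw[1];
		raw[0] = raw[1] = 0;
		r->head = (r->head + 1) % RING_ENTRIES;

		/* Drop the lock: the handler must not run under it. */
		spin_unlock_irqrestore(&r->lock, flags);
		ring_handle_entry(entry);
		spin_lock_irqsave(&r->lock, flags);
		/* head and tail may have moved while the lock was released. */
	}
	spin_unlock_irqrestore(&r->lock, flags);
}
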
 
@@ -2250,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
                list_add_tail(&dma_domain->list, &iommu_pd_list);
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 
+               dev_data = get_dev_data(dev);
+
+               if (!dev_data->passthrough)
+                       dev->archdata.dma_ops = &amd_iommu_dma_ops;
+               else
+                       dev->archdata.dma_ops = &nommu_dma_ops;
+
                break;
        case BUS_NOTIFY_DEL_DEVICE:
 
index c56790375e0fd25d66c9b09cb3cfa694fae05acc..a33612f3206f25f1146df2c84b254b372b306382 100644 (file)
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf;                     /* largest PCI device id we have
                                           to handle */
 LIST_HEAD(amd_iommu_unity_map);                /* a list of required unity mappings
                                           we find in ACPI */
-bool amd_iommu_unmap_flush;            /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush;             /* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);             /* list of all AMD IOMMUs in the
                                           system */
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
        if (!iommu->dev)
                return 1;
 
+       iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+                                               PCI_DEVFN(0, 0));
+
        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
 {
        int i, j;
        u32 ioc_feature_control;
-       struct pci_dev *pdev = NULL;
+       struct pci_dev *pdev = iommu->root_pdev;
 
        /* RD890 BIOSes may not have completely reconfigured the iommu */
-       if (!is_rd890_iommu(iommu->dev))
+       if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;
 
        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */
-       pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
-       if (!pdev)
-               return;
 
        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
 
-       pci_dev_put(pdev);
-
        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
@@ -1644,6 +1641,8 @@ static int __init amd_iommu_init(void)
 
        amd_iommu_init_api();
 
+       x86_platform.iommu_shutdown = disable_iommus;
+
        if (iommu_pass_through)
                goto out;
 
@@ -1652,8 +1651,6 @@ static int __init amd_iommu_init(void)
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
-       x86_platform.iommu_shutdown = disable_iommus;
-
 out:
        return ret;
 
index 2452f3b7173619c449c04f025e2e2ff73cc44f29..c1b1d489817e2b667edbbeb95d36eae1d503a994 100644 (file)
@@ -481,6 +481,9 @@ struct amd_iommu {
        /* Pointer to PCI device of this IOMMU */
        struct pci_dev *dev;
 
+       /* Cache pdev to root device for resume quirks */
+       struct pci_dev *root_pdev;
+
        /* physical address of MMIO space */
        u64 mmio_phys;
        /* virtual address of MMIO space */
@@ -649,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
  */
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
 
 /* Smallest number of PASIDs supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasids;
index ecd679043d7740e6883aae9cbee68da6321fedc1..3f3d09d560ea3c6ce5e3bc7d019e377ac9e63dfe 100644 (file)
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
                return 0;
 
        as->pte_count = devm_kzalloc(smmu->dev,
-                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+                    sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
        if (!as->pte_count) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device PTE cunters\n");
                return -ENOMEM;
        }
-       as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+       as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
        if (!as->pdir_page) {
                dev_err(smmu->dev,
                        "failed to allocate smmu_device page directory\n");
index 1a0ae4445ff2b47c4202359e6bba0b0f5bf8f336..5f21f629b7aebeac0163083a2a83a50f4e53c1a8 100644 (file)
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
                        skb = NULL;
                else if (*debug & DEBUG_SEND_ERR)
                        printk(KERN_DEBUG
-                              "%s ch%d mgr prim(%x) addr(%x) err %d\n",
-                              __func__, ch->nr, hh->prim, ch->addr, ret);
+                              "%s mgr prim(%x) err %d\n",
+                              __func__, hh->prim, ret);
        }
 out:
        mutex_unlock(&st->lmutex);
index 04cb8c88d74b7678d12389898ba0702cf0c17b55..12b2b55c519e7b3038677edfd9c1b4f710493f4c 100644 (file)
@@ -379,7 +379,7 @@ config LEDS_NETXBIG
 
 config LEDS_ASIC3
        bool "LED support for the HTC ASIC3"
-       depends on LEDS_CLASS
+       depends on LEDS_CLASS=y
        depends on MFD_ASIC3
        default y
        help
@@ -390,7 +390,7 @@ config LEDS_ASIC3
 
 config LEDS_RENESAS_TPU
        bool "LED support for Renesas TPU"
-       depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+       depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
        help
          This option enables build of the LED TPU platform driver,
          suitable to drive any TPU channel on newer Renesas SoCs.
index 8ee92c81aec2c1577c3fb1cfd173d8835c2c7725..e663e6f413e989d835067811a992bb62dc316f36 100644 (file)
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
                led_cdev->brightness = led_cdev->brightness_get(led_cdev);
 }
 
-static ssize_t led_brightness_show(struct device *dev, 
+static ssize_t led_brightness_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
index d6860043f6f99f1fc0e4f819681d8fed9089c542..d65353d8d3fcb4ae012460c20bc33f20c2afa84f 100644 (file)
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
        if (!led_cdev->blink_brightness)
                led_cdev->blink_brightness = led_cdev->max_brightness;
 
-       if (led_get_trigger_data(led_cdev) &&
-           delay_on == led_cdev->blink_delay_on &&
-           delay_off == led_cdev->blink_delay_off)
-               return;
-
-       led_stop_software_blink(led_cdev);
-
        led_cdev->blink_delay_on = delay_on;
        led_cdev->blink_delay_off = delay_off;
 
index 41dc76db43118a347e4a8a06923b0fd076a12dc3..a019fbb70880bd402aea095b8b90a8ca26a0e66a 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/reboot.h>
 #include "leds.h"
 
+static int panic_heartbeats;
+
 struct heartbeat_trig_data {
        unsigned int phase;
        unsigned int period;
@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
        unsigned long brightness = LED_OFF;
        unsigned long delay = 0;
 
+       if (unlikely(panic_heartbeats)) {
+               led_set_brightness(led_cdev, LED_OFF);
+               return;
+       }
+
        /* acts like an actual heart beat -- ie thump-thump-pause... */
        switch (heartbeat_data->phase) {
        case 0:
@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+                                    unsigned long code, void *unused)
+{
+       panic_heartbeats = 1;
+       return NOTIFY_DONE;
+}
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
 
 static struct notifier_block heartbeat_panic_nb = {
-       .notifier_call = heartbeat_reboot_notifier,
+       .notifier_call = heartbeat_panic_notifier,
 };
 
 static int __init heartbeat_trig_init(void)
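
The heartbeat trigger had been reusing its reboot callback on the panic notifier chain; the hunk above gives panic its own callback that merely sets a flag, so the timer callback just switches the LED off instead of rearming itself. A minimal sketch of hooking the panic notifier chain, assuming a standalone module of our own:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_in_panic;

static int example_panic_notifier(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	/* Keep it trivial: record the fact, do no real work in panic context. */
	example_in_panic = 1;
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_notifier,
};

static int __init example_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
	return 0;
}

static void __exit example_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &example_panic_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
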
index d039de8322f0a314fca492df4b932289d9033e0d..b58b7a33914abd721e3508835a61afd699221dbf 100644 (file)
@@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->split_io = dm_rh_get_region_size(ms->rh);
        ti->num_flush_requests = 1;
        ti->num_discard_requests = 1;
+       ti->discard_zeroes_data_unsupported = 1;
 
        ms->kmirrord_wq = alloc_workqueue("kmirrord",
                                          WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE) {
-               if (!(bio->bi_rw & REQ_FLUSH))
+               if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
                        dm_rh_dec(ms->rh, map_context->ll);
                return error;
        }
index 7771ed2121820ac50c026f45e2f6c0a288ffba85..69732e03eb3490d636a0183bd22303742ab65c65 100644 (file)
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
                return;
        }
 
+       if (bio->bi_rw & REQ_DISCARD)
+               return;
+
        /* We must inform the log that the sync count has changed. */
        log->type->set_region_sync(log, region, 0);
 
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
        struct bio *bio;
 
        for (bio = bios->head; bio; bio = bio->bi_next) {
-               if (bio->bi_rw & REQ_FLUSH)
+               if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
index 37fdaf81bd1f89abfd28f6a46d2be7ce95f8bcba..68694da0d21d0566a61649339cb3a4f3a3bbc943 100644 (file)
@@ -1245,7 +1245,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
                        cell_release_singleton(cell, bio);
                        cell_release_singleton(cell2, bio);
-                       remap_and_issue(tc, bio, lookup_result.block);
+                       if ((!lookup_result.shared) && pool->pf.discard_passdown)
+                               remap_and_issue(tc, bio, lookup_result.block);
+                       else
+                               bio_endio(bio, 0);
                }
                break;
 
@@ -2292,6 +2295,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
        if (r)
                return r;
 
+       r = dm_pool_commit_metadata(pool->pmd);
+       if (r) {
+               DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+                     __func__, r);
+               return r;
+       }
+
        r = dm_pool_reserve_metadata_snap(pool->pmd);
        if (r)
                DMWARN("reserve_metadata_snap message failed.");
@@ -2621,6 +2631,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = 1;
                ti->num_discard_requests = 1;
+               ti->discard_zeroes_data_unsupported = 1;
        }
 
        dm_put(pool_md);
index 1c2f9048e1ae010864c4d2478c05f9a510c729e1..d5ab4493c8be656ecd28227994d7f7bbe06b8e2d 100644 (file)
@@ -2931,6 +2931,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
                 * can be sane */
                return -EBUSY;
        rdev->data_offset = offset;
+       rdev->new_data_offset = offset;
        return len;
 }
 
@@ -3926,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page)
        return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
 
@@ -3943,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                /* stopping an active array */
                if (atomic_read(&mddev->openers) > 0)
                        return -EBUSY;
-               err = do_md_stop(mddev, 0, 0);
+               err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers) {
                        if (atomic_read(&mddev->openers) > 0)
                                return -EBUSY;
-                       err = do_md_stop(mddev, 2, 0);
+                       err = do_md_stop(mddev, 2, NULL);
                } else
                        err = 0; /* already inactive */
                break;
@@ -3958,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
-                       err = md_set_readonly(mddev, 0);
+                       err = md_set_readonly(mddev, NULL);
                else {
                        mddev->ro = 1;
                        set_disk_ro(mddev->gendisk, 1);
@@ -3968,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        case read_auto:
                if (mddev->pers) {
                        if (mddev->ro == 0)
-                               err = md_set_readonly(mddev, 0);
+                               err = md_set_readonly(mddev, NULL);
                        else if (mddev->ro == 1)
                                err = restart_array(mddev);
                        if (err == 0) {
@@ -5351,15 +5352,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
        int err = 0;
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open) {
+       if (atomic_read(&mddev->openers) > !!bdev) {
                printk("md: %s still in use.\n",mdname(mddev));
                err = -EBUSY;
                goto out;
        }
+       if (bdev)
+               sync_blockdev(bdev);
        if (mddev->pers) {
                __md_stop_writes(mddev);
 
@@ -5381,18 +5384,26 @@ out:
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+                     struct block_device *bdev)
 {
        struct gendisk *disk = mddev->gendisk;
        struct md_rdev *rdev;
 
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open ||
+       if (atomic_read(&mddev->openers) > !!bdev ||
            mddev->sysfs_active) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
                return -EBUSY;
        }
+       if (bdev)
+               /* It is possible IO was issued on some other
+                * open file which was closed before we took ->open_mutex.
+                * As that was not the last close __blkdev_put will not
+                * have called sync_blockdev, so we must.
+                */
+               sync_blockdev(bdev);
 
        if (mddev->pers) {
                if (mddev->ro)
@@ -5466,7 +5477,7 @@ static void autorun_array(struct mddev *mddev)
        err = do_md_run(mddev);
        if (err) {
                printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-               do_md_stop(mddev, 0, 0);
+               do_md_stop(mddev, 0, NULL);
        }
 }
 
@@ -5784,8 +5795,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
-                   (!test_bit(In_sync, &rdev->flags) ||
-                    rdev->raid_disk != info->raid_disk)) {
+                    rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events doesn't
                         * match, so reject it.
                         */
@@ -6482,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        goto done_unlock;
 
                case STOP_ARRAY:
-                       err = do_md_stop(mddev, 0, 1);
+                       err = do_md_stop(mddev, 0, bdev);
                        goto done_unlock;
 
                case STOP_ARRAY_RO:
-                       err = md_set_readonly(mddev, 1);
+                       err = md_set_readonly(mddev, bdev);
                        goto done_unlock;
 
                case BLKROSET:
@@ -6751,7 +6761,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
-                                 name ?: mddev->pers->name);
+                                 name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -7298,6 +7308,7 @@ void md_do_sync(struct mddev *mddev)
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc;
+       struct blk_plug plug;
 
        /* just incase thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7458,7 @@ void md_do_sync(struct mddev *mddev)
        }
        mddev->curr_resync_completed = j;
 
+       blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;
 
@@ -7552,6 +7564,7 @@ void md_do_sync(struct mddev *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
+       blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
        /* tell personality that we are finished */
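
md_do_sync() now wraps its main resync loop in an on-stack struct blk_plug, so requests queued by each iteration are batched and flushed when the plug is finished (or when the task schedules). A minimal sketch of the plugging pattern around a submission loop; submit_one_chunk() is a stand-in for the per-iteration work:

#include <linux/blkdev.h>

static void submit_one_chunk(int i)
{
	/* stand-in: build and submit the bios for chunk i here */
}

static void sync_all(int nr_chunks)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_chunks; i++)
		submit_one_chunk(i);
	/* Flushes whatever is still sitting in the plug list. */
	blk_finish_plug(&plug);
}
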
index 9339e67fcc79a9f961d016d3f80291ce012323bb..61a1833ebaf33ec40afaf752db9a849439c9bd13 100644 (file)
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
        }
 
        {
-               mddev->thread = md_register_thread(multipathd, mddev, NULL);
+               mddev->thread = md_register_thread(multipathd, mddev,
+                                                  "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                                " for %s\n", mdname(mddev));
index 50ed53bf4aa2b1efe1136c2520067ece16bb9b35..fc90c11620adb026c9842d6e95497248d02556e8 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
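
The counter array in this space-map checker grows with the number of metadata blocks, so the kzalloc()/memcpy()/kfree() sequence becomes vzalloc()/memcpy()/vfree(): vmalloc memory is only virtually contiguous, which avoids the high-order page allocations that can fail for large devices. A minimal sketch of the swap, with a hypothetical counter array:

#include <linux/types.h>
#include <linux/vmalloc.h>

static u32 *counts;

static int alloc_counts(unsigned long nr)
{
	/* Zeroed and virtually contiguous; suitable for very large arrays. */
	counts = vzalloc(nr * sizeof(*counts));
	return counts ? 0 : -ENOMEM;
}

static void free_counts(void)
{
	vfree(counts);		/* vfree(NULL) is a no-op, like kfree(NULL) */
	counts = NULL;
}
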
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
index fc469ba9f6277a7c701615a1b3d788a05fa52559..3d0ed53328831627ab6ec79cb85946f36b0fe7a4 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
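
dm_sm_checker_create() and dm_sm_checker_create_fresh() now report failures with ERR_PTR() instead of a bare NULL, and dm_sm_disk_create() above checks the result with IS_ERR()/IS_ERR_OR_NULL() so it can tear down the inner space map on error. A minimal sketch of the ERR_PTR convention, with hypothetical types:

#include <linux/err.h>
#include <linux/slab.h>

struct widget {
	int id;
};

static struct widget *widget_create(int id)
{
	struct widget *w;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode the errno in the pointer */

	w = kmalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);

	w->id = id;
	return w;
}

static int widget_use(int id)
{
	struct widget *w = widget_create(id);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* recover the errno; no NULL checks needed */

	kfree(w);
	return 0;
}
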
 
index 400fe144c0cd1c94dd5c325c5ed690a425b7ece3..e5604b32d91ff0017a5a625cfd8106fca92bfe19 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
index 835de7168cd3ae7d28409e46ebcc281e4b8fe917..cacd008d68644428914b39822417209cd00dddc8 100644 (file)
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                int bad_sectors;
 
                int disk = start_disk + i;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
+               if (disk >= conf->raid_disks * 2)
+                       disk -= conf->raid_disks * 2;
 
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
-       int plugged;
        int first_clone;
        int sectors_handled;
        int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
         * the bad blocks.  Each set of writes gets it's own r1bio
         * with a set of bios attached.
         */
-       plugged = mddev_check_plugged(mddev);
 
        disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
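
raid1's make_request() (and raid10's below) no longer samples the plug state once up front; every queued write now checks mddev_check_plugged() and wakes the md thread immediately when no plug is active, so pending writes cannot sit on the list until some later unplug. A rough sketch of the queue-then-conditionally-wake pattern; the list, the lock and the two helpers are stand-ins for the md internals:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct write_queue {
	spinlock_t lock;
	struct list_head pending;	/* set up elsewhere with INIT_LIST_HEAD() */
};

static bool has_active_plug(void)
{
	return false;			/* stand-in for mddev_check_plugged(mddev) */
}

static void wake_worker(void)
{
	/* stand-in for md_wakeup_thread(mddev->thread) */
}

static void queue_write(struct write_queue *q, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	list_add_tail(item, &q->pending);
	spin_unlock_irqrestore(&q->lock, flags);

	/*
	 * With no blk_plug in effect nothing will kick the worker later,
	 * so wake it for every request queued outside a plug.
	 */
	if (!has_active_plug())
		wake_worker();
}
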
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 
        if (atomic_dec_and_test(&r1_bio->remaining)) {
                /* if we're here, all write(s) have completed, so clean up */
-               md_done_sync(mddev, r1_bio->sectors, 1);
-               put_buf(r1_bio);
+               int s = r1_bio->sectors;
+               if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+                   test_bit(R1BIO_WriteError, &r1_bio->state))
+                       reschedule_retry(r1_bio);
+               else {
+                       put_buf(r1_bio);
+                       md_done_sync(mddev, s, 1);
+               }
        }
 }
 
@@ -2488,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                atomic_set(&r1_bio->remaining, read_targets);
-               for (i = 0; i < conf->raid_disks * 2; i++) {
+               for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
                        bio = r1_bio->bios[i];
                        if (bio->bi_end_io == end_sync_read) {
+                               read_targets--;
                                md_sync_acct(bio->bi_bdev, nr_sectors);
                                generic_make_request(bio);
                        }
@@ -2550,6 +2554,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
        err = -EINVAL;
        spin_lock_init(&conf->device_lock);
        rdev_for_each(rdev, mddev) {
+               struct request_queue *q;
                int disk_idx = rdev->raid_disk;
                if (disk_idx >= mddev->raid_disks
                    || disk_idx < 0)
@@ -2562,6 +2567,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                if (disk->rdev)
                        goto abort;
                disk->rdev = rdev;
+               q = bdev_get_queue(rdev->bdev);
+               if (q->merge_bvec_fn)
+                       mddev->merge_check_needed = 1;
 
                disk->head_position = 0;
        }
@@ -2617,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                goto abort;
        }
        err = -ENOMEM;
-       conf->thread = md_register_thread(raid1d, mddev, NULL);
+       conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid1:%s: couldn't allocate thread\n",
index 987db37cb875ead2041fede756062835158ecf0c..8da6282254c3e822a27702c469b9afce720bda43 100644 (file)
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
-       int plugged;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1239,7 +1238,6 @@ read_again:
         * of r10_bios is recored in bio->bi_phys_segments just as with
         * the read case.
         */
-       plugged = mddev_check_plugged(mddev);
 
        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)
                        continue;
@@ -1423,6 +1423,8 @@ retry_write:
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
+               if (!mddev_check_plugged(mddev))
+                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
-
-       if (do_sync || !mddev->bitmap || !plugged)
-               md_wakeup_thread(mddev->thread);
 }
 
 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
        blk_start_plug(&plug);
        for (;;) {
 
-               flush_pending_writes(conf);
+               if (atomic_read(&mddev->plug_cnt) == 0)
+                       flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, or a replacement
                         * we only need to recover the block if it is set in
                         * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->resync_lock);
        init_waitqueue_head(&conf->wait_barrier);
 
-       conf->thread = md_register_thread(raid10d, mddev, NULL);
+       conf->thread = md_register_thread(raid10d, mddev, "raid10");
        if (!conf->thread)
                goto out;
 
@@ -3475,6 +3481,7 @@ static int run(struct mddev *mddev)
 
        rdev_for_each(rdev, mddev) {
                long long diff;
+               struct request_queue *q;
 
                disk_idx = rdev->raid_disk;
                if (disk_idx < 0)
@@ -3493,6 +3500,9 @@ static int run(struct mddev *mddev)
                                goto out_free_conf;
                        disk->rdev = rdev;
                }
+               q = bdev_get_queue(rdev->bdev);
+               if (q->merge_bvec_fn)
+                       mddev->merge_check_needed = 1;
                diff = (rdev->new_data_offset - rdev->data_offset);
                if (!mddev->reshape_backwards)
                        diff = -diff;
index d26767246d26ad1d2bb9a2da371ab1bbbdf130fc..04348d76bb30fa8831964ea980ec2df912a45f92 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
+               int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (conf->mddev->degraded >= conf->max_degraded)
+               else if (conf->mddev->degraded >= conf->max_degraded) {
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+               } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
                        /* Oh, no!!! */
+                       set_bad = 1;
                        printk_ratelimited(
                                KERN_WARNING
                                "md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               else if (atomic_read(&rdev->read_errors)
+               else if (atomic_read(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
-                       md_error(conf->mddev, rdev);
+                       if (!(set_bad
+                             && test_bit(In_sync, &rdev->flags)
+                             && rdev_set_badblocks(
+                                     rdev, sh->sector, STRIPE_SECTORS, 0)))
+                               md_error(conf->mddev, rdev);
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
 
 finish:
        /* wait for this device to become unblocked */
-       if (conf->mddev->external && unlikely(s.blocked_rdev))
-               md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+       if (unlikely(s.blocked_rdev)) {
+               if (conf->mddev->external)
+                       md_wait_for_blocked_rdev(s.blocked_rdev,
+                                                conf->mddev);
+               else
+                       /* Internal metadata will immediately
+                        * be written by raid5d, so we don't
+                        * need to wait here.
+                        */
+                       rdev_dec_pending(s.blocked_rdev,
+                                        conf->mddev);
+       }
 
        if (s.handle_bad_blocks)
                for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
-       int plugged;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
-       plugged = mddev_check_plugged(mddev);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
+                       mddev_check_plugged(mddev);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
-                       
        }
-       if (!plugged)
-               md_wakeup_thread(mddev->thread);
 
        spin_lock_irq(&conf->device_lock);
        remaining = raid5_dec_bi_phys_segments(bi);
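Dropping the single up-front plug check reflects that the value returned by mddev_check_plugged() is only trustworthy until the task next sleeps; get_active_stripe() and memory allocation can both schedule and unplug, so by the end of make_request() the cached result could be stale and the wakeup skipped. Checking again each time a stripe with pending work is released keeps the plug callback registered while work is actually queued. Schematic comparison, with for_each_stripe_chunk() standing in for the real per-chunk loop:

    /* Before: one check whose result may be stale after any sleep. */
    int plugged = mddev_check_plugged(mddev);
    for_each_stripe_chunk(logical_sector) {
            sh = get_active_stripe(conf, logical_sector, ...);  /* may sleep */
            release_stripe(sh);
    }
    if (!plugged)
            md_wakeup_thread(mddev->thread);

    /* After: re-check whenever work is queued behind the plug. */
    for_each_stripe_chunk(logical_sector) {
            sh = get_active_stripe(conf, logical_sector, ...);
            mddev_check_plugged(mddev);
            release_stripe(sh);
    }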
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        int raid_disk, memory, max_disks;
        struct md_rdev *rdev;
        struct disk_info *disk;
+       char pers_name[6];
 
        if (mddev->new_level != 5
            && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
                       mdname(mddev), memory);
 
-       conf->thread = md_register_thread(raid5d, mddev, NULL);
+       sprintf(pers_name, "raid%d", mddev->new_level);
+       conf->thread = md_register_thread(raid5d, mddev, pers_name);
        if (!conf->thread) {
                printk(KERN_ERR
                       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
-               disk = rdev->saved_raid_disk;
-       else
-               disk = first;
-       for ( ; disk <= last ; disk++) {
+               first = rdev->saved_raid_disk;
+
+       for (disk = first; disk <= last; disk++) {
                p = conf->disks + disk;
                if (p->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
-                       break;
+                       goto out;
                }
+       }
+       for (disk = first; disk <= last; disk++) {
+               p = conf->disks + disk;
                if (test_bit(WantReplacement, &p->rdev->flags) &&
                    p->replacement == NULL) {
                        clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+out:
        print_raid5_conf(conf);
        return err;
 }
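Splitting raid5_add_disk() into two passes changes the placement policy: the first loop claims a completely empty slot (preferring saved_raid_disk when that slot is free), and only if no empty slot exists does the second loop offer the device as a replacement where WantReplacement is set; the goto skips the replacement scan once a regular slot is taken. The pattern reduced to its essentials, with the slot helpers being hypothetical:

    for (disk = first; disk <= last; disk++)
            if (slot_is_empty(conf, disk)) {
                    claim_slot(conf, disk, rdev);        /* preferred outcome */
                    goto out;
            }
    for (disk = first; disk <= last; disk++)
            if (slot_wants_replacement(conf, disk)) {
                    claim_replacement(conf, disk, rdev);
                    break;
            }
    out:
            print_raid5_conf(conf);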
index 7d42c11c868434020c8aaddc2c6b9a0fee5d96aa..0cdbd742974ae0404ccf724d734ea5da08a6f821 100644 (file)
@@ -198,7 +198,6 @@ static int fops_open(struct file *file)
        struct saa7146_dev *dev = video_drvdata(file);
        struct saa7146_fh *fh = NULL;
        int result = 0;
-       enum v4l2_buf_type type;
 
        DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
 
@@ -207,10 +206,6 @@ static int fops_open(struct file *file)
 
        DEB_D("using: %p\n", dev);
 
-       type = vdev->vfl_type == VFL_TYPE_GRABBER
-            ? V4L2_BUF_TYPE_VIDEO_CAPTURE
-            : V4L2_BUF_TYPE_VBI_CAPTURE;
-
        /* check if an extension is registered */
        if( NULL == dev->ext ) {
                DEB_S("no extension registered for this device\n");
index 00a67326c1931dd01496a1fdb8544ae0ceb77206..39eab73b01ae9bb5e0a9c4d9667e3c99b4b51415 100644 (file)
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        if (minor == MAX_DVB_MINORS) {
                kfree(dvbdevfops);
                kfree(dvbdev);
+               up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
                return -EINVAL;
        }
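The added up_write() fixes an unbalanced lock: dvb_register_device() takes minor_rwsem for writing before searching for a free minor, and the MAX_DVB_MINORS error path returned while still holding it, which can deadlock the next register or unregister. Every exit after down_write() must release the semaphore; minimal illustration of the pattern (find_free_minor() is hypothetical):

    down_write(&minor_rwsem);
    minor = find_free_minor();
    if (minor == MAX_DVB_MINORS) {
            up_write(&minor_rwsem);            /* the previously missing unlock */
            mutex_unlock(&dvbdev_register_lock);
            return -EINVAL;
    }
    /* ... publish the device under this minor ... */
    up_write(&minor_rwsem);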
index 98ecaf0900d683c1560d92f9f48fcc12e98f7a84..3180f5b2a6a60d8bf3930876e788b3217efa8814 100644 (file)
@@ -516,9 +516,9 @@ static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
        if(cx24110_readreg(state,0x10)&0x40) {
                /* the RS error counter has finished one counting window */
                cx24110_writereg(state,0x10,0x60); /* select the byer reg */
-               cx24110_readreg(state, 0x12) |
+               (void)(cx24110_readreg(state, 0x12) |
                        (cx24110_readreg(state, 0x13) << 8) |
-                       (cx24110_readreg(state, 0x14) << 16);
+                       (cx24110_readreg(state, 0x14) << 16));
                cx24110_writereg(state,0x10,0x70); /* select the bler reg */
                state->lastbler=cx24110_readreg(state,0x12)|
                        (cx24110_readreg(state,0x13)<<8)|
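In cx24110_read_ucblocks() the three register reads are apparently kept for their side effect on the hardware counter window, but the combined value is never used; the old form was a statement with no effect, which compilers warn about. Wrapping it in a (void) cast documents that the result is discarded on purpose without dropping the reads. The idiom, with read_reg() as a stand-in:

    /* Read the 24-bit counter purely for its side effect; discard the value. */
    (void)(read_reg(0x12) | (read_reg(0x13) << 8) | (read_reg(0x14) << 16));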
index 945404991529d1517a046281d807b23385aed6c3..ed3b0ba624dec672aaf350790731aa597027346b 100644 (file)
@@ -121,7 +121,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       switch ((buf[0] >> 0) & 0x03) {
+       switch ((buf[0] >> 0) & 0x07) {
        case 0:
                c->modulation = QAM_16;
                break;
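The cxd2820r mask change widens the modulation field from two bits to three: with & 0x03 any encoding above 3 folds back onto the low cases, so the higher QAM settings could never be reported. A mask has to cover every bit of the field it extracts; a tiny standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned char reg = 0x04;   /* a 3-bit field holding the value 4 */

            printf("masked with 0x03: %d\n", reg & 0x03);   /* 0 - wrong case  */
            printf("masked with 0x07: %d\n", reg & 0x07);   /* 4 - correct one */
            return 0;
    }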
index a3ab1a5b6597fd71933639489d9769c91e42ef8a..cc11260e99df96f7ce19837e6ba268811aa825cf 100644 (file)
@@ -126,7 +126,7 @@ static int lg216x_write_regs(struct lg216x_state *state,
 
        lg_reg("writing %d registers...\n", len);
 
-       for (i = 0; i < len - 1; i++) {
+       for (i = 0; i < len; i++) {
                ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
                if (lg_fail(ret))
                        return ret;
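The lg216x loop bound is a plain off-by-one: with i < len - 1 the last entry of the register table was silently never written. Iterating i < len over a zero-based array visits each of the len entries exactly once. Compact standalone check:

    #include <stdio.h>

    int main(void)
    {
            int regs[] = { 10, 11, 12, 13 };
            int len = sizeof(regs) / sizeof(regs[0]);

            for (int i = 0; i < len; i++)   /* 'i < len - 1' would skip regs[3] */
                    printf("write reg %d\n", regs[i]);
            return 0;
    }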
index 63c004a25e0b811badb95b5b4d5029bcbc031548..664e460f247b5a7d1d53629959da7705fb259aa1 100644 (file)
@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { USB_DEVICE(0x2040, 0xc0a0),
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+       { USB_DEVICE(0x2040, 0xf5a0),
+               .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { } /* Terminating entry */
        };
 
index 740a3d5520c7f8301bd5b637005d1e98343c59c6..b415211d0c4bee08a38093f3ddb11c9e9918a79e 100644 (file)
@@ -157,7 +157,7 @@ static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_devi
                goto err_out_free_region;
 
        dev->io = pci_resource_start(pdev, 0);
-       if (snd_tea575x_init(&dev->tea)) {
+       if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
                printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
                goto err_out_free_region;
        }
index 52b8011f1b2314f5b9b12c76aef1b1ae9c1f6cf5..4efcbec74c52dd0f63532891f4f7ff8e7176e83e 100644 (file)
@@ -238,7 +238,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
        snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
                        fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
 
-       if (snd_tea575x_init(&fmr2->tea)) {
+       if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
                printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
                release_region(fmr2->io, 2);
                return -ENODEV;
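Both radio drivers adapt to a snd_tea575x_init() that now takes the owner module as a second argument; passing THIS_MODULE lets the shared tea575x code pin the calling driver while its device node is in use. Hedged sketch of a call site, assuming the updated prototype int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner) and a driver-private 'dev' as in the hunks above:

    if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
            printk(KERN_ERR "TEA575x tuner not found\n");
            return -ENODEV;
    }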
index e9f63876129623fb0e9e887bf9d295867d49272e..f412f7ab270b63e0f39bc47aefdf58b3d3e5f82c 100644 (file)
@@ -51,6 +51,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
        /* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
        { USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+       /* Axentia ALERT FM USB Receiver */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
        /* Terminating entry */
        { }
 };
index 342c2c8c1ddfcff71f493f3f6f08a4f1969df440..54ee34872d143cea7345f60a5ad9038e91266a51 100644 (file)
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
 
 static bool txandrx; /* default = 0 */
 module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
 
 static unsigned int wake_sc = 0x800F040C;
 module_param(wake_sc, uint, 0644);
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
        data->dev->tx_ir = wbcir_tx;
        data->dev->priv = data;
        data->dev->dev.parent = &device->dev;
+       data->dev->timeout = MS_TO_NS(100);
+       data->dev->allowed_protos = RC_TYPE_ALL;
 
        if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
                dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
index ff2933ab705ff2608dd38827dcab12ff0b0ca2ee..856ab962cd63516b79865d47414a2b7b59fa0926 100644 (file)
@@ -371,7 +371,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -384,7 +383,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -398,7 +396,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomux        = { 4, 0, 2, 3 },
                .gpiomute       = 1,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -414,7 +411,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -427,7 +423,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0, 1, 0, 1 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -440,7 +435,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x0f,
                .gpiomux        = { 0x0c, 0x04, 0x08, 0x04 },
                /*                0x04 for some cards ?? */
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -469,7 +462,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0xc00, 0x800, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -482,7 +474,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 3,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 1, 2, 3 },
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -496,7 +487,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 0, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -510,7 +500,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20001,0x10001, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -524,7 +513,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 14, 11, 7 },
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -536,7 +524,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 14, 11, 7 },
-               .needs_tvaudio  = 1,
                .msp34xx_alt    = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -567,7 +553,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 1, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -583,7 +568,6 @@ struct tvcard bttv_tvcards[] = {
                /* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
                .gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
                .gpiomute       = 0x002000,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -597,7 +581,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1, 0),
                .gpiomux        = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
                .gpiomute       = 0xcfa007,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
                .volume_gpio    = winview_volume,
@@ -611,7 +594,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 0, 0, 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -660,7 +642,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 0x800, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -691,7 +672,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = {0x400, 0x400, 0x400, 0x400 },
                .gpiomute       = 0xc00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -706,7 +686,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 0, 1, 1),
                .gpiomux        = { 0, 1, 2, 3 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -748,7 +726,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0x00000 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
                .muxsel_hook    = PXC200_muxsel,
@@ -834,7 +810,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -847,7 +822,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x500, 0, 0x300, 0x900 },
                .gpiomute       = 0x900,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -874,7 +848,6 @@ struct tvcard bttv_tvcards[] = {
                Note: There exists another variant "Winfast 2000" with tv stereo !?
                Note: eeprom only contains FF and pci subsystem id 107d:6606
                */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .has_radio      = 1,
                .tuner_type     = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0x551400, 0x551200, 0, 0 },
                .gpiomute       = 0x551c00,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
                .tuner_addr     = ADDR_UNSET,
@@ -949,7 +921,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0xd0001, 0, 0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -966,7 +937,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomux        = { 4, 0, 2, 3 },
                .gpiomute       = 1,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -980,7 +950,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 15,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 13, 4, 11, 7 },
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -995,7 +964,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0, 0},
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20000, 0x30000, 0x10000, 0 },
                .gpiomute       = 0x40000,
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_35,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = {2,0,0,0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1163,7 +1129,6 @@ struct tvcard bttv_tvcards[] = {
                                MUX2 (mask 0x30000):
                                        0,2,3= from MSP34xx
                                        1= FM stereo Radio from Tuner */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1179,7 +1144,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0x10, 8 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1218,7 +1182,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1250,7 +1213,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(3, 1),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_35,
                .tuner_type     = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x400, 0x400, 0x400, 0x400 },
                .gpiomute       = 0x800,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TEMIC_4036FY5_NTSC,
                .tuner_addr     = ADDR_UNSET,
@@ -1312,7 +1273,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2),
                .gpiomux        = { },
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -1329,7 +1289,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 1, 0, 4, 4 },
                .gpiomute       = 9,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1379,7 +1338,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomute       = 0x1800,
                .audio_mode_gpio= fv2000s_audio,
                .no_msp34xx     = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1393,7 +1351,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x500, 0x500, 0x300, 0x900 },
                .gpiomute       = 0x900,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -1477,7 +1434,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
                .gpiomute       = 13,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_LG_PAL_I_FM,
                .tuner_addr     = ADDR_UNSET,
@@ -1514,7 +1470,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x01, 0x00, 0x03, 0x03 },
                .gpiomute       = 0x09,
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -1567,7 +1521,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 4,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -1597,7 +1550,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@ struct tvcard bttv_tvcards[] = {
                                                * btwincap uses 0x80000/0x80003
                                                */
                .gpiomute       = 4,
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@ struct tvcard bttv_tvcards[] = {
                /* .audio_inputs= 1, */
                .svhs           = 2,
                .muxsel         = MUXSEL(2, 0, 1, 1),
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = UNSET,
                .tuner_addr     = ADDR_UNSET,
@@ -1875,7 +1825,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 3},
                .gpiomute       = 4,
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -1902,7 +1851,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@ struct tvcard bttv_tvcards[] = {
                /*                  Tuner, Radio, external, internal, off,  on */
                .gpiomux        = { 0x08,  0x0f,  0x0a,     0x08 },
                .gpiomute       = 0x0f,
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@ struct tvcard bttv_tvcards[] = {
                .svhs           = 2,
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 1),
-               .needs_tvaudio  = 1,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2062,7 +2006,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2079,7 +2022,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
                .muxsel_hook    = phytec_muxsel,
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2094,7 +2036,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
                .muxsel_hook    = phytec_muxsel,
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2118,7 +2059,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
                .svhs           = NO_SVHS,   /* card has no svhs */
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .no_tda7432     = 1,
                .gpiomask       = 0x00,
@@ -2168,7 +2108,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 3,
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 1, 1, 1, 1 },
-               .needs_tvaudio  = 1,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_35,
@@ -2210,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 0),
                .no_msp34xx     = 1,
                .no_tda7432     = 1,
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
        },
@@ -2222,7 +2160,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
                .svhs           = 2,
-               .needs_tvaudio  = 0,
                .gpiomask       = 0x68,
                .muxsel         = MUXSEL(2, 3, 1),
                .gpiomux        = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -2265,7 +2201,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 2, 2, 2),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
                .pll            = PLL_28,
-               .needs_tvaudio  = 0,
                .muxsel_hook    = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2358,7 +2293,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL,
                .tuner_addr     = ADDR_UNSET,
@@ -2405,7 +2339,6 @@ struct tvcard bttv_tvcards[] = {
                .tuner_addr     = ADDR_UNSET,
                .gpiomask       = 0x008007,
                .gpiomux        = { 0, 0x000001,0,0 },
-               .needs_tvaudio  = 1,
                .has_radio      = 1,
        },
        [BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x001e00, 0, 0x018000, 0x014000 },
                .gpiomute       = 0x002000,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_YMEC_TVF66T5_B_DFF,
                .tuner_addr     = 0xc1 >>1,
@@ -2534,7 +2466,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 },
                .gpiomute       = 3,
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_TENA_9533_DI,
                .tuner_addr     = ADDR_UNSET,
@@ -2615,7 +2546,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 2, 0, 0, 0 },
                .gpiomute       = 1,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_NTSC,
                .tuner_addr     = ADDR_UNSET,
@@ -2714,7 +2644,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0x20001,0x10001, 0, 0 },
                .gpiomute       = 10,
-               .needs_tvaudio  = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_PHILIPS_PAL_I,
                .tuner_addr     = ADDR_UNSET,
@@ -2746,7 +2675,6 @@ struct tvcard bttv_tvcards[] = {
                .muxsel         = MUXSEL(2, 3, 1, 1),
                .gpiomux        = { 0, 1, 2, 2 }, /* CONTVFMi */
                .gpiomute       = 3, /* CONTVFMi */
-               .needs_tvaudio  = 0,
                .tuner_type     = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
                .tuner_addr     = ADDR_UNSET,
                .pll            = PLL_28,
@@ -2785,7 +2713,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(0, 2, 3, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2799,7 +2726,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(2, 3, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2813,7 +2739,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0x00,
                .muxsel         = MUXSEL(3, 2, 1),
                .gpiomux        = { 0, 0, 0, 0 }, /* card has no audio */
-               .needs_tvaudio  = 0,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
                .tuner_addr     = ADDR_UNSET,
@@ -2877,7 +2802,6 @@ struct tvcard bttv_tvcards[] = {
                .gpiomask       = 0,
                .muxsel         = MUXSEL(2, 3),
                .gpiomux        = { 0 },
-               .needs_tvaudio  = 0,
                .no_msp34xx     = 1,
                .pll            = PLL_28,
                .tuner_type     = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
                struct tuner_setup tun_setup;
 
                /* Load tuner module before issuing tuner config call! */
-               if (bttv_tvcards[btv->c.type].has_radio)
+               if (btv->has_radio)
                        v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
                                &btv->c.i2c_adap, "tuner",
                                0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
                tun_setup.type = btv->tuner_type;
                tun_setup.addr = addr;
 
-               if (bttv_tvcards[btv->c.type].has_radio)
+               if (btv->has_radio)
                        tun_setup.mode_mask |= T_RADIO;
 
                bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
                        bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
                btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
        }
+
+       /* The 61334 needs the msp3410 to do the radio demod to get sound */
+       if (tv.model == 61334)
+               btv->radio_uses_msp_demodulator = 1;
 }
 
 static int terratec_active_radio_upgrade(struct bttv *btv)
index a9cfb0f4be48183ee13a0b042013448ac73395e8..ff7a589d8e0f5e0486dd72c8870e4d965a8ce50e 100644 (file)
@@ -1218,6 +1218,11 @@ audio_mux(struct bttv *btv, int input, int mute)
                   For now this is sufficient. */
                switch (input) {
                case TVAUDIO_INPUT_RADIO:
+                       /* Some boards need the msp to do the radio demod */
+                       if (btv->radio_uses_msp_demodulator) {
+                               in = MSP_INPUT_DEFAULT;
+                               break;
+                       }
                        in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
                                    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
                        break;
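These bttv hunks work together with the new bttv.h field further below: hauppauge_eeprom() sets radio_uses_msp_demodulator for model 61334, and audio_mux() then routes radio through MSP_INPUT_DEFAULT so the msp3410 performs the FM demodulation instead of expecting already-demodulated audio from the tuner. The routing decision, condensed (msp macros come from msp3400.h):

    if (input == TVAUDIO_INPUT_RADIO) {
            if (btv->radio_uses_msp_demodulator)
                    in = MSP_INPUT_DEFAULT;                 /* msp demodulates FM */
            else
                    in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
                                   MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
    }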
index c5171619ac7998369511c515da9046beda33d315..acfe2f3b92d9abdbb050a6586a7d9f4f310409e6 100644 (file)
@@ -236,7 +236,6 @@ struct tvcard {
        /* i2c audio flags */
        unsigned int no_msp34xx:1;
        unsigned int no_tda7432:1;
-       unsigned int needs_tvaudio:1;
        unsigned int msp34xx_alt:1;
        /* Note: currently no card definition needs to mark the presence
           of a RDS saa6588 chip. If this is ever needed, then add a new
index db943a8d580db131a1bcd4daaece5b6bf0552093..70fd4f23f605aa8374799239ca71ca18219bf9d2 100644 (file)
@@ -440,6 +440,7 @@ struct bttv {
        /* radio data/state */
        int has_radio;
        int radio_user;
+       int radio_uses_msp_demodulator;
 
        /* miro/pinnacle + Aimslab VHX
           philips matchbox (tea5757 radio tuner) support */
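The long run of bttv-cards.c hunks earlier in this file and the struct tvcard hunk above are one cleanup: needs_tvaudio is evidently no longer consulted anywhere in the driver, so the one-bit field and every per-card initialiser are dropped. Removing a dead flag from a packed bitfield normally leaves the layout of the remaining flags untouched, which is why the change is purely mechanical; a small standalone illustration:

    #include <stdio.h>

    struct flags_before { unsigned int no_msp34xx:1, no_tda7432:1,
                          needs_tvaudio:1, msp34xx_alt:1; };
    struct flags_after  { unsigned int no_msp34xx:1, no_tda7432:1,
                          msp34xx_alt:1; };

    int main(void)
    {
            /* Dropping one bit from the pack typically keeps the same size. */
            printf("%zu %zu\n", sizeof(struct flags_before),
                                sizeof(struct flags_after));
            return 0;
    }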
index 2520219f01ba39fbc99ce3c17eaaa071e7097644..5b75a64b199bc924016a3d75e13a0c48f87a82e9 100644 (file)
@@ -607,8 +607,9 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
                                }
                                o = i * pixels_per_line + pixels_read + k;
                                if (o < len) {
+                                       u8 ch = invert - buffer[k];
                                        got++;
-                                       put_user((invert - buffer[k]) << shift, buf + o);
+                                       put_user(ch << shift, buf + o);
                                }
                        }
                        pixels_read += bytes;
@@ -648,8 +649,8 @@ static int qcam_querycap(struct file *file, void  *priv,
        struct qcam *qcam = video_drvdata(file);
 
        strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
-       strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
-       strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+       strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+       strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
        vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
        vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
@@ -688,8 +689,8 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
        pix->height = qcam->height / qcam->transfer_scale;
        pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
        pix->field = V4L2_FIELD_NONE;
-       pix->bytesperline = qcam->width;
-       pix->sizeimage = qcam->width * qcam->height;
+       pix->bytesperline = pix->width;
+       pix->sizeimage = pix->width * pix->height;
        /* Just a guess */
        pix->colorspace = V4L2_COLORSPACE_SRGB;
        return 0;
@@ -757,7 +758,7 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
                  "4-Bit Monochrome", V4L2_PIX_FMT_Y4,
                  { 0, 0, 0, 0 }
                },
-               { 0, 0, 0,
+               { 1, 0, 0,
                  "6-Bit Monochrome", V4L2_PIX_FMT_Y6,
                  { 0, 0, 0, 0 }
                },
@@ -772,6 +773,25 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
        return 0;
 }
 
+static int qcam_enum_framesizes(struct file *file, void *fh,
+                                        struct v4l2_frmsizeenum *fsize)
+{
+       static const struct v4l2_frmsize_discrete sizes[] = {
+               {  80,  60 },
+               { 160, 120 },
+               { 320, 240 },
+       };
+
+       if (fsize->index > 2)
+               return -EINVAL;
+       if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+           fsize->pixel_format != V4L2_PIX_FMT_Y6)
+               return -EINVAL;
+       fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+       fsize->discrete = sizes[fsize->index];
+       return 0;
+}
+
 static ssize_t qcam_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
 {
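The new qcam_enum_framesizes() handler advertises the three discrete capture sizes (80x60, 160x120 and 320x240) for both monochrome formats, so applications can discover them through VIDIOC_ENUM_FRAMESIZES instead of probing with trial format calls. Minimal user-space sketch of such a caller (the device path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_frmsizeenum fs;
            int fd = open("/dev/video0", O_RDWR);   /* illustrative node */

            if (fd < 0)
                    return 1;
            memset(&fs, 0, sizeof(fs));
            fs.pixel_format = V4L2_PIX_FMT_Y4;
            for (fs.index = 0; ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fs) == 0; fs.index++)
                    if (fs.type == V4L2_FRMSIZE_TYPE_DISCRETE)
                            printf("%ux%u\n", fs.discrete.width, fs.discrete.height);
            return 0;
    }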
@@ -795,6 +815,11 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
        return len;
 }
 
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
+       return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
 static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct qcam *qcam =
@@ -828,7 +853,7 @@ static const struct v4l2_file_operations qcam_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
-       .poll           = v4l2_ctrl_poll,
+       .poll           = qcam_poll,
        .unlocked_ioctl = video_ioctl2,
        .read           = qcam_read,
 };
@@ -839,6 +864,7 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
        .vidioc_s_input                     = qcam_s_input,
        .vidioc_enum_input                  = qcam_enum_input,
        .vidioc_enum_fmt_vid_cap            = qcam_enum_fmt_vid_cap,
+       .vidioc_enum_framesizes             = qcam_enum_framesizes,
        .vidioc_g_fmt_vid_cap               = qcam_g_fmt_vid_cap,
        .vidioc_s_fmt_vid_cap               = qcam_s_fmt_vid_cap,
        .vidioc_try_fmt_vid_cap             = qcam_try_fmt_vid_cap,
@@ -864,9 +890,9 @@ static struct qcam *qcam_init(struct parport *port)
                return NULL;
 
        v4l2_dev = &qcam->v4l2_dev;
-       strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+       snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
 
-       if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+       if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
                v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
                kfree(qcam);
                return NULL;
@@ -886,7 +912,7 @@ static struct qcam *qcam_init(struct parport *port)
                return NULL;
        }
        qcam->pport = port;
-       qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+       qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
                        NULL, 0, NULL);
        if (qcam->pdev == NULL) {
                v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
@@ -975,6 +1001,7 @@ static int init_bwqcam(struct parport *port)
                return -ENODEV;
        }
        qc_calibrate(qcam);
+       v4l2_ctrl_handler_setup(&qcam->hdl);
 
        parport_release(qcam->pdev);
 
index b55d57cc1a1c24972fad9e56f3eea804dccab535..7e5ffd6f51786d3390b629b8794b4139aa410551 100644 (file)
@@ -838,10 +838,10 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
        }
 
        CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
-                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  "irq: %d, latency: %d, memory: 0x%llx\n",
                   cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
                   PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
-                  cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+                  cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
 
        return 0;
 }
@@ -938,7 +938,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
        if (retval)
                goto err;
 
-       CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+       CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
 
        /* PCI Device Setup */
        retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
                goto free_workqueues;
 
        /* map io memory */
-       CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+       CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
        cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
                                       CX18_MEM_SIZE);
        if (!cx->enc_mem) {
index 7a37e0ee136f095aa8cfb8c696811aa2165efe46..2767c64df0c87f9c044aca755a63fbfd0c75adf3 100644 (file)
@@ -622,7 +622,7 @@ struct cx18 {
                                   unique ID. Starts at 1, so 0 can be used as
                                   uninitialized value in the stream->id. */
 
-       u32 base_addr;
+       resource_size_t base_addr;
 
        u8 card_rev;
        void __iomem *enc_mem, *reg_mem;
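Turning base_addr into resource_size_t means its width now depends on the kernel's physical address configuration (32 or 64 bits), so the debug prints can no longer assume unsigned long; the surrounding hunks therefore cast to u64 and use %llx, which is correct for either width. The portable pattern, assuming a struct pci_dev *pdev:

    resource_size_t base = pci_resource_start(pdev, 0);

    pr_debug("base addr: 0x%llx\n", (u64)base);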
index 1b3fb502e6be5fdb31164407a72f8de96fa9712a..b85c292a849ac2debe36c828e506254cd65214a7 100644 (file)
@@ -164,8 +164,13 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
 
        apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
        while (offset + sizeof(seghdr) < fw->size) {
-               /* TODO: byteswapping */
-               memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+               const u32 *shptr = src + offset / 4;
+
+               seghdr.sync1 = le32_to_cpu(shptr[0]);
+               seghdr.sync2 = le32_to_cpu(shptr[1]);
+               seghdr.addr = le32_to_cpu(shptr[2]);
+               seghdr.size = le32_to_cpu(shptr[3]);
+
                offset += sizeof(seghdr);
                if (seghdr.sync1 != APU_ROM_SYNC1 ||
                    seghdr.sync2 != APU_ROM_SYNC2) {
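Replacing the raw memcpy() of the segment header with explicit le32_to_cpu() conversions resolves the old "TODO: byteswapping": the APU firmware image stores its sync words, load address and size as little-endian 32-bit values, so copying them untranslated is only correct on little-endian hosts. General shape of pulling such a header out of a blob (the struct name is illustrative):

    struct seg_hdr { u32 sync1, sync2, addr, size; } hdr;
    const __le32 *p = (const __le32 *)(fw->data + offset);

    hdr.sync1 = le32_to_cpu(p[0]);
    hdr.sync2 = le32_to_cpu(p[1]);
    hdr.addr  = le32_to_cpu(p[2]);
    hdr.size  = le32_to_cpu(p[3]);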
index ed8118390b02ab9fd3ec38bd53d549053c2d5616..eabf00c6351b82365494d27bbac6ffd7c8a83c29 100644 (file)
@@ -434,6 +434,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
        u32 handle, mdl_ack_offset, mdl_ack_count;
        struct cx18_mailbox *mb;
+       int i;
 
        mb = &order->mb;
        handle = mb->args[0];
@@ -447,8 +448,9 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
                return -1;
        }
 
-       cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
-                          sizeof(struct cx18_mdl_ack) * mdl_ack_count);
+       for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+               ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+                       cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
 
        if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
                mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
        struct cx18_mailbox *order_mb;
        struct cx18_in_work_order *order;
        int submit;
+       int i;
 
        switch (rpu) {
        case CPU:
@@ -562,10 +565,12 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
        order_mb = &order->mb;
 
        /* mb->cmd and mb->args[0] through mb->args[2] */
-       cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+       for (i = 0; i < 4; i++)
+               (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
        /* mb->request and mb->ack.  N.B. we want to read mb->ack last */
-       cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
-                          2 * sizeof(u32));
+       for (i = 0; i < 2; i++)
+               (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
 
        if (order_mb->request == order_mb->ack) {
                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
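Both mailbox hunks replace cx18_memcpy_fromio() with loops of cx18_readl(): the mapped CX23418 memory is accessed through the driver's 32-bit accessors everywhere else, and a generic memcpy-from-IO helper may fall back to byte or unaligned transfers, so structures are now copied out one aligned u32 at a time. Sketch of the loop, assuming a readl-style accessor and a byte count that is a multiple of four:

    for (i = 0; i < bytes; i += sizeof(u32))
            ((u32 *)dst)[i / sizeof(u32)] = cx18_readl(cx, src + i);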
index 068f78dc5d13fa0268eef854b4956d18f7521bba..b4c99c7270cf8fc8d28d26226248208f8b930dad 100644 (file)
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvisocpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = URB_ISO_ASAP;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->interval = 1;
                urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
                urb->context = dev;
                urb->pipe = usb_rcvbulkpipe(dev->udev,
                                                dev->adev.end_point_addr);
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
                urb->transfer_buffer = dev->adev.transfer_buffer[i];
                urb->complete = cx231xx_audio_bulkirq;
                urb->transfer_buffer_length = sb_size;
index 3d15314e1f88d1ff71924a7a65362dd13b75fd12..ac7db52f404ffbc9c95207f7a2bd5b1414aa4187 100644 (file)
@@ -448,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                        return -ENOMEM;
                }
                dev->vbi_mode.bulk_ctl.urb[i] = urb;
-               urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+               urb->transfer_flags = 0;
 
                dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
                    kzalloc(sb_size, GFP_KERNEL);
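Dropping URB_NO_TRANSFER_DMA_MAP from the cx231xx URBs is the right call because that flag tells usbcore the driver has already supplied a DMA address in urb->transfer_dma (normally from usb_alloc_coherent()); these buffers come from kzalloc(), so the flag was wrong and the core must map transfer_buffer itself. The two legitimate combinations, with udev and sb_size as in the driver:

    /* kmalloc'd buffer: usbcore performs the DMA mapping. */
    urb->transfer_buffer = kzalloc(sb_size, GFP_KERNEL);
    urb->transfer_flags = 0;

    /* coherent buffer: the driver supplies the DMA address itself. */
    urb->transfer_buffer = usb_alloc_coherent(udev, sb_size, GFP_KERNEL,
                                              &urb->transfer_dma);
    urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;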
index 13739e002a63fb6460bec16572bad10581fcb998..080e11157e5fe89afdb384c702376d80528daf5d 100644 (file)
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1250] = {
                .name           = "Hauppauge WinTV-HVR1250",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+               .tuner_type     = TUNER_PHILIPS_TDA8290,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .tuner_bus      = 1,
+#endif
+               .force_bff      = 1,
                .input          = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
                        .type   = CX23885_VMUX_TELEVISION,
-                       .vmux   = 0,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1,
+                       .amux   = CX25840_AUDIO8,
                        .gpio0  = 0xff00,
                }, {
-                       .type   = CX23885_VMUX_DEBUG,
-                       .vmux   = 0,
-                       .gpio0  = 0xff01,
-               }, {
+#endif
                        .type   = CX23885_VMUX_COMPOSITE1,
-                       .vmux   = 1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                }, {
                        .type   = CX23885_VMUX_SVIDEO,
-                       .vmux   = 2,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
                        .gpio0  = 0xff02,
                } },
        },
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1255] = {
                .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
+               .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_COMPOSITE1,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN6_CH1,
+                       .amux   = CX25840_AUDIO7,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
+       },
+       [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+               .name           = "Hauppauge WinTV-HVR1255",
+               .porta          = CX23885_ANALOG_VIDEO,
                .portc          = CX23885_MPEG_DVB,
+               .tuner_type     = TUNER_ABSENT,
+               .tuner_addr     = 0x42, /* 0x84 >> 1 */
+               .force_bff      = 1,
+               .input          = {{
+                       .type   = CX23885_VMUX_TELEVISION,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN5_CH2 |
+                                       CX25840_VIN2_CH1 |
+                                       CX25840_DIF_ON,
+                       .amux   = CX25840_AUDIO8,
+               }, {
+                       .type   = CX23885_VMUX_SVIDEO,
+                       .vmux   =       CX25840_VIN7_CH3 |
+                                       CX25840_VIN4_CH2 |
+                                       CX25840_VIN8_CH1 |
+                                       CX25840_SVIDEO_ON,
+                       .amux   = CX25840_AUDIO7,
+               } },
        },
        [CX23885_BOARD_HAUPPAUGE_HVR1210] = {
                .name           = "Hauppauge WinTV-HVR1210",
@@ -624,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2259,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
        }, {
                .subvendor = 0x0070,
                .subdevice = 0x2291,
@@ -900,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
        struct cx23885_dev *dev = port->dev;
        u32 bitmask = 0;
 
-       if (command == XC2028_RESET_CLK)
+       if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
                return 0;
 
        if (command != 0) {
@@ -1130,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
                /* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1267,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1400:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
                /* FIXME: Implement me */
                break;
@@ -1424,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1511,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1275:
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1210:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1526,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
         */
        switch (dev->board) {
        case CX23885_BOARD_TEVII_S470:
-       case CX23885_BOARD_HAUPPAUGE_HVR1250:
                /* Currently only enabled for the integrated IR controller */
                if (!enable_885_ir)
                        break;
+       case CX23885_BOARD_HAUPPAUGE_HVR1250:
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
        case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
        case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
        case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
        case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1270:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_MYGICA_X8506:
index a80a92c474558a4c555a473c89aa1df568c88288..cd542684ba022c4f194928e37cfc318d6a1b0b79 100644 (file)
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
                }
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
                i2c_bus = &dev->i2c_bus[0];
                fe0->dvb.frontend = dvb_attach(s5h1411_attach,
                                               &hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
                                   0x60, &dev->i2c_bus[1].i2c_adap,
                                   &hauppauge_tda18271_config);
                }
+
+               tda18271_attach(&dev->ts1.analog_fe,
+                       0x60, &dev->i2c_bus[1].i2c_adap,
+                       &hauppauge_tda18271_config);
+
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR1800:
                i2c_bus = &dev->i2c_bus[0];
index c654bdc7ccb201dd4e285c0b7ddfe703ab89cb61..22f8e7fbd6656fe81f33f824832a38cc7e2dd14c 100644 (file)
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
 
        if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
                (dev->board == CX23885_BOARD_MPX885) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+               (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
                (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
                /* Configure audio routing */
                v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
 
        fe = vfe->dvb.frontend;
 
-       if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+       if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+           (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
                fe = &dev->ts1.analog_fe;
 
        if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
        int ret;
 
        switch (dev->board) {
+       case CX23885_BOARD_HAUPPAUGE_HVR1255:
+       case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
                ret = cx23885_set_freq_via_ops(dev, f);
                break;
index d884784a1c8582f8a97c379dacc5ef603d2f934c..13c37ec07ae7e250b54694aad7c821c670fd584f 100644 (file)
@@ -90,6 +90,7 @@
 #define CX23885_BOARD_MYGICA_X8507             33
 #define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
 #define CX23885_BOARD_TEVII_S471               35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111  36
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
index 83c1aa6b2e6c9a8762065e4c5994c62f033f3fba..f11f6f07e9154cd4d00790031d3da5e63c1ec8d1 100644 (file)
@@ -904,9 +904,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
        list_add_tail(&dev->devlist, &cx25821_devlist);
        mutex_unlock(&cx25821_devlist_mutex);
 
-       strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
-       strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
        if (dev->pci->device != 0x8210) {
                pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
                        __func__, dev->pci->device);
index b9aa801b00a7b29c01c45334dd14d6c7c61bb978..029f2934a6d88bccdb409becdfd17849b99dff3b 100644 (file)
@@ -187,7 +187,7 @@ enum port {
 };
 
 struct cx25821_board {
-       char *name;
+       const char *name;
        enum port porta;
        enum port portb;
        enum port portc;
index fc1ff69cffd0d4917e86292573691be2436282fc..d8eac3e30a7ea99217e5b149de79752e73f594cf 100644 (file)
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
 
 
 /* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
 
 int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
 {
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
        finish_wait(&state->fw_wait, &wait);
        destroy_workqueue(q);
 
-       /* Call the cx23885 specific std setup func, we no longer rely on
+       /* Call the cx23888 specific std setup func, we no longer rely on
         * the generic cx25840 func.
         */
-       cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
+       else
+               cx25840_std_setup(client);
 
        /* (re)set input */
        set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
 
                        cx25840_write4(client, 0x410, 0xffff0dbf);
                        cx25840_write4(client, 0x414, 0x00137d03);
-                       cx25840_write4(client, 0x418, 0x01008080);
+
+                       /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is
+                          CHROMA_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x418, 0x01008080);
+                       else
+                               cx25840_write4(client, 0x418, 0x01000000);
+
                        cx25840_write4(client, 0x41c, 0x00000000);
-                       cx25840_write4(client, 0x420, 0x001c3e0f);
+
+                       /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is
+                          CRUSH_CTRL */
+                       if (is_cx23888(state))
+                               cx25840_write4(client, 0x420, 0x001c3e0f);
+                       else
+                               cx25840_write4(client, 0x420, 0x001c8282);
+
                        cx25840_write4(client, 0x42c, 0x42600000);
                        cx25840_write4(client, 0x430, 0x0000039b);
                        cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
                cx25840_write4(client, 0x8d0, 0x1f063870);
        }
 
-       if (is_cx2388x(state)) {
+       if (is_cx23888(state)) {
                /* HVR1850 */
                /* AUD_IO_CTRL - I2S Input, Parallel1*/
                /*  - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
        }
        cx25840_and_or(client, 0x400, ~0xf, fmt);
        cx25840_and_or(client, 0x403, ~0x3, pal_m);
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        else
                cx25840_std_setup(client);
        if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
 static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct v4l2_subdev *sd = to_sd(ctrl);
+       struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
        switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
 
        case V4L2_CID_SATURATION:
-               cx25840_write(client, 0x420, ctrl->val << 1);
-               cx25840_write(client, 0x421, ctrl->val << 1);
+               if (is_cx23888(state)) {
+                       cx25840_write(client, 0x418, ctrl->val << 1);
+                       cx25840_write(client, 0x419, ctrl->val << 1);
+               } else {
+                       cx25840_write(client, 0x420, ctrl->val << 1);
+                       cx25840_write(client, 0x421, ctrl->val << 1);
+               }
                break;
 
        case V4L2_CID_HUE:
-               cx25840_write(client, 0x422, ctrl->val);
+               if (is_cx23888(state))
+                       cx25840_write(client, 0x41a, ctrl->val);
+               else
+                       cx25840_write(client, 0x422, ctrl->val);
                break;
 
        default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
        fmt->field = V4L2_FIELD_INTERLACED;
        fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-       Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
-       Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+       } else {
+               Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+               Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+       }
 
-       Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
-       Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       if (is_cx23888(state)) {
+               Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+       } else {
+               Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+               Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+       }
 
        Vlines = fmt->height + (is_50Hz ? 4 : 7);
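The Vsrc/Hsrc reads above stitch a 10-bit source size out of two 8-bit registers: the low six bits of one register become bits 9..4, and the high nibble of the other becomes bits 3..0 (only the register addresses differ between the cx23888 and the older parts). A small sketch of that bit stitching with invented register contents:

#include <stdint.h>
#include <stdio.h>

static unsigned int stitch_size(uint8_t hi_reg, uint8_t lo_reg)
{
	unsigned int v = (hi_reg & 0x3f) << 4;	/* bits 9..4 */

	v |= (lo_reg & 0xf0) >> 4;		/* bits 3..0 */
	return v;
}

int main(void)
{
	/* Example only: 0x1e / 0x00 decode to 480 active lines. */
	printf("Vsrc = %u\n", stitch_size(0x1e, 0x00));
	return 0;
}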
 
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
 
        return set_input(client, input, state->aud_input);
 }
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
        struct cx25840_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (is_cx2388x(state))
-               cx23885_std_setup(client);
+       if (is_cx23888(state))
+               cx23888_std_setup(client);
        return set_input(client, state->vid_input, input);
 }
 
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
        }
 }
 
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
 {
        struct cx25840_state *state = to_state(i2c_get_clientdata(client));
        v4l2_std_id std = state->std;
index e46446a449c090c91dc43e4eeb1e34e2baf2c16f..ed7b2aa1ed831d4582c68679476ce8b08b733da7 100644 (file)
@@ -471,7 +471,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
        dprintk(1,"Loading firmware ...\n");
        dataptr = (u32*)firmware->data;
        for (i = 0; i < (firmware->size >> 2); i++) {
-               value = *dataptr;
+               value = le32_to_cpu(*dataptr);
                checksum += ~value;
                memory_write(dev->core, i, value);
                dataptr++;
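The one-line change above converts each firmware word from little-endian before it is checksummed and written, so big-endian hosts compute the same values as little-endian ones. A hedged user-space sketch of the idea, with a hand-rolled helper standing in for the kernel's le32_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit load; host byte order no longer matters
 * because the bytes are combined explicitly. */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t fw[8] = { 0x78, 0x56, 0x34, 0x12, 0x01, 0x00, 0x00, 0x00 };
	uint32_t checksum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(fw) / 4; i++)
		checksum += ~load_le32(fw + 4 * i);

	printf("checksum = 0x%08x\n", (unsigned int)checksum);
	return 0;
}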
index 20a7e24de6fba66e3ce806153a3dda5995c34fed..862c6575c55791fa7a6f4488d7cd933226fd02cb 100644 (file)
@@ -974,6 +974,7 @@ struct em28xx_board em28xx_boards[] = {
        [EM2884_BOARD_CINERGY_HTC_STICK] = {
                .name         = "Terratec Cinergy HTC Stick",
                .has_dvb      = 1,
+               .ir_codes     = RC_MAP_NEC_TERRATEC_CINERGY_XS,
 #if 0
                .tuner_type   = TUNER_PHILIPS_TDA8290,
                .tuner_addr   = 0x41,
@@ -2892,7 +2893,7 @@ static void request_module_async(struct work_struct *work)
 
        if (dev->board.has_dvb)
                request_module("em28xx-dvb");
-       if (dev->board.has_ir_i2c && !disable_ir)
+       if (dev->board.ir_codes && !disable_ir)
                request_module("em28xx-rc");
 }
 
index fce5f7680c99603938a931ade5a54fc351e11299..5e30c4f3f248ac5427caf86201eaec09f83efc02 100644 (file)
@@ -527,6 +527,8 @@ static int em28xx_ir_init(struct em28xx *dev)
 
        if (dev->board.ir_codes == NULL) {
                /* No remote control support */
+               em28xx_warn("Remote control support is not available for "
+                               "this card.\n");
                return 0;
        }
 
index 137166d73945fdd94fee6a1dc4dc9115da65dd4e..31721eadc597de48e749f6202a98ddb819910cbf 100644 (file)
@@ -1653,7 +1653,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type buf_type)
 {
        struct gspca_dev *gspca_dev = video_drvdata(file);
-       int ret;
+       int i, ret;
 
        if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
@@ -1678,6 +1678,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
        wake_up_interruptible(&gspca_dev->wq);
 
        /* empty the transfer queues */
+       for (i = 0; i < gspca_dev->nframes; i++)
+               gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
        atomic_set(&gspca_dev->fr_q, 0);
        atomic_set(&gspca_dev->fr_i, 0);
        gspca_dev->fr_o = 0;
index b5acb1e4b4e7ce26dc0b644411872847ca8e5f77..80c81dd6d68b3ce4d6a40566449b89a110ce05a4 100644 (file)
@@ -96,7 +96,7 @@ static void setbrightness(struct gspca_dev *gspca_dev);
 static void setcontrast(struct gspca_dev *gspca_dev);
 static void setgain(struct gspca_dev *gspca_dev);
 static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
 static void setawb(struct gspca_dev *gspca_dev);
 static void setaec(struct gspca_dev *gspca_dev);
 static void setsharpness(struct gspca_dev *gspca_dev);
@@ -189,7 +189,7 @@ static const struct ctrl sd_ctrls[] = {
                        .step    = 1,
                        .default_value = 1,
                },
-               .set = sd_setagc
+               .set_control = setagc
        },
 [AWB] = {
                {
@@ -851,6 +851,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
        int i;
 
        for (i = 0; i < 5; i++) {
+               msleep(10);
                data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
 
                switch (data) {
@@ -1242,10 +1243,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
 
        cam->ctrls = sd->ctrls;
 
-       /* the auto white balance control works only when auto gain is set */
-       if (sd_ctrls[AGC].qctrl.default_value == 0)
-               gspca_dev->ctrl_inac |= (1 << AWB);
-
        cam->cam_mode = ov772x_mode;
        cam->nmodes = ARRAY_SIZE(ov772x_mode);
 
@@ -1486,29 +1483,6 @@ scan_next:
        } while (remaining_len > 0);
 }
 
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
-       struct sd *sd = (struct sd *) gspca_dev;
-
-       sd->ctrls[AGC].val = val;
-
-       /* the auto white balance control works only
-        * when auto gain is set */
-       if (val) {
-               gspca_dev->ctrl_inac &= ~(1 << AWB);
-       } else {
-               gspca_dev->ctrl_inac |= (1 << AWB);
-               if (sd->ctrls[AWB].val) {
-                       sd->ctrls[AWB].val = 0;
-                       if (gspca_dev->streaming)
-                               setawb(gspca_dev);
-               }
-       }
-       if (gspca_dev->streaming)
-               setagc(gspca_dev);
-       return gspca_dev->usb_err;
-}
-
 static int sd_querymenu(struct gspca_dev *gspca_dev,
                struct v4l2_querymenu *menu)
 {
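The handler deleted above encoded the AGC/AWB dependency by hand through the ctrl_inac bitmask: auto white balance is only meaningful while auto gain is enabled. A stand-alone sketch of that inactive-bitmask idiom, with illustrative names only:

#include <stdio.h>

#define CTRL_AGC 0
#define CTRL_AWB 1

static unsigned int ctrl_inactive;	/* bit set => control greyed out */

static void set_agc(int on)
{
	if (on)
		ctrl_inactive &= ~(1u << CTRL_AWB);	/* AWB usable again */
	else
		ctrl_inactive |= 1u << CTRL_AWB;	/* AWB needs AGC */
}

int main(void)
{
	set_agc(0);
	printf("AWB inactive: %u\n", !!(ctrl_inactive & (1u << CTRL_AWB)));
	set_agc(1);
	printf("AWB inactive: %u\n", !!(ctrl_inactive & (1u << CTRL_AWB)));
	return 0;
}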
index b5797308a39b365b2d5b075fc462efe9e2a1d977..1fd41f0d2e9514a408db394e9a613bc4ee2badd7 100644 (file)
@@ -1008,6 +1008,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
        int i;
 
        for (i = 0; i < 5; i++) {
+               msleep(10);
                data = reg_r(gspca_dev, OV534_REG_STATUS);
 
                switch (data) {
index 2cb7d95f7be7ef7c21d465ac36a4dbed3c86155e..115da169f32af62de40570cdb25f179abad21fba 100644 (file)
@@ -418,7 +418,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
        struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
 
        gspca_dev->vdev.ctrl_handler = hdl;
-       v4l2_ctrl_handler_init(hdl, 4);
+       v4l2_ctrl_handler_init(hdl, 5);
 
        sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                                        V4L2_CID_CONTRAST, 0, 15, 1, 7);
index ad098202d7f0fa086900d9bbde499615917f7b2f..b9c6f17eabb245fde118e3b198f163782d92a7bc 100644 (file)
@@ -1761,7 +1761,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_SATURATION, 0, 255, 1, 127);
        sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_HUE, -180, 180, 1, 0);
-       v4l2_ctrl_cluster(4, &sd->brightness);
 
        sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_GAMMA, 0, 255, 1, 0x10);
@@ -1770,7 +1769,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
        sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
-       v4l2_ctrl_cluster(2, &sd->blue);
 
        if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
            sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
@@ -1779,7 +1777,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_HFLIP, 0, 1, 1, 0);
                sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_VFLIP, 0, 1, 1, 0);
-               v4l2_ctrl_cluster(2, &sd->hflip);
        }
 
        if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
@@ -1794,6 +1791,20 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        V4L2_CID_GAIN, 0, 28, 1, 0);
                sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+       }
+
+       sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+                       V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+       if (hdl->error) {
+               pr_err("Could not initialize controls\n");
+               return hdl->error;
+       }
+
+       v4l2_ctrl_cluster(4, &sd->brightness);
+       v4l2_ctrl_cluster(2, &sd->blue);
+       if (sd->hflip)
+               v4l2_ctrl_cluster(2, &sd->hflip);
+       if (sd->autogain) {
                if (sd->sensor == SENSOR_SOI968)
                        /* this sensor doesn't have the exposure control and
                           autogain is clustered with gain instead. This works
@@ -1803,13 +1814,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
                        /* Otherwise autogain is clustered with exposure. */
                        v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
        }
-
-       sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
-                       V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
-       if (hdl->error) {
-               pr_err("Could not initialize controls\n");
-               return hdl->error;
-       }
        return 0;
 }
 
@@ -2066,10 +2070,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
        set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
        set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
                        v4l2_ctrl_g_ctrl(sd->red));
-       set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
-       set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
-       set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
-                       v4l2_ctrl_g_ctrl(sd->vflip));
+       if (sd->gain)
+               set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+       if (sd->exposure)
+               set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+       if (sd->hflip)
+               set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+                               v4l2_ctrl_g_ctrl(sd->vflip));
 
        reg_w1(gspca_dev, 0x1007, 0x20);
        reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2172,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
        int avg_lum;
 
-       if (!v4l2_ctrl_g_ctrl(sd->autogain))
+       if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
                return;
 
        avg_lum = atomic_read(&sd->avg_lum);
index 4d1696d1a7f4022d0cca06a90f40f6a912a5b877..f38faa9b37c3078270abdf408a29b65560e23fde 100644 (file)
@@ -3120,7 +3120,7 @@ static const struct sd_desc sd_desc = {
                        | (SENSOR_ ## sensor << 8) \
                        | (flags)
 static const struct usb_device_id device_table[] = {
-       {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+       {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
        {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
        {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
        {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
index 057929e165ab6f32fa875736f782d21ca8218c7f..5462ce2f60ea9dc647572a0ae93796c26f1b3775 100644 (file)
@@ -866,10 +866,10 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
        pci_write_config_dword(pdev, 0x40, 0xffff);
 
        IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
-                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  "irq: %d, latency: %d, memory: 0x%llx\n",
                   pdev->device, pdev->revision, pdev->bus->number,
                   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
-                  pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+                  pdev->irq, pci_latency, (u64)itv->base_addr);
 
        return 0;
 }
@@ -1007,7 +1007,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        itv->cxhdl.priv = itv;
        itv->cxhdl.func = ivtv_api_func;
 
-       IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+       IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
 
        /* PCI Device Setup */
        retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
                goto free_mem;
 
        /* map io memory */
-       IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+       IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
        itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
                                       IVTV_ENCODER_SIZE);
        if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        }
 
        if (itv->has_cx23415) {
-               IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                               itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+               IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                               (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
                itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
                                IVTV_DECODER_SIZE);
                if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
        }
 
        /* map registers memory */
-       IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
-                  itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+       IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+                  (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        itv->reg_mem =
            ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        if (!itv->reg_mem) {
index 2e220028aad2f383417904ce9d629d5bb4857d4e..a7e00f8938f818a1b2a0730fac2517f5005d65fb 100644 (file)
@@ -622,7 +622,7 @@ struct ivtv {
        struct v4l2_subdev *sd_video;   /* controlling video decoder subdev */
        struct v4l2_subdev *sd_audio;   /* controlling audio subdev */
        struct v4l2_subdev *sd_muxer;   /* controlling audio muxer subdev */
-       u32 base_addr;                  /* PCI resource base address */
+       resource_size_t base_addr;      /* PCI resource base address */
        volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
        volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
        volatile void __iomem *reg_mem; /* pointer to mapped registers */
index d2dec585e61b8d3c8bd2940f220bcaaf21c668a9..3945556f573384f77fb19752ac05b4bfdba23464 100644 (file)
@@ -110,22 +110,6 @@ enum {
        V4L2_M2M_DST = 1,
 };
 
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
-       switch (type) {
-       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-               return &q_data[V4L2_M2M_SRC];
-       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
-               return &q_data[V4L2_M2M_DST];
-       default:
-               BUG();
-       }
-       return NULL;
-}
-
 #define V4L2_CID_TRANS_TIME_MSEC       V4L2_CID_PRIVATE_BASE
 #define V4L2_CID_TRANS_NUM_BUFS                (V4L2_CID_PRIVATE_BASE + 1)
 
@@ -198,8 +182,26 @@ struct m2mtest_ctx {
        int                     aborting;
 
        struct v4l2_m2m_ctx     *m2m_ctx;
+
+       /* Source and destination queue data */
+       struct m2mtest_q_data   q_data[2];
 };
 
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+                                        enum v4l2_buf_type type)
+{
+       switch (type) {
+       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+               return &ctx->q_data[V4L2_M2M_SRC];
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+               return &ctx->q_data[V4L2_M2M_DST];
+       default:
+               BUG();
+       }
+       return NULL;
+}
+
+
 static struct v4l2_queryctrl *get_ctrl(int id)
 {
        int i;
@@ -223,7 +225,7 @@ static int device_process(struct m2mtest_ctx *ctx,
        int tile_w, bytes_left;
        int width, height, bytesperline;
 
-       q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
 
        width   = q_data->width;
        height  = q_data->height;
@@ -436,7 +438,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
        if (!vq)
                return -EINVAL;
 
-       q_data = get_q_data(f->type);
+       q_data = get_q_data(ctx, f->type);
 
        f->fmt.pix.width        = q_data->width;
        f->fmt.pix.height       = q_data->height;
@@ -535,7 +537,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
        if (!vq)
                return -EINVAL;
 
-       q_data = get_q_data(f->type);
+       q_data = get_q_data(ctx, f->type);
        if (!q_data)
                return -EINVAL;
 
@@ -747,7 +749,7 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
        struct m2mtest_q_data *q_data;
        unsigned int size, count = *nbuffers;
 
-       q_data = get_q_data(vq->type);
+       q_data = get_q_data(ctx, vq->type);
 
        size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
 
@@ -775,7 +777,7 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
 
        dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
 
-       q_data = get_q_data(vb->vb2_queue->type);
+       q_data = get_q_data(ctx, vb->vb2_queue->type);
 
        if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
                dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@ static int m2mtest_open(struct file *file)
        ctx->transtime = MEM2MEM_DEF_TRANSTIME;
        ctx->num_processed = 0;
 
+       ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+       ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
        ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
 
        if (IS_ERR(ctx->m2m_ctx)) {
@@ -986,9 +991,6 @@ static int m2mtest_probe(struct platform_device *pdev)
                goto err_m2m;
        }
 
-       q_data[V4L2_M2M_SRC].fmt = &formats[0];
-       q_data[V4L2_M2M_DST].fmt = &formats[0];
-
        return 0;
 
        v4l2_m2m_release(dev->m2m_dev);
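The m2mtest changes above move the source/destination queue data from one global array into each open context, so concurrent opens no longer share format state. A minimal sketch of the per-context lookup pattern, with simplified stand-in types:

#include <stdio.h>

enum buf_type { BUF_OUTPUT, BUF_CAPTURE };

struct q_data {
	unsigned int width, height;
};

struct ctx {
	struct q_data q[2];	/* [0] = source/output, [1] = destination/capture */
};

static struct q_data *get_q_data(struct ctx *c, enum buf_type t)
{
	return &c->q[t == BUF_OUTPUT ? 0 : 1];
}

int main(void)
{
	struct ctx a = { .q = { { 640, 480 }, { 640, 480 } } };
	struct ctx b = { .q = { { 320, 240 }, { 320, 240 } } };

	get_q_data(&a, BUF_CAPTURE)->width = 1280;	/* leaves b untouched */
	printf("a: %u, b: %u\n", a.q[1].width, b.q[1].width);
	return 0;
}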
index ded26b7286faaea01bac1220dfc9c4ea8a0a273b..637bde8aca28e25c2799cd85aded6eedafa3075c 100644 (file)
@@ -83,6 +83,7 @@
 #define CSICR1_INV_DATA                (1 << 3)
 #define CSICR1_INV_PCLK                (1 << 2)
 #define CSICR1_REDGE           (1 << 1)
+#define CSICR1_FMT_MASK                (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
 
 #define SHIFT_STATFF_LEVEL     22
 #define SHIFT_RXFF_LEVEL       19
@@ -230,6 +231,7 @@ struct mx2_prp_cfg {
        u32 src_pixel;
        u32 ch1_pixel;
        u32 irq_flags;
+       u32 csicr1;
 };
 
 /* prp resizing parameters */
@@ -330,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .ch1_pixel      = 0x2ca00565, /* RGB565 */
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
                                                PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+                       .csicr1         = 0,
                }
        },
        {
@@ -343,6 +346,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_PACK_DIR,
                }
        },
        {
@@ -356,6 +360,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
                        .irq_flags      = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
                                        PRP_INTR_CH2FC | PRP_INTR_LBOVF |
                                        PRP_INTR_CH2OVF,
+                       .csicr1         = CSICR1_SWAP16_EN,
                }
        },
 };
@@ -984,7 +989,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct mx2_camera_dev *pcdev = ici->priv;
        struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
-       const struct soc_camera_format_xlate *xlate;
        unsigned long common_flags;
        int ret;
        int bytesperline;
@@ -1029,24 +1033,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
                return ret;
        }
 
-       xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
-       if (!xlate) {
-               dev_warn(icd->parent, "Format %x not found\n", pixfmt);
-               return -EINVAL;
-       }
-
-       if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
-               csicr1 |= CSICR1_PACK_DIR;
-               csicr1 &= ~CSICR1_SWAP16_EN;
-               dev_dbg(icd->parent, "already yuyv format, don't convert\n");
-       } else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
-               csicr1 &= ~CSICR1_PACK_DIR;
-               csicr1 |= CSICR1_SWAP16_EN;
-               dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
-       } else {
-               dev_warn(icd->parent, "mbus format not supported\n");
-               return -EINVAL;
-       }
+       csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
 
        if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
                csicr1 |= CSICR1_REDGE;
@@ -1155,18 +1142,6 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
                }
        }
 
-       if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
-               formats++;
-               if (xlate) {
-                       xlate->host_fmt =
-                               soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
-                       xlate->code     = code;
-                       dev_dbg(dev, "Providing host format %s for sensor code %d\n",
-                               xlate->host_fmt->name, code);
-                       xlate++;
-               }
-       }
-
        /* Generic pass-through */
        formats++;
        if (xlate) {
index 8a4935ecc655e9c114cd2f7defb51498f50d38c5..dd91da26f1b088f66bdb40b212fa9b1c84a46b5a 100644 (file)
@@ -888,12 +888,12 @@ static const struct preview_update update_attrs[] = {
                preview_config_contrast,
                NULL,
                offsetof(struct prev_params, contrast),
-               0, true,
+               0, 0, true,
        }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
                preview_config_brightness,
                NULL,
                offsetof(struct prev_params, brightness),
-               0, true,
+               0, 0, true,
        },
 };
 
@@ -1102,7 +1102,7 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
        unsigned int elv = prev->crop.top + prev->crop.height - 1;
        u32 features;
 
-       if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
+       if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
                sph -= 2;
                eph += 2;
                slv -= 2;
index af2d9086d7e8a6e26bbb2d14171b68ce19cf19b5..b4c679b3fb0f7a052550a7d5a71ea42367e49ceb 100644 (file)
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/uaccess.h>
 #include <linux/isa.h>
 #include <asm/io.h>
index 354574591908ee9cbbea18a5c518e04e2b9b4839..725812aa0c3044f5ffa31190a734e75ffcc9c01b 100644 (file)
@@ -350,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
                if (pixm)
                        sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
                else
-                       sizes[i] = size;
+                       sizes[i] = max_t(u32, size, frame->payload[i]);
+
                allocators[i] = ctx->fimc_dev->alloc_ctx;
        }
 
@@ -479,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
 static int fimc_capture_open(struct file *file)
 {
        struct fimc_dev *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
-
-       if (ret)
-               return ret;
+       int ret;
 
        dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
 
-       /* Return if the corresponding video mem2mem node is already opened. */
        if (fimc_m2m_active(fimc))
                return -EBUSY;
 
        set_bit(ST_CAPT_BUSY, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               return ret;
 
-       if (++fimc->vid_cap.refcnt == 1) {
-               ret = fimc_pipeline_initialize(&fimc->pipeline,
-                              &fimc->vid_cap.vfd->entity, true);
-               if (ret < 0) {
-                       dev_err(&fimc->pdev->dev,
-                               "Video pipeline initialization failed\n");
-                       pm_runtime_put_sync(&fimc->pdev->dev);
-                       fimc->vid_cap.refcnt--;
-                       v4l2_fh_release(file);
-                       clear_bit(ST_CAPT_BUSY, &fimc->state);
-                       return ret;
-               }
-               ret = fimc_capture_ctrls_create(fimc);
+       ret = v4l2_fh_open(file);
+       if (ret)
+               return ret;
 
-               if (!ret && !fimc->vid_cap.user_subdev_api)
-                       ret = fimc_capture_set_default_format(fimc);
+       if (++fimc->vid_cap.refcnt != 1)
+               return 0;
+
+       ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                      &fimc->vid_cap.vfd->entity, true);
+       if (ret < 0) {
+               clear_bit(ST_CAPT_BUSY, &fimc->state);
+               pm_runtime_put_sync(&fimc->pdev->dev);
+               fimc->vid_cap.refcnt--;
+               v4l2_fh_release(file);
+               return ret;
        }
+       ret = fimc_capture_ctrls_create(fimc);
+
+       if (!ret && !fimc->vid_cap.user_subdev_api)
+               ret = fimc_capture_set_default_format(fimc);
+
        return ret;
 }
 
@@ -818,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_ctx *ctx = fimc->vid_cap.ctx;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        return fimc_fill_format(&ctx->d_frame, f);
 }
 
@@ -833,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
        struct v4l2_mbus_framefmt mf;
        struct fimc_fmt *ffmt = NULL;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
-
        if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
                fimc_capture_try_format(ctx, &pix->width, &pix->height,
                                        NULL, &pix->pixelformat,
@@ -887,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
        struct fimc_fmt *s_fmt = NULL;
        int ret, i;
 
-       if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-               return -EINVAL;
        if (vb2_is_busy(&fimc->vid_cap.vbq))
                return -EBUSY;
 
@@ -924,10 +919,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
                pix->width  = mf->width;
                pix->height = mf->height;
        }
+
        fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
        for (i = 0; i < ff->fmt->colplanes; i++)
-               ff->payload[i] =
-                       (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+               ff->payload[i] = pix->plane_fmt[i].sizeimage;
 
        set_frame_bounds(ff, pix->width, pix->height);
        /* Reset the composition rectangle if not yet configured */
@@ -1045,18 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
 {
        struct fimc_dev *fimc = video_drvdata(file);
        struct fimc_pipeline *p = &fimc->pipeline;
+       struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
        int ret;
 
        if (fimc_capture_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
-                                   p->m_pipeline);
+       ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        if (fimc->vid_cap.user_subdev_api) {
                ret = fimc_pipeline_validate(fimc);
-               if (ret)
+               if (ret < 0) {
+                       media_entity_pipeline_stop(&sd->entity);
                        return ret;
+               }
        }
        return vb2_streamon(&fimc->vid_cap.vbq, type);
 }
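The streamon path above now checks media_entity_pipeline_start() and stops the pipeline again when validation fails, instead of leaving a started pipeline behind. A tiny sketch of that acquire-then-undo error path, with stub functions standing in for the media API:

#include <stdio.h>

static int pipeline_start(void)
{
	printf("pipeline started\n");
	return 0;
}

static void pipeline_stop(void)
{
	printf("pipeline stopped\n");
}

static int validate(void)
{
	return -1;	/* pretend validation fails */
}

static int streamon(void)
{
	int ret = pipeline_start();

	if (ret < 0)
		return ret;

	ret = validate();
	if (ret < 0) {
		pipeline_stop();	/* undo the start before bailing out */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("streamon: %d\n", streamon());
	return 0;
}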
index fedcd561ba27f37122caa01adf2da1199c3db536..a4646ca1d56f31fb30eb35d7cd9829fa5aa9f46a 100644 (file)
@@ -153,7 +153,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+               .name           = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
                .fourcc         = V4L2_PIX_FMT_NV12M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -161,7 +161,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 2,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+               .name           = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
                .fourcc         = V4L2_PIX_FMT_YUV420M,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 2, 2 },
@@ -169,7 +169,7 @@ static struct fimc_fmt fimc_formats[] = {
                .colplanes      = 3,
                .flags          = FMT_FLAGS_M2M,
        }, {
-               .name           = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+               .name           = "YUV 4:2:0 non-contig. 2p, tiled",
                .fourcc         = V4L2_PIX_FMT_NV12MT,
                .color          = FIMC_FMT_YCBCR420,
                .depth          = { 8, 4 },
@@ -615,7 +615,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
        ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
 
        if (!handler->error) {
-               v4l2_ctrl_cluster(3, &ctrls->colorfx);
+               v4l2_ctrl_cluster(2, &ctrls->colorfx);
                ctrls->ready = true;
        }
 
@@ -641,7 +641,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
        if (!ctrls->ready)
                return;
 
-       mutex_lock(&ctrls->handler.lock);
+       mutex_lock(ctrls->handler.lock);
        v4l2_ctrl_activate(ctrls->rotate, active);
        v4l2_ctrl_activate(ctrls->hflip, active);
        v4l2_ctrl_activate(ctrls->vflip, active);
@@ -660,7 +660,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
                ctx->hflip    = 0;
                ctx->vflip    = 0;
        }
-       mutex_unlock(&ctrls->handler.lock);
+       mutex_unlock(ctrls->handler.lock);
 }
 
 /* Update maximum value of the alpha color control */
@@ -741,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
        pix->width = width;
 
        for (i = 0; i < pix->num_planes; ++i) {
-               u32 bpl = pix->plane_fmt[i].bytesperline;
-               u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+               struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+               u32 bpl = plane_fmt->bytesperline;
 
                if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
                        bpl = pix->width; /* Planar */
@@ -754,8 +754,9 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
                if (i == 0) /* Same bytesperline for each plane. */
                        bytesperline = bpl;
 
-               pix->plane_fmt[i].bytesperline = bytesperline;
-               *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+               plane_fmt->bytesperline = bytesperline;
+               plane_fmt->sizeimage = max((pix->width * pix->height *
+                                  fmt->depth[i]) / 8, plane_fmt->sizeimage);
        }
 }
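The sizeimage handling above computes width * height * depth[i] / 8 per plane and never shrinks a value the caller already supplied. A short sketch using the { 8, 4 } bit depths this driver's format table lists for V4L2_PIX_FMT_NV12M:

#include <stdio.h>

static unsigned int plane_size(unsigned int width, unsigned int height,
			       unsigned int depth_bits, unsigned int requested)
{
	unsigned int computed = width * height * depth_bits / 8;

	return computed > requested ? computed : requested;
}

int main(void)
{
	/* 1280x720 NV12M: full-resolution luma plane, half-size chroma plane. */
	printf("Y    : %u bytes\n", plane_size(1280, 720, 8, 0));
	printf("CbCr : %u bytes\n", plane_size(1280, 720, 4, 0));
	return 0;
}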
 
index 400d701aef04126d8d3b469a51b7a9adb360985c..74ff310db30cd6755393b5b6eaf2a933495fa0e9 100644 (file)
@@ -451,34 +451,44 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
 static int fimc_lite_open(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       int ret = v4l2_fh_open(file);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        set_bit(ST_FLITE_IN_USE, &fimc->state);
-       pm_runtime_get_sync(&fimc->pdev->dev);
+       ret = pm_runtime_get_sync(&fimc->pdev->dev);
+       if (ret < 0)
+               goto done;
 
-       if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
-               return ret;
+       ret = v4l2_fh_open(file);
+       if (ret < 0)
+               goto done;
 
-       ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
-                                      true);
-       if (ret < 0) {
-               v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
-               pm_runtime_put_sync(&fimc->pdev->dev);
-               fimc->ref_count--;
-               v4l2_fh_release(file);
-               clear_bit(ST_FLITE_IN_USE, &fimc->state);
-       }
+       if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+               ret = fimc_pipeline_initialize(&fimc->pipeline,
+                                              &fimc->vfd->entity, true);
+               if (ret < 0) {
+                       pm_runtime_put_sync(&fimc->pdev->dev);
+                       fimc->ref_count--;
+                       v4l2_fh_release(file);
+                       clear_bit(ST_FLITE_IN_USE, &fimc->state);
+               }
 
-       fimc_lite_clear_event_counters(fimc);
+               fimc_lite_clear_event_counters(fimc);
+       }
+done:
+       mutex_unlock(&fimc->lock);
        return ret;
 }
 
 static int fimc_lite_close(struct file *file)
 {
        struct fimc_lite *fimc = video_drvdata(file);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
 
        if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
                clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -492,20 +502,39 @@ static int fimc_lite_close(struct file *file)
        if (fimc->ref_count == 0)
                vb2_queue_release(&fimc->vb_queue);
 
-       return v4l2_fh_release(file);
+       ret = v4l2_fh_release(file);
+
+       mutex_unlock(&fimc->lock);
+       return ret;
 }
 
 static unsigned int fimc_lite_poll(struct file *file,
                                   struct poll_table_struct *wait)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_poll(&fimc->vb_queue, file, wait);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return POLLERR;
+
+       ret = vb2_poll(&fimc->vb_queue, file, wait);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct fimc_lite *fimc = video_drvdata(file);
-       return vb2_mmap(&fimc->vb_queue, vma);
+       int ret;
+
+       if (mutex_lock_interruptible(&fimc->lock))
+               return -ERESTARTSYS;
+
+       ret = vb2_mmap(&fimc->vb_queue, vma);
+       mutex_unlock(&fimc->lock);
+
+       return ret;
 }
 
 static const struct v4l2_file_operations fimc_lite_fops = {
@@ -762,7 +791,9 @@ static int fimc_lite_streamon(struct file *file, void *priv,
        if (fimc_lite_active(fimc))
                return -EBUSY;
 
-       media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+       if (ret < 0)
+               return ret;
 
        ret = fimc_pipeline_validate(fimc);
        if (ret) {
@@ -1508,7 +1539,7 @@ static int fimc_lite_suspend(struct device *dev)
                return 0;
 
        ret = fimc_lite_stop_capture(fimc, suspend);
-       if (ret)
+       if (ret < 0 || !fimc_lite_active(fimc))
                return ret;
 
        return fimc_pipeline_shutdown(&fimc->pipeline);
index 6753c45631b856e1a06d19492e56e97edf01f9f6..52cef4865423ef2be451df098d0c2ada3412fe21 100644 (file)
@@ -193,9 +193,13 @@ int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
 
 int fimc_pipeline_shutdown(struct fimc_pipeline *p)
 {
-       struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
+       struct media_entity *me;
        int ret;
 
+       if (!p || !p->subdevs[IDX_SENSOR])
+               return -EINVAL;
+
+       me = &p->subdevs[IDX_SENSOR]->entity;
        mutex_lock(&me->parent->graph_mutex);
        ret = __fimc_pipeline_shutdown(p);
        mutex_unlock(&me->parent->graph_mutex);
@@ -498,12 +502,12 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
  * @source: the source entity to create links to all fimc entities from
  * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
  * @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
  */
 static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                                            struct media_entity *source,
                                            struct v4l2_subdev *sensor,
-                                           int pad, int fimc_id)
+                                           int pad, int link_mask)
 {
        struct fimc_sensor_info *s_info;
        struct media_entity *sink;
@@ -520,7 +524,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc[i]->variant->has_cam_if)
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
 
                sink = &fmd->fimc[i]->vid_cap.subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -552,7 +556,10 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
                if (!fmd->fimc_lite[i])
                        continue;
 
-               flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+               if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+                       flags = MEDIA_LNK_FL_ENABLED;
+               else
+                       flags = 0;
 
                sink = &fmd->fimc_lite[i]->subdev.entity;
                ret = media_entity_create_link(source, pad, sink,
@@ -614,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
        struct s5p_fimc_isp_info *pdata;
        struct fimc_sensor_info *s_info;
        struct media_entity *source, *sink;
-       int i, pad, fimc_id = 0;
-       int ret = 0;
-       u32 flags;
+       int i, pad, fimc_id = 0, ret = 0;
+       u32 flags, link_mask = 0;
 
        for (i = 0; i < fmd->num_sensors; i++) {
                if (fmd->sensor[i].subdev == NULL)
@@ -668,19 +674,20 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                if (source == NULL)
                        continue;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
-       fimc_id = 0;
        for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
                if (fmd->csis[i].sd == NULL)
                        continue;
                source = &fmd->csis[i].sd->entity;
                pad = CSIS_PAD_SOURCE;
 
+               link_mask = 1 << fimc_id++;
                ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
-                                                      pad, fimc_id++);
+                                                      pad, link_mask);
        }
 
        /* Create immutable links between each FIMC's subdev and video node */
@@ -734,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
 }
 
 static int __fimc_md_set_camclk(struct fimc_md *fmd,
-                                        struct fimc_sensor_info *s_info,
-                                        bool on)
+                               struct fimc_sensor_info *s_info,
+                               bool on)
 {
        struct s5p_fimc_isp_info *pdata = s_info->pdata;
        struct fimc_camclk_info *camclk;
@@ -744,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
        if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
                return -EINVAL;
 
-       if (s_info->clk_on == on)
-               return 0;
        camclk = &fmd->camclk[pdata->clk_id];
 
-       dbg("camclk %d, f: %lu, clk: %p, on: %d",
-           pdata->clk_id, pdata->clk_frequency, camclk, on);
+       dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+           pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
 
        if (on) {
                if (camclk->use_count > 0 &&
@@ -760,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
                        clk_set_rate(camclk->clock, pdata->clk_frequency);
                        camclk->frequency = pdata->clk_frequency;
                        ret = clk_enable(camclk->clock);
+                       dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+                           clk_get_rate(camclk->clock));
                }
-               s_info->clk_on = 1;
-               dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
-                   clk_get_rate(camclk->clock));
-
                return ret;
        }
 
@@ -773,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
 
        if (--camclk->use_count == 0) {
                clk_disable(camclk->clock);
-               s_info->clk_on = 0;
                dbg("Disabled camclk %d", pdata->clk_id);
        }
        return ret;
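The camclk handling above is plain reference counting: the clock is enabled for the first user and disabled when the last one goes away, with use_count now the single source of truth instead of the removed per-sensor clk_on flag. A self-contained sketch of that pattern (hypothetical helpers, not the kernel clk API):

#include <stdio.h>

struct camclk {
	int use_count;
	int enabled;
};

static void camclk_get(struct camclk *c)
{
	if (c->use_count++ == 0) {
		c->enabled = 1;			/* first user: switch it on */
		printf("clock enabled\n");
	}
}

static void camclk_put(struct camclk *c)
{
	if (--c->use_count == 0) {
		c->enabled = 0;			/* last user gone: switch it off */
		printf("clock disabled\n");
	}
}

int main(void)
{
	struct camclk clk = { 0, 0 };

	camclk_get(&clk);
	camclk_get(&clk);	/* second sensor on the same clock: no re-enable */
	camclk_put(&clk);
	camclk_put(&clk);
	return 0;
}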
@@ -789,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
  * devices to which sensors can be attached, either directly or through
  * the MIPI CSI receiver. The clock is allowed here to be used by
  * multiple sensors concurrently if they use same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
  * This function should only be called when the graph mutex is held.
  */
 int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
index 3b8a3492a17671fc86658db8eb410861e8cba8f2..1f5dbaff5442a7df686b4e6dc023952b8436939f 100644 (file)
@@ -47,7 +47,6 @@ struct fimc_camclk_info {
  * @pdata: sensor's attributes passed as media device's platform data
  * @subdev: image sensor v4l2 subdev
  * @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
  *
  * This data structure applies to image sensor and the writeback subdevs.
  */
@@ -55,7 +54,6 @@ struct fimc_sensor_info {
        struct s5p_fimc_isp_info *pdata;
        struct v4l2_subdev *subdev;
        struct fimc_dev *host;
-       bool clk_on;
 };
 
 /**
index 053a8a872fd743ca1f8e3cae687f827247b9453e..a19bece41ba9d0c4a53845943c8219e27659382b 100644 (file)
                                                                decoded pic */
 #define S5P_FIMV_SI_DISPLAY_Y_ADR      0x2010 /* luma addr of displayed pic */
 #define S5P_FIMV_SI_DISPLAY_C_ADR      0x2014 /* chroma addr of displayed pic */
+
 #define S5P_FIMV_SI_CONSUMED_BYTES     0x2018 /* Consumed number of bytes to
                                                        decode a frame */
 #define S5P_FIMV_SI_DISPLAY_STATUS     0x201c /* status of decoded picture */
 
+#define S5P_FIMV_SI_DECODE_Y_ADR       0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR       0x2028 /* chroma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS      0x202c /* status of decoded picture */
+
 #define S5P_FIMV_SI_CH0_SB_ST_ADR      0x2044 /* start addr of stream buf */
 #define S5P_FIMV_SI_CH0_SB_FRM_SIZE    0x2048 /* size of stream buf */
 #define S5P_FIMV_SI_CH0_DESC_ADR       0x204c /* addr of descriptor buf */
index c25ec022d2678f734e46b1f1f3b5b0d496150c8d..feea867f318c25a6bce189d2bf6e95fb91a42422 100644 (file)
@@ -627,13 +627,13 @@ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
 
        switch (ctrl->id) {
        case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
-               ctx->loop_filter_mpeg4 = ctrl->val;
+               ctx->display_delay = ctrl->val;
                break;
        case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
                ctx->display_delay_enable = ctrl->val;
                break;
        case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
-               ctx->display_delay = ctrl->val;
+               ctx->loop_filter_mpeg4 = ctrl->val;
                break;
        case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
                ctx->slice_interface = ctrl->val;
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
 
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_dec_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
index acedb2004be325e541928cf1f567c525d9a5ffa6..158b78989b89dc43dd7ced55dec2ca91a6baa28e 100644 (file)
@@ -243,12 +243,6 @@ static struct mfc_control controls[] = {
                .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
                .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
                .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-               .menu_skip_mask = ~(
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
-                               (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
-                               ),
        },
        {
                .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@ static struct mfc_control controls[] = {
                .type = V4L2_CTRL_TYPE_MENU,
                .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
                .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
-               .default_value = 0,
+               .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
                .menu_skip_mask = 0,
        },
        {
@@ -534,7 +528,7 @@ static struct mfc_control controls[] = {
                .type = V4L2_CTRL_TYPE_MENU,
                .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
                .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
-               .default_value = 0,
+               .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
                .menu_skip_mask = 0,
        },
        {
@@ -907,6 +901,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
                        mfc_err("failed to try output format\n");
                        return -EINVAL;
                }
+               v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+                       &pix_fmt_mp->height, 4, 1080, 1, 0);
        } else {
                mfc_err("invalid buf type\n");
                return -EINVAL;
@@ -1777,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
        }
        for (i = 0; i < NUM_CTRLS; i++) {
                if (IS_MFC51_PRIV(controls[i].id)) {
+                       memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
                        cfg.ops = &s5p_mfc_enc_ctrl_ops;
                        cfg.id = controls[i].id;
                        cfg.min = controls[i].minimum;
index db83836e6a9fc05b98a11d1edf7a267c74b99b53..5932d1c782c5dba36e18b55a6a3dcf6cda9f77da 100644 (file)
@@ -57,10 +57,12 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
                                        S5P_FIMV_SI_DISPLAY_Y_ADR) << \
                                        MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dec_y_adr()                (readl(dev->regs_base + \
-                                       S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+                                       S5P_FIMV_SI_DECODE_Y_ADR) << \
                                        MFC_OFFSET_SHIFT)
 #define s5p_mfc_get_dspl_status()      readl(dev->regs_base + \
                                                S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status()       readl(dev->regs_base + \
+                                               S5P_FIMV_SI_DECODE_STATUS)
 #define s5p_mfc_get_frame_type()       (readl(dev->regs_base + \
                                                S5P_FIMV_DECODE_FRAME_TYPE) \
                                        & S5P_FIMV_DECODE_FRAME_MASK)
index 764eac6bcc4c91d732880b31f024389f03fde692..cf962a4662766db5b1888f01c7e910fe245447c6 100644 (file)
@@ -13,8 +13,7 @@
 #ifndef S5P_MFC_SHM_H_
 #define S5P_MFC_SHM_H_
 
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
        EXTENEDED_DECODE_STATUS = 0x00, /* D */
        SET_FRAME_TAG           = 0x04, /* D */
        GET_FRAME_TAG_TOP       = 0x08, /* D */
index f7b35ff443bf976d192ee5522bbd765beb1a04cb..fb99ff18be077255810f43e71fd0a9163ee883b5 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_SMIAPP
        tristate "SMIA++/SMIA sensor support"
-       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
        select VIDEO_SMIAPP_PLL
        ---help---
          This is a generic driver for SMIA++/SMIA camera modules.
index f518026cb67b6e4099522b75ccdc12eeff5f8f24..9cf5bda35fbe1cfe332e8b18ae3d9218b3e8f54b 100644 (file)
@@ -31,7 +31,9 @@
 #include <linux/device.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/regulator/consumer.h>
+#include <linux/slab.h>
 #include <linux/v4l2-mediabus.h>
 #include <media/v4l2-device.h>
 
index 3e050e12153b3522d67c701c3187dcfd6b037536..1ad5ab6ce5cf9ea46187d2ac03224033957b1247 100644 (file)
@@ -1178,7 +1178,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
                return 0;
        if (vt->type == t->mode && analog_ops->get_afc)
                vt->afc = analog_ops->get_afc(&t->fe);
-       if (t->mode != V4L2_TUNER_RADIO) {
+       if (vt->type != V4L2_TUNER_RADIO) {
                vt->capability |= V4L2_TUNER_CAP_NORM;
                vt->rangelow = tv_range[0] * 16;
                vt->rangehigh = tv_range[1] * 16;
index 5ccbd4629f9c34eb48322e29a5c0dc3aec668a4b..0cbada18f6f57376d980d34345335279499fec03 100644 (file)
@@ -656,7 +656,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
        SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
        SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
        SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
-       if (ops->vidioc_g_parm || vdev->current_norm)
+       if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
                set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
        SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
        SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -679,6 +679,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
        SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
        SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
        SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+       SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
        /* yes, really vidioc_subscribe_event */
        SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
        SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
index 91be4e871f43644c2052c942092a3f808688c9a8..d7fa8962d8b3129940514cb487d0d75a8931dbdd 100644 (file)
@@ -1680,6 +1680,7 @@ static long __video_do_ioctl(struct file *file,
                                break;
 
                        ret = 0;
+                       p->parm.capture.readbuffers = 2;
                        if (ops->vidioc_g_std)
                                ret = ops->vidioc_g_std(file, fh, &std);
                        if (ret == 0)
index 4d7391ec80013279d965db1070f978f3948cf241..aae1720b2f2d14a0ceb1ca1080fafdb125d28fc4 100644 (file)
@@ -2561,7 +2561,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs)
        } else if (vino_drvdata->decoder
                   && (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
                int input;
-               int data_norm;
+               int data_norm = 0;
                v4l2_std_id norm;
 
                input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input)
                }
 
                if (vino_drvdata->decoder_owner == vcs->channel) {
-                       int data_norm;
+                       int data_norm = 0;
                        v4l2_std_id norm;
 
                        ret = decoder_call(video, s_routing,
index 0960d7f0d3947a7fa4b6ae727b5ca55739b2b854..08c10240e70fba4063f2abd8cab3904b1f935a4b 100644 (file)
@@ -1149,10 +1149,14 @@ static ssize_t
 vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
 {
        struct vivi_dev *dev = video_drvdata(file);
+       int err;
 
        dprintk(dev, 1, "read called\n");
-       return vb2_read(&dev->vb_vidq, data, count, ppos,
+       mutex_lock(&dev->mutex);
+       err = vb2_read(&dev->vb_vidq, data, count, ppos,
                       file->f_flags & O_NONBLOCK);
+       mutex_unlock(&dev->mutex);
+       return err;
 }
 
 static unsigned int
index e129c820df7da7d6430e62891558f92cdbe235ab..92144ed1ad469d8257eab6636c4d7f8cc4c65180 100644 (file)
@@ -286,6 +286,7 @@ config TWL6040_CORE
        depends on I2C=y && GENERIC_HARDIRQS
        select MFD_CORE
        select REGMAP_I2C
+       select IRQ_DOMAIN
        default n
        help
          Say yes here if you want support for Texas Instruments TWL6040 audio
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644 (file)
index 63b30b1..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
-       u8 first;
-       u8 last;
-       u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
-       u8 nranges;
-       u8 bankid;
-       const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
-       u8 nbanks;
-       const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
-       u8 slave_addr;
-       const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
-       [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
-               AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
-       [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
-               AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
-       [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
-       [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
-       [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
-       [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
-       [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
-       [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
-       [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
-       [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
-       [AB5500_BANK_FG_BATTCOM_ACC] = {
-               AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
-       [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
-       [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
-       [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
-       [AB5500_BANK_AUDIO_HEADSETUSB] = {
-               AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
-       u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
-       u8 reg, u8 bitmask, u8 bitvalues);
index 3fcdab3eb8eb67fb02a08c02166613e11742924c..03df422feb763ab075ac5fa741ef09b71c066d1d 100644 (file)
@@ -49,10 +49,72 @@ static struct regmap_config mc13xxx_regmap_spi_config = {
        .reg_bits = 7,
        .pad_bits = 1,
        .val_bits = 24,
+       .write_flag_mask = 0x80,
 
        .max_register = MC13XXX_NUMREGS,
 
        .cache_type = REGCACHE_NONE,
+       .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+                               void *val, size_t val_size)
+{
+       unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+       unsigned char r[4];
+       unsigned char *p = val;
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+       struct spi_transfer t = {
+               .tx_buf = w,
+               .rx_buf = r,
+               .len = 4,
+       };
+
+       struct spi_message m;
+       int ret;
+
+       if (val_size != 3 || reg_size != 1)
+               return -ENOTSUPP;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+       ret = spi_sync(spi, &m);
+
+       memcpy(p, &r[1], 3);
+
+       return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+
+       if (count != 4)
+               return -ENOTSUPP;
+
+       return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use the regmap-spi generic bus implementation here.
+ * The MC13783 chip will get corrupted if the CS signal is deasserted
+ * and on the i.MX31 SoC (the target SoC for the MC13783 PMIC) the SPI
+ * controller has the following errata (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We work around this by accessing the SPI controller with a
+ * single transfer.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+       .write = mc13xxx_spi_write,
+       .read = mc13xxx_spi_read,
 };
 
 static int mc13xxx_spi_probe(struct spi_device *spi)
@@ -73,12 +135,13 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
 
        dev_set_drvdata(&spi->dev, mc13xxx);
        spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
-       spi->bits_per_word = 32;
 
        mc13xxx->dev = &spi->dev;
        mutex_init(&mc13xxx->lock);
 
-       mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+       mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+                                       &mc13xxx_regmap_spi_config);
+
        if (IS_ERR(mc13xxx->regmap)) {
                ret = PTR_ERR(mc13xxx->regmap);
                dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
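
With the config above (reg_bits = 7, pad_bits = 1, val_bits = 24, write_flag_mask = 0x80) every register access fits in one 4-byte SPI frame: a single pre-formatted address byte followed by the 24-bit value, most significant byte first, which is why one spi_sync() transfer per access is enough to keep CS asserted for the whole register. The standalone sketch below only illustrates that framing; the helper names and the example register/value are made up, and the exact bit layout of the address byte is left to regmap as configured above.

#include <stdint.h>
#include <stdio.h>

/*
 * Build the 4-byte frame that mc13xxx_spi_write() sends in one transfer:
 * byte 0 is the address byte regmap already formatted (register bits, pad
 * bit and the 0x80 write flag), bytes 1..3 carry the 24-bit value MSB first.
 */
static void mc13xxx_pack_frame(uint8_t frame[4], uint8_t addr_byte, uint32_t val)
{
        frame[0] = addr_byte;
        frame[1] = (val >> 16) & 0xff;
        frame[2] = (val >> 8) & 0xff;
        frame[3] = val & 0xff;
}

/* Recover the 24-bit value from bytes 1..3 of a read frame, as
 * mc13xxx_spi_read() does when it copies &r[1] into the value buffer. */
static uint32_t mc13xxx_unpack_frame(const uint8_t frame[4])
{
        return ((uint32_t)frame[1] << 16) |
               ((uint32_t)frame[2] << 8) |
                (uint32_t)frame[3];
}

int main(void)
{
        uint8_t frame[4];

        /* 0x80 write flag plus a made-up address byte and a made-up value */
        mc13xxx_pack_frame(frame, 0x80 | 0x30, 0x123456);
        printf("frame: %02x %02x %02x %02x\n",
               frame[0], frame[1], frame[2], frame[3]);
        printf("value: 0x%06x\n", mc13xxx_unpack_frame(frame));
        return 0;
}
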
index 7e96bb2297244f7946537a5da5f78226e6a4db40..41088ecbb2a92e3c6350038ad61f77ceb500e8ce 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
+#include <linux/gpio.h>
 #include <plat/cpu.h>
 #include <plat/usb.h>
 #include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
        dev_dbg(dev, "starting TI HSUSB Controller\n");
 
        pm_runtime_get_sync(dev);
-       spin_lock_irqsave(&omap->lock, flags);
 
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+               /* Hold the PHY in RESET long enough for DIR to go high */
+               udelay(10);
+       }
+
+       spin_lock_irqsave(&omap->lock, flags);
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
        }
 
        spin_unlock_irqrestore(&omap->lock, flags);
+
+       if (pdata->ehci_data->phy_reset) {
+               /* Hold the PHY in RESET long enough for the
+                * PHY to settle and become ready
+                */
+               udelay(10);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[0], 1);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_set_value_cansleep
+                               (pdata->ehci_data->reset_gpio_port[1], 1);
+       }
+
        pm_runtime_put_sync(dev);
 }
 
+static void omap_usbhs_deinit(struct device *dev)
+{
+       struct usbhs_hcd_omap           *omap = dev_get_drvdata(dev);
+       struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+       if (pdata->ehci_data->phy_reset) {
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+               if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+                       gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+       }
+}
+
 
 /**
  * usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        goto end_probe;
 
 err_alloc:
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
 
 err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
 {
        struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
 
+       omap_usbhs_deinit(&pdev->dev);
        iounmap(omap->tll_base);
        iounmap(omap->uhh_base);
        clk_put(omap->init_60m_fclk);
index 00c0aba7eba000de65742b04014419e0115c6eba..c4a69f193a1df1985abfbaeeffb8e39cda933493 100644 (file)
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                }
        }
 
-       ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+       /* Change IRQ into clear on read mode for efficiency */
+       slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+       addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+       reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+       regmap_write(palmas->regmap[slave], addr, reg);
+
+       ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
                        IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
                        &palmas->irq_data);
        if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
                goto err;
        }
 
+       children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+       children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
        ret = mfd_add_devices(palmas->dev, -1,
                              children, ARRAY_SIZE(palmas_children),
                              NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
        { "twl6035", },
        { "twl6037", },
        { "tps65913", },
+       { /* end */ }
 };
 MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
 
index 373f423b118164dbe1058fcba74694d4122c1527..947a06a1845f601980cc1d730decdc8129c1ad7e 100644 (file)
@@ -6,7 +6,7 @@
  *
  * License Terms: GNU General Public License, version 2
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/i2c.h>
index afd459013ecbb1e89485cf37f643f6d7d3c91a20..9edfe864cc056b5cd543fd696d63a325cda34274 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) ST Microelectronics SA 2011
  *
  * License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
  */
 
 #include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
index 93936f1b75eb1d89ac47a75284eebc3f1504df15..23f5463d4cae432e5e1b65ac97e5ba6042a2c59e 100644 (file)
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev,     s32 *slots,
                        struct mei_cl *cl,
                        struct mei_io_list *cmpl_list)
 {
-       if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+       if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
                        sizeof(struct hbm_flow_control))) {
                /* return the cancel routine */
                list_del(&cb_pos->cb_list);
index c70333228337d28bc76d1c61510bb57b615d3a68..783fcd7365bc1e770739db884bba4a5088479e22 100644 (file)
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
        if (err) {
                dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
                       pdev->irq);
-               goto unmap_memory;
+               goto disable_msi;
        }
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
        mei_disable_interrupts(dev);
        flush_scheduled_work();
        free_irq(pdev->irq, dev);
+disable_msi:
        pci_disable_msi(pdev);
-unmap_memory:
        pci_iounmap(pdev, dev->mem_addr);
 free_device:
        kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
+
+       misc_deregister(&mei_misc_device);
 }
 #ifdef CONFIG_PM
 static int mei_pci_suspend(struct device *device)
@@ -1145,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
                err = request_threaded_irq(pdev->irq,
                        NULL,
                        mei_interrupt_thread_handler,
-                       0, mei_driver_name, dev);
+                       IRQF_ONESHOT, mei_driver_name, dev);
        else
                err = request_threaded_irq(pdev->irq,
                        mei_interrupt_quick_handler,
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
  */
 static void __exit mei_exit_module(void)
 {
-       misc_deregister(&mei_misc_device);
        pci_unregister_driver(&mei_driver);
 
        pr_debug("unloaded successfully.\n");
index 6be5605707b46036c433a7e8e441eb8b4ee1ccca..e2ec0505eb5c05458568f8d12a7cdd6d3da9939d 100644 (file)
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
 };
 static const struct watchdog_info wd_info = {
                .identity = INTEL_AMT_WATCHDOG_ID,
-               .options = WDIOF_KEEPALIVEPING,
+               .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
 };
 
 static struct watchdog_device amt_wd_dev = {
index 17bbacb1b4b131b119b0076b339f44012fcc51f7..87b251ab6ec582f2c8177687d0b330d97110fb50 100644 (file)
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
                if (msg->activate_gru_mq_desc_gpa !=
                    part_uv->activate_gru_mq_desc_gpa) {
-                       spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                       spin_lock(&part_uv->flags_lock);
                        part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
-                       spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                       spin_unlock(&part_uv->flags_lock);
                        part_uv->activate_gru_mq_desc_gpa =
                            msg->activate_gru_mq_desc_gpa;
                }
index dd2d374dcc7aa43363a5366350f79c6a17e81a70..276d21ce6bc1ba6a18c844258331d6417f6278d7 100644 (file)
@@ -554,7 +554,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
-       unsigned int timeout_us;
 
        struct scatterlist sg;
 
@@ -574,23 +573,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-       data.timeout_ns = card->csd.tacc_ns * 100;
-       data.timeout_clks = card->csd.tacc_clks * 100;
-
-       timeout_us = data.timeout_ns / 1000;
-       timeout_us += data.timeout_clks * 1000 /
-               (card->host->ios.clock / 1000);
-
-       if (timeout_us > 100000) {
-               data.timeout_ns = 100000000;
-               data.timeout_clks = 0;
-       }
-
        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;
+       mmc_set_data_timeout(&data, card);
 
        mrq.cmd = &cmd;
        mrq.data = &data;
index f13e38deceac760fcbd9ae4cd2a1fdf7e5d79671..8f5dc08d65989d8526f8bc59d1ed91426e93108d 100644 (file)
@@ -50,8 +50,8 @@ int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
                goto egpioreq;
 
        ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
-                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                  cd->label, host);
+                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                                  IRQF_ONESHOT, cd->label, host);
        if (ret < 0)
                goto eirqreq;
 
index 2d4a4b74675060133fecd7021fa2d774e5f303c9..4f4489aa6baede795ae21c222e0aa9e99e7b5800 100644 (file)
@@ -717,10 +717,6 @@ static int mmc_select_powerclass(struct mmc_card *card,
                                 card->ext_csd.generic_cmd6_time);
        }
 
-       if (err)
-               pr_err("%s: power class selection for ext_csd_bus_width %d"
-                      " failed\n", mmc_hostname(card->host), bus_width);
-
        return err;
 }
 
@@ -1104,7 +1100,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
                err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
                if (err)
-                       goto err;
+                       pr_warning("%s: power class selection to bus width %d"
+                                  " failed\n", mmc_hostname(card->host),
+                                  1 << bus_width);
        }
 
        /*
@@ -1136,7 +1134,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1164,7 +1165,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                        err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
                                                    ext_csd);
                        if (err)
-                               goto err;
+                               pr_warning("%s: power class selection to "
+                                          "bus width %d ddr %d failed\n",
+                                          mmc_hostname(card->host),
+                                          1 << bus_width, ddr);
 
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         EXT_CSD_BUS_WIDTH,
@@ -1326,7 +1330,7 @@ static int mmc_suspend(struct mmc_host *host)
                if (!err)
                        mmc_card_set_sleep(host->card);
        } else if (!mmc_host_is_spi(host))
-               mmc_deselect_cards(host);
+               err = mmc_deselect_cards(host);
        host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
        mmc_release_host(host);
 
index c272c6868ecf6d11a39c3cf2be99f1257fccd026..b2b43f624b9edd13a8ad4d836ec2229636de2a9c 100644 (file)
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
  */
 static int mmc_sd_suspend(struct mmc_host *host)
 {
+       int err = 0;
+
        BUG_ON(!host);
        BUG_ON(!host->card);
 
        mmc_claim_host(host);
        if (!mmc_host_is_spi(host))
-               mmc_deselect_cards(host);
+               err = mmc_deselect_cards(host);
        host->card->state &= ~MMC_STATE_HIGHSPEED;
        mmc_release_host(host);
 
-       return 0;
+       return err;
 }
 
 /*
index 13d0e95380ab8f73d601060c3415b82bb253d12b..41c5fd8848f4d72c171d9e001b2171607e13eb83 100644 (file)
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
        if (ret)
                return ret;
 
+       if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+               pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+                          mmc_hostname(card->host), ctrl);
+
+       /* set as 4-bit bus width */
+       ctrl &= ~SDIO_BUS_WIDTH_MASK;
        ctrl |= SDIO_BUS_WIDTH_4BIT;
 
        ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
index 787aba1682bb362efa7f06baf16370f4551f2074..ab56f7db53150e907c406daff62be29fd5686cec 100644 (file)
 #define atmci_writel(port,reg,value)                   \
        __raw_writel((value), (port)->regs + reg)
 
+/*
+ * Convert sconfig's burst size to the Atmel MCI CHKSIZE encoding:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+       if (maxburst > 1)
+               return fls(maxburst) - 2;
+       else
+               return 0;
+}
+
 #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
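
The conversion described in the comment above (1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3) is fls(maxburst) - 2 for any burst larger than one word and 0 otherwise. A standalone check of that arithmetic, using a plain-C stand-in for the kernel's fls():

#include <stdio.h>

/* Plain-C stand-in for the kernel's fls(): position of the highest set bit,
 * counting from 1, or 0 when the argument is 0. */
static unsigned int fls_stub(unsigned int x)
{
        unsigned int pos = 0;

        while (x) {
                pos++;
                x >>= 1;
        }
        return pos;
}

/* Same conversion as atmci_convert_chksize() in the hunk above. */
static unsigned int convert_chksize(unsigned int maxburst)
{
        return maxburst > 1 ? fls_stub(maxburst) - 2 : 0;
}

int main(void)
{
        static const unsigned int bursts[] = { 1, 4, 8, 16 };
        unsigned int i;

        /* Prints 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, matching the comment. */
        for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
                printf("%u -> %u\n", bursts[i], convert_chksize(bursts[i]));
        return 0;
}
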
index 420aca642b14ba42a6fa7fe014cc865e49445b73..f2c115e064387715a3c2f2cf796c5ae8b70bddd0 100644 (file)
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        enum dma_data_direction         direction;
        enum dma_transfer_direction     slave_dirn;
        unsigned int                    sglen;
+       u32                             maxburst;
        u32 iflags;
 
        data->error = -EINPROGRESS;
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        if (!chan)
                return -ENODEV;
 
-       if (host->caps.has_dma)
-               atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
        if (data->flags & MMC_DATA_READ) {
                direction = DMA_FROM_DEVICE;
                host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+               maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
        } else {
                direction = DMA_TO_DEVICE;
                host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+               maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
        }
 
+       atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
        sglen = dma_map_sg(chan->device->dev, data->sg,
                        data->sg_len, direction);
 
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
+       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
        /* We need at least one slot to succeed */
        nr_slots = 0;
        ret = -ENODEV;
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev)
                }
        }
 
-       setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
-
        dev_info(&pdev->dev,
                        "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
                        host->mapbase, irq, nr_slots);
index 9bbf45f8c538ade0990444c0c5d63614ddc09c3f..1ca5e72ceb651e84544a8cc451698ea54de6aafd 100644 (file)
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
        p->des3 = host->sg_dma;
        p->des0 = IDMAC_DES0_ER;
 
+       mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
                   SDMMC_IDMAC_INT_TI);
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
        u32 div;
 
        if (slot->clock != host->current_speed) {
-               if (host->bus_hz % slot->clock)
+               div = host->bus_hz / slot->clock;
+               if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
-                       div = ((host->bus_hz / slot->clock) >> 1) + 1;
-               else
-                       div = (host->bus_hz  / slot->clock) >> 1;
+                       div += 1;
+
+               div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
 
                dev_info(&slot->mmc->class_dev,
                         "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
                        mdelay(20);
 
                if (cmd->data) {
-                       host->data = NULL;
                        dw_mci_stop_dma(host);
+                       host->data = NULL;
                }
        }
 }
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
                mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
-               set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
                host->dma_ops->complete(host);
        }
 #endif
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
 
 #ifdef CONFIG_MMC_DW_IDMAC
                                ctrl = mci_readl(host, BMOD);
-                               ctrl |= 0x01; /* Software reset of DMA */
+                               /* Software reset of DMA */
+                               ctrl |= SDMMC_IDMAC_SWRESET;
                                mci_writel(host, BMOD, ctrl);
 #endif
 
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
        spin_lock_init(&host->lock);
        INIT_LIST_HEAD(&host->queue);
 
-
-       host->dma_ops = host->pdata->dma_ops;
-       dw_mci_init_dma(host);
-
        /*
         * Get the host data width - this assumes that HCON has been set with
         * the correct values.
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
        }
 
        /* Reset all blocks */
-       if (!mci_wait_reset(&host->dev, host)) {
-               ret = -ENODEV;
-               goto err_dmaunmap;
-       }
+       if (!mci_wait_reset(&host->dev, host))
+               return -ENODEV;
+
+       host->dma_ops = host->pdata->dma_ops;
+       dw_mci_init_dma(host);
 
        /* Clear the interrupts for the host controller */
        mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
        if (host->vmmc)
                regulator_enable(host->vmmc);
 
-       if (host->dma_ops->init)
-               host->dma_ops->init(host);
-
        if (!mci_wait_reset(&host->dev, host)) {
                ret = -ENODEV;
                return ret;
        }
 
+       if (host->dma_ops->init)
+               host->dma_ops->init(host);
+
        /* Restore the old value at FIFOTH register */
        mci_writel(host, FIFOTH, host->fifoth_val);
 
index f0fcce40cd8daa27a6a10e44bb28ad9abcf5624d..50ff19a6236829b3143174b1372e141b86fb58fa 100644 (file)
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
        int bus_width = 0;
 
        pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
-       if (!pdata->gpio_wp)
-               pdata->gpio_wp = -1;
-
        pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
-       if (!pdata->gpio_cd)
-               pdata->gpio_cd = -1;
 
        if (of_get_property(np, "cd-inverted", NULL))
                pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
                return -EINVAL;
        }
 
+       if (!plat) {
+               plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+               if (!plat)
+                       return -ENOMEM;
+       }
+
        if (np)
                mmci_dt_populate_generic_pdata(np, plat);
 
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);
 
+       if (plat->gpio_cd == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_gpio_cd;
+       }
        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
+       if (plat->gpio_wp == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_gpio_wp;
+       }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
index 34a90266ab11710d69f85c2a5648c68d1d6ec430..277161d279b8048600e6b7333d05d9c022562886 100644 (file)
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = {
                .owner  = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm     = &mxs_mmc_pm_ops,
-               .of_match_table = mxs_mmc_dt_ids,
 #endif
+               .of_match_table = mxs_mmc_dt_ids,
        },
 };
 
index 552196c764d40bf4925e5bec5861729ad6af3716..3e8dcf8d2e051efea306e933fa701d7f92d53333 100644 (file)
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
        .set_ios        = mmc_omap_set_ios,
 };
 
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
 {
        struct mmc_omap_slot *slot = NULL;
        struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
        }
 
        host->nr_slots = pdata->nr_slots;
+       host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+       if (!host->mmc_omap_wq)
+               goto err_plat_cleanup;
+
        for (i = 0; i < pdata->nr_slots; i++) {
                ret = mmc_omap_new_slot(host, i);
                if (ret < 0) {
                        while (--i >= 0)
                                mmc_omap_remove_slot(host->slots[i]);
 
-                       goto err_plat_cleanup;
+                       goto err_destroy_wq;
                }
        }
 
-       host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
-       host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
-       if (!host->mmc_omap_wq)
-               goto err_plat_cleanup;
-
        return 0;
 
+err_destroy_wq:
+       destroy_workqueue(host->mmc_omap_wq);
 err_plat_cleanup:
        if (pdata->cleanup)
                pdata->cleanup(&pdev->dev);
index 9a7a60aeb19ea35dc921cd43f2e796c2bfd75469..389a3eedfc24505de5034e7d842f6e5678e4bc25 100644 (file)
@@ -85,7 +85,6 @@
 #define BRR_ENABLE             (1 << 5)
 #define DTO_ENABLE             (1 << 20)
 #define INIT_STREAM            (1 << 1)
-#define ACEN_ACMD12            (1 << 2)
 #define DP_SELECT              (1 << 21)
 #define DDIR                   (1 << 4)
 #define DMA_EN                 0x1
 #define OMAP_MMC_MAX_CLOCK     52000000
 #define DRIVER_NAME            "omap_hsmmc"
 
-#define AUTO_CMD12             (1 << 0)        /* Auto CMD12 support */
 /*
  * One controller can have multiple slots, like on some omap boards using
  * omap.c controller driver. Luckily this is not currently done on any known
@@ -177,7 +175,6 @@ struct omap_hsmmc_host {
        int                     reqs_blocked;
        int                     use_reg;
        int                     req_in_progress;
-       unsigned int            flags;
        struct omap_hsmmc_next  next_data;
 
        struct  omap_mmc_platform_data  *pdata;
@@ -773,8 +770,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
                cmdtype = 0x3;
 
        cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
-       if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
-               cmdreg |= ACEN_ACMD12;
 
        if (data) {
                cmdreg |= DP_SELECT | MSBS | BCE;
@@ -847,14 +842,11 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
        else
                data->bytes_xfered = 0;
 
-       if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
-               omap_hsmmc_start_command(host, data->stop, NULL);
-       } else {
-               if (data->stop)
-                       data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
-                                                       RSP76);
+       if (!data->stop) {
                omap_hsmmc_request_done(host, data->mrq);
+               return;
        }
+       omap_hsmmc_start_command(host, data->stop, NULL);
 }
 
 /*
@@ -1859,7 +1851,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
        host->mapbase   = res->start + pdata->reg_offset;
        host->base      = ioremap(host->mapbase, SZ_4K);
        host->power_mode = MMC_POWER_OFF;
-       host->flags     = AUTO_CMD12;
        host->next_data.cookie = 1;
 
        platform_set_drvdata(pdev, host);
index 55a164fcaa157ece64d9c2be581e5c7b46b71e9d..a50c205ea2085f3266ee906b673d062e3a9aaf10 100644 (file)
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
                if (sc->ext_cd_irq &&
                    request_threaded_irq(sc->ext_cd_irq, NULL,
                                         sdhci_s3c_gpio_card_detect_thread,
-                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                         dev_name(dev), sc) == 0) {
                        int status = gpio_get_value(sc->ext_cd_gpio);
                        if (pdata->ext_cd_gpio_invert)
index 1fe32dfa7cd4913fdb5fde321c2c0fd2da90428b..423da8194cd84e5597c169316311a5ff9629b845 100644 (file)
@@ -4,7 +4,7 @@
  * Support of SDHCI platform devices for spear soc family
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired by sdhci-pltfm.c
  *
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
 module_platform_driver(sdhci_driver);
 
 MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_LICENSE("GPL v2");
index e626732aff77d3ebd0563c3d1720da62aab6d7ff..f4b8b4db3a9acd8ccc1c3fdf32c18efc2a8bc88d 100644 (file)
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
        }
 
        if (count >= 0xF) {
-               pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
-                          mmc_hostname(host->mmc), count, cmd->opcode);
+               DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+                   mmc_hostname(host->mmc), count, cmd->opcode);
                count = 0xE;
        }
 
index ae36d7e1e91368dd36239a57f73ef4ccda637f56..551e316e4454d99f9b552a2924bea045784c7a8c 100644 (file)
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
 }
 
 static void mtdoops_do_dump(struct kmsg_dumper *dumper,
-               enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
-               const char *s2, unsigned long l2)
+                           enum kmsg_dump_reason reason)
 {
        struct mtdoops_context *cxt = container_of(dumper,
                        struct mtdoops_context, dump);
-       unsigned long s1_start, s2_start;
-       unsigned long l1_cpy, l2_cpy;
-       char *dst;
-
-       if (reason != KMSG_DUMP_OOPS &&
-           reason != KMSG_DUMP_PANIC)
-               return;
 
        /* Only dump oopses if dump_oops is set */
        if (reason == KMSG_DUMP_OOPS && !dump_oops)
                return;
 
-       dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
-       l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
-       l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
-       s2_start = l2 - l2_cpy;
-       s1_start = l1 - l1_cpy;
-
-       memcpy(dst, s1 + s1_start, l1_cpy);
-       memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+       kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+                            record_size - MTDOOPS_HEADER_SIZE, NULL);
 
        /* Panics must be written immediately */
        if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
                return;
        }
 
+       cxt->dump.max_reason = KMSG_DUMP_OOPS;
        cxt->dump.dump = mtdoops_do_dump;
        err = kmsg_dump_register(&cxt->dump);
        if (err) {
index 41371ba1a8117c87aaaf85b63383e60bda6303d6..f3f6cfedd69eb5e1367c196de7e8e249740bb77b 100644 (file)
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 static int cafe_device_ready(struct mtd_info *mtd)
 {
        struct cafe_priv *cafe = mtd->priv;
-       int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+       int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
        uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
 
        cafe_writel(cafe, irqs, NAND_IRQ);
index a05b7b444d4f1f8a92d6f0e10a2ee1db19d3c722..a6cad5caba788fe8b665270a3fc7494d7a071887 100644 (file)
@@ -920,12 +920,12 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                 */
                memset(chip->oob_poi, ~0, mtd->oobsize);
                chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
-
-               read_page_swap_end(this, buf, mtd->writesize,
-                               this->payload_virt, this->payload_phys,
-                               nfc_geo->payload_size,
-                               payload_virt, payload_phys);
        }
+
+       read_page_swap_end(this, buf, mtd->writesize,
+                       this->payload_virt, this->payload_phys,
+                       nfc_geo->payload_size,
+                       payload_virt, payload_phys);
 exit_nfc:
        return ret;
 }
index c58e6a93f44501d68056d46ea40dc0373d9a51a7..6acc790c2fbb96880ec29642a1d2e7e2528dbee9 100644 (file)
@@ -273,6 +273,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
 
 static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
 
+static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
+{
+       int i;
+       u32 *t = trg;
+       const __iomem u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               *t++ = __raw_readl(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+       int i;
+       u32 __iomem *t = trg;
+       const u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               __raw_writel(*s++, t++);
+}
+
 static int check_int_v3(struct mxc_nand_host *host)
 {
        uint32_t tmp;
@@ -519,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
 
        wait_op_done(host, true);
 
-       memcpy_fromio(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
@@ -535,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
-       memcpy_fromio(host->data_buf, host->main_area0, 16);
+       memcpy32_fromio(host->data_buf, host->main_area0, 16);
 
        if (this->options & NAND_BUSWIDTH_16) {
                /* compress the ID info */
@@ -797,16 +817,16 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
 
        if (bfrom) {
                for (i = 0; i < n - 1; i++)
-                       memcpy_fromio(d + i * j, s + i * t, j);
+                       memcpy32_fromio(d + i * j, s + i * t, j);
 
                /* the last section */
-               memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
+               memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
        } else {
                for (i = 0; i < n - 1; i++)
-                       memcpy_toio(&s[i * t], &d[i * j], j);
+                       memcpy32_toio(&s[i * t], &d[i * j], j);
 
                /* the last section */
-               memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+               memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
        }
 }
 
@@ -1070,7 +1090,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
 
                host->devtype_data->send_page(mtd, NFC_OUTPUT);
 
-               memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
+               memcpy32_fromio(host->data_buf, host->main_area0,
+                               mtd->writesize);
                copy_spare(mtd, true);
                break;
 
@@ -1086,7 +1107,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
                break;
 
        case NAND_CMD_PAGEPROG:
-               memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
+               memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
                copy_spare(mtd, false);
                host->devtype_data->send_page(mtd, NFC_INPUT);
                host->devtype_data->send_cmd(host, command, true);
index d47586cf64ce4af2c802f8783869af8b96ccc0a4..a11253a0fcabd6ef7362b9fcae4972ba06fc4966 100644 (file)
@@ -3501,6 +3501,13 @@ int nand_scan_tail(struct mtd_info *mtd)
        /* propagate ecc info to mtd_info */
        mtd->ecclayout = chip->ecc.layout;
        mtd->ecc_strength = chip->ecc.strength;
+       /*
+        * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+        * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+        * properly set.
+        */
+       if (!mtd->bitflip_threshold)
+               mtd->bitflip_threshold = mtd->ecc_strength;
 
        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
index 6cc8fbfabb8e2b71d50db215222d380f9e0061a1..cf0cd3146817f9275706f1d4f61b53724d0d5dc1 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -546,12 +546,6 @@ static char *get_partition_name(int i)
        return kstrdup(buf, GFP_KERNEL);
 }
 
-static uint64_t divide(uint64_t n, uint32_t d)
-{
-       do_div(n, d);
-       return n;
-}
-
 /*
  * Initialize the nandsim structure.
  *
@@ -580,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.oobsz    = mtd->oobsize;
        ns->geom.secsz    = mtd->erasesize;
        ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-       ns->geom.pgnum    = divide(ns->geom.totsz, ns->geom.pgsz);
+       ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
        ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
@@ -921,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
 
        if (!rptwear)
                return 0;
-       wear_eb_count = divide(mtd->size, mtd->erasesize);
+       wear_eb_count = div_u64(mtd->size, mtd->erasesize);
        mem = wear_eb_count * sizeof(unsigned long);
        if (mem / sizeof(unsigned long) != wear_eb_count) {
                NS_ERR("Too many erase blocks for wear reporting\n");
index 9f957c2d48e94fbd509c51e9f8d9f06f6fafe894..7c1380305219724bc0837cb6c1dc93320d4087c1 100644 (file)
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir;
  */
 int ubi_debugfs_init(void)
 {
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        dfs_rootdir = debugfs_create_dir("ubi", NULL);
        if (IS_ERR_OR_NULL(dfs_rootdir)) {
                int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void)
  */
 void ubi_debugfs_exit(void)
 {
-       debugfs_remove(dfs_rootdir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove(dfs_rootdir);
 }
 
 /* Read an UBI debugfs file */
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
        struct dentry *dent;
        struct ubi_debug_info *d = ubi->dbg;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
                     ubi->ubi_num);
        if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@ out:
  */
 void ubi_debugfs_exit_dev(struct ubi_device *ubi)
 {
-       debugfs_remove_recursive(ubi->dbg->dfs_dir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(ubi->dbg->dfs_dir);
 }
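The IS_ENABLED() checks above stand in for what would otherwise be #ifdef CONFIG_DEBUG_FS blocks: the macro expands to a constant 0 or 1, so the disabled branch is still parsed and type-checked before the compiler discards it, and the debugfs header stubs keep the calls linkable. A minimal sketch of the idiom, with illustrative example_* names:

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_dfs_dir;

    static int example_debugfs_init(void)
    {
            if (!IS_ENABLED(CONFIG_DEBUG_FS))
                    return 0;       /* whole body folds away when debugfs is off */

            example_dfs_dir = debugfs_create_dir("example", NULL);
            if (IS_ERR_OR_NULL(example_dfs_dir))
                    return example_dfs_dir ? PTR_ERR(example_dfs_dir) : -ENODEV;
            return 0;
    }

    static void example_debugfs_exit(void)
    {
            if (IS_ENABLED(CONFIG_DEBUG_FS))
                    debugfs_remove_recursive(example_dfs_dir);
    }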
index 9df100a4ec3886b6f6b58893f3ed772235da055e..b6be644e7b85f5194c9f0e1303ffa4d2a51e9cf6 100644 (file)
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
        dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
               vol_id, lnum, ubi->works_count);
 
-       down_write(&ubi->work_sem);
        while (found) {
                struct ubi_work *wrk;
                found = 0;
 
+               down_read(&ubi->work_sem);
                spin_lock(&ubi->wl_lock);
                list_for_each_entry(wrk, &ubi->works, list) {
                        if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
                                spin_unlock(&ubi->wl_lock);
 
                                err = wrk->func(ubi, wrk, 0);
-                               if (err)
-                                       goto out;
+                               if (err) {
+                                       up_read(&ubi->work_sem);
+                                       return err;
+                               }
+
                                spin_lock(&ubi->wl_lock);
                                found = 1;
                                break;
                        }
                }
                spin_unlock(&ubi->wl_lock);
+               up_read(&ubi->work_sem);
        }
 
-out:
+       /*
+        * Make sure all the work items that were running in parallel
+        * have finished.
+        */
+       down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);
+
        return err;
 }
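The rework above has each work item run under a read lock on work_sem, which leaves the write-lock/unlock pair at the end acting as a pure barrier that waits for in-flight items. Stripped to the idiom (illustrative names, not the UBI code itself):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(example_work_sem);

    /* Each work item runs under a read lock, so many can run at once. */
    static void example_run_one_work(void (*fn)(void))
    {
            down_read(&example_work_sem);
            fn();
            up_read(&example_work_sem);
    }

    /* Blocks until every work item that was already running has finished. */
    static void example_wait_for_running_works(void)
    {
            down_write(&example_work_sem);
            up_write(&example_work_sem);
    }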
 
index 3680aa251dea953d172e224a02b91b713f2dba22..2cf084eb9d524d995c4e3bf72a6d327e5f1ea9cb 100644 (file)
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
index 2ee8cf9e8a3b9fe8e728e1bc6d2334793712deb6..2ee76993f052ce517b2f65e92b3dd16b89ebc758 100644 (file)
@@ -76,6 +76,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
        return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
 
-       skb->queue_mapping = bond_queue_mapping(skb);
+       BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+                    sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+       skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
        if (unlikely(netpoll_tx_running(slave_dev)))
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -3226,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
        switch (event) {
        case NETDEV_CHANGENAME:
                return bond_event_changename(event_bond);
+       case NETDEV_UNREGISTER:
+               bond_remove_proc_entry(event_bond);
+               break;
+       case NETDEV_REGISTER:
+               bond_create_proc_entry(event_bond);
+               break;
        default:
                break;
        }
@@ -4171,7 +4178,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
        /*
         * Save the original txq to restore before passing to the driver
         */
-       bond_queue_mapping(skb) = skb->queue_mapping;
+       qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
@@ -4410,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
        bond_work_cancel_all(bond);
 
-       bond_remove_proc_entry(bond);
-
        bond_debug_unregister(bond);
 
        __hw_addr_flush(&bond->mc_list);
@@ -4813,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
 
        bond_set_lockdep_class(bond_dev);
 
-       bond_create_proc_entry(bond);
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
        bond_prepare_sysfs_group(bond);
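The queue-mapping hunks move bonding's saved value out of the raw skb->cb bytes and into a field of struct qdisc_skb_cb, since the generic cb area is also written by the qdisc layer on this path. The sketch below shows only the underlying cb-casting mechanics and the compile-time size checks in isolation; the example_* names are illustrative and this is not how the fix itself stores the value:

    #include <linux/skbuff.h>
    #include <linux/bug.h>

    struct example_skb_cb {
            u16 orig_queue_mapping;
    };

    static inline struct example_skb_cb *example_cb(struct sk_buff *skb)
    {
            BUILD_BUG_ON(sizeof(struct example_skb_cb) > sizeof(skb->cb));
            return (struct example_skb_cb *)skb->cb;
    }

    static void example_save_txq(struct sk_buff *skb)
    {
            BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
                         sizeof(example_cb(skb)->orig_queue_mapping));
            example_cb(skb)->orig_queue_mapping = skb->queue_mapping;
    }

    static void example_restore_txq(struct sk_buff *skb)
    {
            skb->queue_mapping = example_cb(skb)->orig_queue_mapping;
    }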
index ad284baafe87df64599c3335106a31aa0d6b3ec0..3cea38d373446826b7163974ed8e0b0836d19dc3 100644 (file)
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
        }
 }
 
+static const char *bond_slave_link_status(s8 link)
+{
+       static const char * const status[] = {
+               [BOND_LINK_UP] = "up",
+               [BOND_LINK_FAIL] = "going down",
+               [BOND_LINK_DOWN] = "down",
+               [BOND_LINK_BACK] = "going back",
+       };
+
+       return status[link];
+}
+
 static void bond_info_show_slave(struct seq_file *seq,
                                 const struct slave *slave)
 {
        struct bonding *bond = seq->private;
 
        seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
-       seq_printf(seq, "MII Status: %s\n",
-                  (slave->link == BOND_LINK_UP) ?  "up" : "down");
+       seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
        if (slave->speed == SPEED_UNKNOWN)
                seq_printf(seq, "Speed: %s\n", "Unknown");
        else
index aef42f045320ae86eed4014835f90ccd786d1586..485bedb8278c1cd7bc7f85322232caced433c3f6 100644 (file)
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
                }
        }
 
-       pr_info("%s: Unable to set %.*s as primary slave.\n",
-               bond->dev->name, (int)strlen(buf) - 1, buf);
+       strncpy(bond->params.primary, ifname, IFNAMSIZ);
+       bond->params.primary[IFNAMSIZ - 1] = 0;
+
+       pr_info("%s: Recording %s as primary, "
+               "but it has not been enslaved to %s yet.\n",
+               bond->dev->name, ifname, bond->dev->name);
 out:
        write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
index 1520814c77c7d1a4a94b0569069abb66cb356b51..4a27adb7ae67f7011ba1ae4bfae7784405435178 100644 (file)
@@ -693,8 +693,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
                         */
                        memcpy(rx_buf, (u8 *)piggy_desc,
                                        CFHSI_DESC_SHORT_SZ);
-                       /* Mark no embedded frame here */
-                       piggy_desc->offset = 0;
                        if (desc_pld_len == -EPROTO)
                                goto out_of_sync;
                }
@@ -737,6 +735,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
                        /* Extract any payload in piggyback descriptor. */
                        if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
                                goto out_of_sync;
+                       /* Mark no embedded frame after extracting it */
+                       piggy_desc->offset = 0;
                }
        }
 
@@ -1178,6 +1178,7 @@ int cfhsi_probe(struct platform_device *pdev)
                dev_err(&ndev->dev, "%s: Registration error: %d.\n",
                        __func__, res);
                free_netdev(ndev);
+               return -ENODEV;
        }
        /* Add CAIF HSI device to list. */
        spin_lock(&cfhsi_list_lock);
index 536bda072a1677a18a396125f7df714c172ced54..86cd532c78f90f9e4f42e8e795f2b8f1423cf424 100644 (file)
@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
        priv->write_reg(priv, &priv->regs->control,
                        CONTROL_ENABLE_AR);
 
-       if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
-                                       CAN_CTRLMODE_LOOPBACK)) {
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+           (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
                priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
                                CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
  *
  * We iterate from priv->tx_echo to priv->tx_next and check if the
  * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
  */
 static void c_can_do_tx(struct net_device *dev)
 {
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
        for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
                msg_obj_no = get_tx_echo_msg_obj(priv);
                val = c_can_read_reg32(priv, &priv->regs->txrqst1);
-               if (!(val & (1 << msg_obj_no))) {
+               if (!(val & (1 << (msg_obj_no - 1)))) {
                        can_get_echo_skb(dev,
                                        msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
                        stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
                                        & IF_MCONT_DLC_MASK;
                        stats->tx_packets++;
                        c_can_inval_msg_object(dev, 0, msg_obj_no);
+               } else {
+                       break;
                }
        }
 
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       irqstatus = priv->irqstatus;
        if (!irqstatus)
                goto end;
 
@@ -1028,12 +1030,11 @@ end:
 
 static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
-       u16 irqstatus;
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
-       if (!irqstatus)
+       priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!priv->irqstatus)
                return IRQ_NONE;
 
        /* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
                goto exit_irq_fail;
        }
 
+       napi_enable(&priv->napi);
+
        /* start the c_can controller */
        c_can_start(dev);
 
-       napi_enable(&priv->napi);
        netif_start_queue(dev);
 
        return 0;
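Several c_can hunks latch the interrupt status in the ISR (priv->irqstatus) and let the NAPI poll handler consume the cached copy rather than reading the register a second time. A bare-bones sketch of that shape, assuming, as the fix implies, that a later read would no longer return the triggering status; names are illustrative:

    #include <linux/interrupt.h>
    #include <linux/types.h>

    struct example_priv {
            u16 irqstatus;                  /* latched in the ISR, consumed by poll */
    };

    static irqreturn_t example_isr(struct example_priv *priv, u16 hw_status)
    {
            priv->irqstatus = hw_status;    /* read the hardware exactly once */
            if (!priv->irqstatus)
                    return IRQ_NONE;

            /* ...mask device interrupts and napi_schedule() here... */
            return IRQ_HANDLED;
    }

    static void example_poll(struct example_priv *priv)
    {
            u16 irqstatus = priv->irqstatus;        /* no second register read */

            if (!irqstatus)
                    return;
            /* ...handle RX/TX/error work based on irqstatus... */
    }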
index 9b7fbef3d09a1248cda69974ac9c3cc4cf9e464e..5f32d34af507e7a9d51c4b37f8add3cd6ff0ddcb 100644 (file)
@@ -76,6 +76,7 @@ struct c_can_priv {
        unsigned int tx_next;
        unsigned int tx_echo;
        void *priv;             /* for board-specific data */
+       u16 irqstatus;
 };
 
 struct net_device *alloc_c_can_dev(void);
index 53115eee80758fd99e45a5a6258986fa81b9f8d5..688371cda37afc51ff125efa547e819126e4ca24 100644 (file)
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
        struct cc770_platform_data *pdata = pdev->dev.platform_data;
 
        priv->can.clock.freq = pdata->osc_freq;
-       if (priv->cpu_interface | CPUIF_DSC)
+       if (priv->cpu_interface & CPUIF_DSC)
                priv->can.clock.freq /= 2;
        priv->clkout = pdata->cor;
        priv->bus_config = pdata->bcr;
index 38c0690df5c8ae9ae283bde27eb523ca55bf8b19..81d474102378dddf0fee978f66c08c4634077d81 100644 (file)
@@ -939,12 +939,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
                return PTR_ERR(pinctrl);
 
        if (pdev->dev.of_node) {
-               const u32 *clock_freq_p;
+               const __be32 *clock_freq_p;
 
                clock_freq_p = of_get_property(pdev->dev.of_node,
                                                "clock-frequency", NULL);
                if (clock_freq_p)
-                       clock_freq = *clock_freq_p;
+                       clock_freq = be32_to_cpup(clock_freq_p);
        }
 
        if (!clock_freq) {
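The flexcan change is an endianness fix: of_get_property() returns a pointer into the flattened device tree, where cell values are stored big-endian, so they must go through be32_to_cpup() before use on little-endian CPUs. A minimal sketch of the corrected access (the example_* name is illustrative); of_property_read_u32() performs the same conversion internally:

    #include <linux/of.h>
    #include <linux/kernel.h>

    static u32 example_read_clock_frequency(struct device_node *np)
    {
            const __be32 *prop;

            prop = of_get_property(np, "clock-frequency", NULL);
            if (!prop)
                    return 0;               /* absent: let the caller pick a default */

            return be32_to_cpup(prop);      /* *prop alone is byte-swapped on LE CPUs */
    }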
index 442d91a2747b9d8136dd5809ec3d192fc8c1e04c..bab0158f1cc3180f112c3d296cb5ffb22bddeb0d 100644 (file)
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
        rtnl_lock();
        err = __rtnl_link_register(&dummy_link_ops);
 
-       for (i = 0; i < numdummies && !err; i++)
+       for (i = 0; i < numdummies && !err; i++) {
                err = dummy_init_one();
+               cond_resched();
+       }
        if (err < 0)
                __rtnl_link_unregister(&dummy_link_ops);
        rtnl_unlock();
index 9cc15701101b8cfd98ae12799244089daec15c2e..1f78b63d5efe514d82472eaac9d1a3567c3a2981 100644 (file)
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
        if ((phy_data & BMSR_LSTATUS) == 0) {
                /* link down */
                netif_carrier_off(netdev);
-               netif_stop_queue(netdev);
                hw->hibernate = true;
                if (atl1c_reset_mac(hw) != 0)
                        if (netif_msg_hw(adapter))
index 46b8b7d81633eaa95fb4755a2225494a8b93856a..d09c6b583d17b2570dffdcabc0a4766722af9bf7 100644 (file)
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
-               skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+               skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);
 
-               bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+               bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;
 
index ac7b74488531be6f59ee885c23386b01eabd60f5..1fa4927a45b1dadbd6da8fa764792c57c3c67cf0 100644 (file)
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        int k, last;
 
                        if (skb == NULL) {
-                               j++;
+                               j = NEXT_TX_BD(j);
                                continue;
                        }
 
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        tx_buf->skb = NULL;
 
                        last = tx_buf->nr_frags;
-                       j++;
-                       for (k = 0; k < last; k++, j++) {
+                       j = NEXT_TX_BD(j);
+                       for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
index e30e2a2f354c8fc30f2c59750c5b1ce63fa66676..7de824184979788b2f740b6e824c271c60ff45d9 100644 (file)
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
 
 #define ETH_RX_ERROR_FALGS             ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-                       (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
                                (((le16_to_cpu(flags) & \
                                   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
index ad0743bf4bdece7ac3cbc17e98f881e92530b22f..8098eea9704df6ffea4b904ab173ce88ef6f1016 100644 (file)
@@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
-                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
                        netif_tx_wake_queue(txq);
 
                __netif_tx_unlock(txq);
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
        return 0;
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+                               struct bnx2x_fastpath *fp)
+{
+       /* Do nothing if no IP/L4 csum validation was done */
+
+       if (cqe->fast_path_cqe.status_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+               return;
+
+       /* If both IP and L4 validation were done, check if an error was found. */
+
+       if (cqe->fast_path_cqe.type_error_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+               fp->eth_q_stats.hw_csum_err++;
+       else
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -806,13 +825,9 @@ reuse_rx:
 
                skb_checksum_none_assert(skb);
 
-               if (bp->dev->features & NETIF_F_RXCSUM) {
+               if (bp->dev->features & NETIF_F_RXCSUM)
+                       bnx2x_csum_validate(skb, cqe, fp);
 
-                       if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       else
-                               fp->eth_q_stats.hw_csum_err++;
-               }
 
                skb_record_rx_queue(skb, fp->rx_queue);
 
@@ -2501,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fp_txdata *txdata,
@@ -3156,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txdata->tx_bd_prod += nbd;
 
-       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
                netif_tx_stop_queue(txq);
 
                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3165,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                smp_mb();
 
                fp->eth_q_stats.driver_xoff++;
-               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
                        netif_tx_wake_queue(txq);
        }
        txdata->tx_pkt++;
index a3fb7215cd8910846606e8b9ddeaee7ba0c28b89..6e7d5c0843b4c4bd825b0cd505b489011fd748bd 100644 (file)
@@ -40,6 +40,7 @@
 #define I2C_BSC0                       0
 #define I2C_BSC1                       1
 #define I2C_WA_RETRY_CNT               3
+#define I2C_WA_PWR_ITER                        (I2C_WA_RETRY_CNT - 1)
 #define MCPR_IMC_COMMAND_READ_OP       1
 #define MCPR_IMC_COMMAND_WRITE_OP      2
 
@@ -7659,6 +7660,28 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        return -EINVAL;
 }
 
+static void bnx2x_warpcore_power_module(struct link_params *params,
+                                       struct bnx2x_phy *phy,
+                                       u8 power)
+{
+       u32 pin_cfg;
+       struct bnx2x *bp = params->bp;
+
+       pin_cfg = (REG_RD(bp, params->shmem_base +
+                         offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+                       PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+                       PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+       if (pin_cfg == PIN_CFG_NA)
+               return;
+       DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+                      power, pin_cfg);
+       /* Low ==> corresponding SFP+ module is powered
+        * high ==> the SFP+ module is powered down
+        */
+       bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                                                 struct link_params *params,
                                                 u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
        /* 4 byte aligned address */
        addr32 = addr & (~0x3);
        do {
+               if (cnt == I2C_WA_PWR_ITER) {
+                       bnx2x_warpcore_power_module(params, phy, 0);
+                       /* Note that 100us is not enough here */
+                       usleep_range(1000, 1000);
+                       bnx2x_warpcore_power_module(params, phy, 1);
+               }
                rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
                                    data_array);
        } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
                bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 }
 
-static void bnx2x_warpcore_power_module(struct link_params *params,
-                                       struct bnx2x_phy *phy,
-                                       u8 power)
-{
-       u32 pin_cfg;
-       struct bnx2x *bp = params->bp;
-
-       pin_cfg = (REG_RD(bp, params->shmem_base +
-                         offsetof(struct shmem_region,
-                       dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
-                       PORT_HW_CFG_E3_PWR_DIS_MASK) >>
-                       PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
-       if (pin_cfg == PIN_CFG_NA)
-               return;
-       DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
-                      power, pin_cfg);
-       /* Low ==> corresponding SFP+ module is powered
-        * high ==> the SFP+ module is powered down
-        */
-       bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                    struct link_params *params)
 {
@@ -9748,7 +9754,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        msleep(1);
 
-       if (!(CHIP_IS_E1(bp)))
+       if (!(CHIP_IS_E1x(bp)))
                port = BP_PATH(bp);
        else
                port = params->port;
index c95e7b5e2b85589db86db943c3c77b697394312b..2c89d17cbb292cfc3e971dbf180b95728d8e06b7 100644 (file)
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
        }
 
        if (atomic_read(&ulp_ops->ref_count) != 0)
-               netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+               pr_warn("%s: Failed waiting for ref count to go to zero\n",
+                       __func__);
        return 0;
 
 out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
        uinfo = &udev->cnic_uinfo;
 
-       uinfo->mem[0].addr = dev->netdev->base_addr;
+       uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
        uinfo->mem[0].internal_addr = dev->regview;
-       uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
        uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+               uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+                                                    TX_MAX_TSS_RINGS + 1);
                uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
                                        PAGE_MASK;
                if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
                uinfo->name = "bnx2_cnic";
        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+               uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
                uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
                        PAGE_MASK;
                uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
index edeeb516807a1399ceb8f8a9846bbd105fcce7c3..e47ff8be1d7b5c27be543c7d41f4584d56054644 100644 (file)
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                }
        }
 
-       if (tg3_flag(tp, 5755_PLUS))
+       if (tg3_flag(tp, 5755_PLUS) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
index 8d06ea381741cca9178534c843838452872d1cf3..921c2082af4cccf58c808601126a63410aab08b4 100644 (file)
@@ -122,15 +122,15 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        goto done;
 
                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-                       dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
-                               "permitted to execute this cmd (opcode %d)\n",
-                               opcode);
+                       dev_warn(&adapter->pdev->dev,
+                                "opcode %d-%d is not permitted\n",
+                                opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
-                       dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
-                               "status %d, extd-status %d\n",
-                               opcode, compl_status, extd_status);
+                       dev_err(&adapter->pdev->dev,
+                               "opcode %d-%d failed: status %d-%d\n",
+                               opcode, subsystem, compl_status, extd_status);
                }
        }
 done:
index 9625bf420c161efb92ccc89c16b18b1fa4c90c68..b3f3fc3d132374207aa66fa6844d91374371c05b 100644 (file)
@@ -1566,7 +1566,7 @@ struct be_hw_stats_v1 {
        u32 rsvd0[BE_TXP_SW_SZ];
        struct be_erx_stats_v1 erx;
        struct be_pmem_stats pmem;
-       u32 rsvd1[3];
+       u32 rsvd1[18];
 };
 
 struct be_cmd_req_get_stats_v1 {
index 08efd308d78ae40640f73953a41531aabba2c054..501dfa9c88ec4cfad5ea5b642f6e78decbb8f731 100644 (file)
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
+               int gso_segs = skb_shinfo(skb)->gso_segs;
+
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
                be_txq_notify(adapter, txq->id, wrb_cnt);
 
-               be_tx_stats_update(txo, wrb_cnt, copied,
-                               skb_shinfo(skb)->gso_segs, stopped);
+               be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
@@ -3236,7 +3237,7 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->flags |= IFF_MULTICAST;
 
-       netif_set_gso_max_size(netdev, 65535);
+       netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 
        netdev->netdev_ops = &be_netdev_ops;
 
index 0741aded9eb057bbc2454a220bd6a19318e975fa..ab1d80ff0791c36f937b0c06c4a52c33df340591 100644 (file)
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
        if (priv->mode == MQ_MG_MODE) {
                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       if (likely(priv->tx_queue[i]->txcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
-                       }
                }
 
                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       if (likely(priv->rx_queue[i]->rxcoalescing)) {
-                               gfar_write(baddr + i, 0);
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-                       }
                }
        }
 }
@@ -2065,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
 
-               /* Steal sock reference for processing TX time stamps */
-               swap(skb_new->sk, skb->sk);
-               swap(skb_new->destructor, skb->destructor);
-               kfree_skb(skb);
+               if (skb->sk)
+                       skb_set_owner_w(skb_new, skb->sk);
+               consume_skb(skb);
                skb = skb_new;
        }
 
index 79b07ec6726f93f8eb59f07ef2c97d11b828ce95..0cafe4fe9406125af6375cd8859b1919ab0d62d4 100644 (file)
@@ -122,8 +122,10 @@ config IGB_DCA
 
 config IGB_PTP
        bool "PTP Hardware Clock (PHC)"
-       default y
-       depends on IGB && PTP_1588_CLOCK
+       default n
+       depends on IGB && EXPERIMENTAL
+       select PPS
+       select PTP_1588_CLOCK
        ---help---
          Say Y here if you want to use PTP Hardware Clock (PHC) in the
          driver.  Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
 config IXGBE_PTP
        bool "PTP Clock Support"
        default n
-       depends on IXGBE && PTP_1588_CLOCK
+       depends on IXGBE && EXPERIMENTAL
+       select PPS
+       select PTP_1588_CLOCK
        ---help---
          Say Y here if you want support for 1588 Timestamping with a
          PHC device, using the PTP 1588 Clock support. This is
index 36db4df09aed6531cf66d893b40449a1b0cceb2c..1f063dcd8f85e39c15da5d82c935113af8044ffb 100644 (file)
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);
+       /* SYNCH bit and IV bit are sticky */
+       udelay(10);
+       rxcw = er32(RXCW);
 
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
index 351a4097b2baec09c53ce45cace4c03c1c8dcb47..76edbc1be33b4d1151e72c24c1f621c830c3a039 100644 (file)
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
 
index d863075df7a407cc59e194feebd652c622729384..905e2147d9182f4c8d9b26de66820fdc130debf0 100644 (file)
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
         * When SoL/IDER sessions are active, autoneg/speed/duplex
         * cannot be changed
         */
-       if (hw->phy.ops.check_reset_block(hw)) {
+       if (hw->phy.ops.check_reset_block &&
+           hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot change link characteristics when SoL/IDER is active.\n");
                return -EINVAL;
        }
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
         * PHY loopback cannot be performed if SoL/IDER
         * sessions are active
         */
-       if (hw->phy.ops.check_reset_block(hw)) {
+       if (hw->phy.ops.check_reset_block &&
+           hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
                *data = 0;
                goto out;
index 238ab2f8a5e7ceff37869043c1ac83d3e41d3b26..e3a7b07df6294781559a72537d7949b711e82d00 100644 (file)
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
  **/
 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-       u16 phy_reg;
-       u32 phy_id;
+       u16 phy_reg = 0;
+       u32 phy_id = 0;
+       s32 ret_val;
+       u16 retry_count;
+
+       for (retry_count = 0; retry_count < 2; retry_count++) {
+               ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF))
+                       continue;
+               phy_id = (u32)(phy_reg << 16);
 
-       e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
-       phy_id = (u32)(phy_reg << 16);
-       e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
-       phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+               if (ret_val || (phy_reg == 0xFFFF)) {
+                       phy_id = 0;
+                       continue;
+               }
+               phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+               break;
+       }
 
        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
                        return true;
-       } else {
-               if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
-                       hw->phy.id = phy_id;
+       } else if (phy_id) {
+               hw->phy.id = phy_id;
+               hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                return true;
        }
 
-       return false;
+       /*
+        * In case the PHY needs to be in mdio slow mode,
+        * set slow mode and try to get the PHY id again.
+        */
+       hw->phy.ops.release(hw);
+       ret_val = e1000_set_mdio_slow_mode_hv(hw);
+       if (!ret_val)
+               ret_val = e1000e_get_phy_id(hw);
+       hw->phy.ops.acquire(hw);
+
+       return !ret_val;
 }
 
 /**
index 026e8b3ab52eee6be5ecadf5ee0de2745eae5faf..a13439928488c7aeab640d0a99f0833a5e74f203 100644 (file)
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
         * In the case of the phy reset being blocked, we already have a link.
         * We do not need to set it up again.
         */
-       if (hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
                return 0;
 
        /*
index a4b0435b00dc83078776346d9a510923b1cc20ac..623e30b9964de29e091f77564ccd53ce8ba30876 100644 (file)
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-                             __le16 csum, struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        if (status & E1000_RXD_STAT_IXSM)
                return;
 
-       /* TCP/UDP checksum error bit is set */
-       if (errors & E1000_RXD_ERR_TCPE) {
+       /* TCP/UDP checksum error bit or IP checksum error bit is set */
+       if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
 
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status & E1000_RXD_STAT_TCPCS) {
-               /* TCP checksum is good */
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               /*
-                * IP fragment with UDP payload
-                * Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)swab16((__force u16)csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       }
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
 }
 
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                skb_put(skb, length);
 
                /* Receive Checksum Offload */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1341,8 +1328,7 @@ copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                        }
                }
 
-               /* Receive Checksum Offload XXX recompute due to CRC strip? */
-               e1000_rx_checksum(adapter, staterr,
-                                 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+               /* Receive Checksum Offload */
+               e1000_rx_checksum(adapter, staterr, skb);
 
                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
-       if (adapter->netdev->features & NETIF_F_RXCSUM) {
+       if (adapter->netdev->features & NETIF_F_RXCSUM)
                rxcsum |= E1000_RXCSUM_TUOFL;
-
-               /*
-                * IPv4 payload checksum for UDP fragments must be
-                * used in conjunction with packet-split.
-                */
-               if (adapter->rx_ps_pages)
-                       rxcsum |= E1000_RXCSUM_IPPCSE;
-       } else {
+       else
                rxcsum &= ~E1000_RXCSUM_TUOFL;
-               /* no need to clear IPPCSE as it defaults to 0 */
-       }
        ew32(RXCSUM, rxcsum);
 
        if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-               if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-                       e_err("Jumbo Frames not supported.\n");
-                       return -EINVAL;
-               }
-
-               /*
-                * IP payload checksum (enabled with jumbos/packet-split when
-                * Rx checksum is enabled) and generation of RSS hash is
-                * mutually exclusive in the hardware.
-                */
-               if ((netdev->features & NETIF_F_RXCSUM) &&
-                   (netdev->features & NETIF_F_RXHASH)) {
-                       e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-                       return -EINVAL;
-               }
+       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+           !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+               e_err("Jumbo Frames not supported.\n");
+               return -EINVAL;
        }
 
        /* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
                         NETIF_F_RXALL)))
                return 0;
 
-       /*
-        * IP payload checksum (enabled with jumbos/packet-split when Rx
-        * checksum is enabled) and generation of RSS hash is mutually
-        * exclusive in the hardware.
-        */
-       if (adapter->rx_ps_pages &&
-           (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-               e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-               return -EINVAL;
-       }
-
        if (changed & NETIF_F_RXFCS) {
                if (features & NETIF_F_RXFCS) {
                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6237,7 +6190,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                adapter->hw.phy.ms_type = e1000_ms_hw_default;
        }
 
-       if (hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
                e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
        /* Set initial default active device features */
@@ -6404,7 +6357,7 @@ err_register:
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_release_hw_control(adapter);
 err_eeprom:
-       if (!hw->phy.ops.check_reset_block(hw))
+       if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
                e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
        kfree(adapter->tx_ring);
index 0334d013bc3c828fc2256ae117287809f46a9d2c..b860d4f7ea2a950a7b24d0db8ca6f15446f1bfd3 100644 (file)
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
        s32 ret_val;
        u32 ctrl;
 
-       ret_val = phy->ops.check_reset_block(hw);
-       if (ret_val)
-               return 0;
+       if (phy->ops.check_reset_block) {
+               ret_val = phy->ops.check_reset_block(hw);
+               if (ret_val)
+                       return 0;
+       }
 
        ret_val = phy->ops.acquire(hw);
        if (ret_val)
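All the e1000e hunks in this stretch apply one rule: check_reset_block is treated as an optional ops callback, so the pointer is tested before every call. Reduced to its essence with illustrative types (the real e1000e structures are larger):

    struct example_phy_ops {
            int (*check_reset_block)(void *hw);     /* optional, may be NULL */
    };

    static int example_reset_is_blocked(const struct example_phy_ops *ops, void *hw)
    {
            /* A missing callback simply means "reset is never blocked". */
            return ops->check_reset_block ? ops->check_reset_block(hw) : 0;
    }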
index e65083958421bf06bdd7d45a51b1f53128de2dec..5e84eaac48c191727d9ac733c80ffbb13ce1473f 100644 (file)
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
                break;
        case e1000_i350:
-       case e1000_i210:
-       case e1000_i211:
                mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
                break;
        default:
index 8ce67064b9c5802efe098f702486477b424c273e..90eef07943f4d50bbd027646e170abca5abfb1a3 100644 (file)
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-           ((ec->rx_coalesce_usecs > 3) &&
-            (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-           (ec->rx_coalesce_usecs == 2))
-               return -EINVAL;
-
-       /* convert to rate of irq's per second */
-       if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+       if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+            (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+               adapter->requested_itr = 1000000000 /
+                                       (adapter->current_itr * 256);
+       } else if ((ec->rx_coalesce_usecs == 3) ||
+                  (ec->rx_coalesce_usecs == 2)) {
                adapter->current_itr = IGBVF_START_ITR;
                adapter->requested_itr = ec->rx_coalesce_usecs;
-       } else {
-               adapter->current_itr = ec->rx_coalesce_usecs << 2;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               /*
+                * The user's desire is to turn off interrupt throttling
+                * altogether, but due to HW limitations, we can't do that.
+                * Instead we set a very small value in EITR, which would
+                * allow ~967k interrupts per second, but allow the adapter's
+                * internal clocking to still function properly.
+                */
+               adapter->current_itr = 4;
                adapter->requested_itr = 1000000000 /
                                        (adapter->current_itr * 256);
-       }
+       } else
+               return -EINVAL;
 
        writel(adapter->current_itr,
               hw->hw_addr + adapter->rx_ring->itr_register);
index 3ef3c5284e522af9f797aada6e721486cce5190a..7af291e236bf91b7317fd02ddac47ee6c7cf0bcf 100644 (file)
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_RX_RSC_ENABLED,
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
-       __IXGBE_RX_FCOE_BUFSZ,
+       __IXGBE_RX_FCOE,
 };
 
 #define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@ struct ixgbe_ring_feature {
 #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
 }
 #else
 #define ixgbe_rx_pg_order(_ring) 0
index af1a5314b494f90d362d647ad5921d708a5fa87f..c377706e81a8f75d89f800c0b527109e4d66f335 100644 (file)
@@ -634,7 +634,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((rxr_idx >= f->mask) &&
                            (rxr_idx < f->mask + f->indices))
-                               set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+                               set_bit(__IXGBE_RX_FCOE, &ring->state);
                }
 
 #endif /* IXGBE_FCOE */
index bf20457ea23aba4a249837aca419d4250a079fcf..e242104ab471a23ed6203dc4a4e4cda82edf44e7 100644 (file)
@@ -1058,17 +1058,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  * @rx_desc: advanced rx descriptor
  *
  * Returns : true if it is FCoE pkt
  */
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
                                    union ixgbe_adv_rx_desc *rx_desc)
 {
        __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-       return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+       return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
               ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
                (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
                             IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
        /* alloc new page for storage */
        if (likely(!page)) {
-               page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+               page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
                                   ixgbe_rx_pg_order(rx_ring));
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
 {
+       struct net_device *dev = rx_ring->netdev;
+
        ixgbe_update_rsc_stats(rx_ring, skb);
 
        ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
                ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
 #endif
 
-       if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+       if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+           ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                __vlan_hwaccel_put_tag(skb, vid);
        }
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
 
-       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+       skb->protocol = eth_type_trans(skb, dev);
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1549,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                skb->truesize -= ixgbe_rx_bufsz(rx_ring);
        }
 
+#ifdef IXGBE_FCOE
+       /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+       if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+               return false;
+
+#endif
        /* if skb_pad returns an error the skb was freed */
        if (unlikely(skb->len < 60)) {
                int pad_len = 60 - skb->len;
@@ -1772,7 +1781,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
-               if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+               if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes) {
                                dev_kfree_skb_any(skb);
@@ -3607,10 +3616,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-
-       /* Enable VLAN tag insert/strip */
-       adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
 #ifdef IXGBE_FCOE
@@ -6642,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
        }
 
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               e_err(drv, "Enable failed, SR-IOV enabled\n");
+               return -EINVAL;
+       }
+
        /* Hardware supports up to 8 traffic classes */
        if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
            (hw->mac.type == ixgbe_mac_82598EB &&
@@ -6701,11 +6711,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_DCB
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-               features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
        /* return error if RXHASH is being enabled when RSS is not supported */
        if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
                features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6723,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
                features &= ~NETIF_F_LRO;
 
-
        return features;
 }
 
@@ -6766,6 +6770,11 @@ static int ixgbe_set_features(struct net_device *netdev,
                need_reset = true;
        }
 
+       if (features & NETIF_F_HW_VLAN_RX)
+               ixgbe_vlan_strip_enable(adapter);
+       else
+               ixgbe_vlan_strip_disable(adapter);
+
        if (changed & NETIF_F_RXALL)
                need_reset = true;
 
index ddc6a4d193028694f30c9835e5df76df09a91366..dcebd128becf96976c95797dfd2c32a6aeade5e9 100644 (file)
@@ -708,6 +708,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 incval = 0;
+       u32 timinca = 0;
        u32 shift = 0;
        u32 cycle_speed;
        unsigned long flags;
@@ -730,8 +731,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
                break;
        }
 
-       /* Bail if the cycle speed didn't change */
-       if (adapter->cycle_speed == cycle_speed)
+       /*
+        * grab the current TIMINCA value from the register so that it can be
+        * double checked. If the register value has been cleared, it must be
+        * reset to the correct value for generating a cyclecounter. If
+        * TIMINCA is zero, the SYSTIME registers do not increment at all.
+        */
+       timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+       /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+       if (adapter->cycle_speed == cycle_speed && timinca)
                return;
 
        /* disable the SDP clock out */
index f69ec4288b104e6c175b54b17fe4ceecc363fba1..41e32257a4e873e37d4daf24798400c394416b69 100644 (file)
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
 
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+               return true;
+
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
-               tx_ring->total_bytes = 0;
-               tx_ring->total_packets = 0;
                ixgbevf_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbevf_ring  *rx_ring;
        int r_idx;
-       int i;
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
-               rx_ring->total_bytes = 0;
-               rx_ring->total_packets = 0;
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
 
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
index 04d901d0ff635f284185175bdc3698bc7940038b..f0f06b2bc28b3f951933c19bde6e788adcc7c5bf 100644 (file)
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
        /*
         * Hardware-specific parameters.
         */
+#if defined(CONFIG_HAVE_CLK)
        struct clk *clk;
+#endif
        unsigned int t_clk;
 };
 
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->dev = dev;
 
        /*
-        * Get the clk rate, if there is one, otherwise use the default.
+        * Start with a default rate, and if there is a clock, allow
+        * it to override the default.
         */
+       mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
        mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
        if (!IS_ERR(mp->clk)) {
                clk_prepare_enable(mp->clk);
                mp->t_clk = clk_get_rate(mp->clk);
-       } else {
-               mp->t_clk = 133000000;
-               printk(KERN_WARNING "Unable to get clock");
        }
-
+#endif
        set_params(mp, pd);
        netif_set_real_num_tx_queues(dev, mp->txq_count);
        netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
                phy_detach(mp->phy);
        cancel_work_sync(&mp->tx_timeout_task);
 
+#if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(mp->clk)) {
                clk_disable_unprepare(mp->clk);
                clk_put(mp->clk);
        }
+#endif
+
        free_netdev(mp->dev);
 
        platform_set_drvdata(pdev, NULL);
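Because the clk member itself only exists when CONFIG_HAVE_CLK is set, every access has to sit under the same guard, and whatever probe() prepared must be undone symmetrically in remove(). A minimal sketch of that pattern under those assumptions (the struct and function names are illustrative; clk_get(), clk_prepare_enable(), clk_get_rate(), clk_disable_unprepare() and clk_put() are the regular kernel clock API):

	struct eth_priv_sketch {
	#if defined(CONFIG_HAVE_CLK)
		struct clk *clk;
	#endif
		unsigned int t_clk;		/* bus clock rate in Hz */
	};

	static void sketch_probe_clk(struct eth_priv_sketch *p, struct device *dev)
	{
		p->t_clk = 133000000;		/* safe default */
	#if defined(CONFIG_HAVE_CLK)
		p->clk = clk_get(dev, NULL);
		if (!IS_ERR(p->clk)) {
			clk_prepare_enable(p->clk);
			p->t_clk = clk_get_rate(p->clk);	/* override */
		}
	#endif
	}

	static void sketch_remove_clk(struct eth_priv_sketch *p)
	{
	#if defined(CONFIG_HAVE_CLK)
		if (!IS_ERR(p->clk)) {
			clk_disable_unprepare(p->clk);
			clk_put(p->clk);
		}
	#endif
	}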
index cace36f2ab921772417b515182762c54412f13fa..28a54451a3e5060344c91af03cc1d51d557bae59 100644 (file)
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
        struct sky2_port *sky2 = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
 
-       if (changed & NETIF_F_RXCSUM) {
-               bool on = features & NETIF_F_RXCSUM;
-               sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-                            on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+       if ((changed & NETIF_F_RXCSUM) &&
+           !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+               sky2_write32(sky2->hw,
+                            Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+                            (features & NETIF_F_RXCSUM)
+                            ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
        }
 
        if (changed & NETIF_F_RXHASH)
index 926d8aac941c67c8e1160c120073b14aaddb778b..073b85b45fc5d8db3f4be778f32c085eea0b7790 100644 (file)
@@ -929,15 +929,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                if (priv->rx_cq[i].buf)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
+
+       if (priv->base_tx_qpn) {
+               mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+               priv->base_tx_qpn = 0;
+       }
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
-       int base_tx_qpn, err;
+       int err;
 
-       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
        if (err) {
                en_err(priv, "failed reserving range for TX rings\n");
                return err;
@@ -949,7 +954,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
                                      prof->tx_ring_size, i, TX))
                        goto err;
 
-               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
                                           prof->tx_ring_size, TXBB_SIZE))
                        goto err;
        }
@@ -969,7 +974,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
        en_err(priv, "Failed to allocate NIC resources\n");
-       mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
        return -ENOMEM;
 }
 
@@ -1204,9 +1208,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
        /* Configure port */
+       mlx4_en_calc_rx_buf(dev);
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   MLX4_EN_MIN_MTU,
-                                   0, 0, 0, 0);
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+                                   prof->tx_pause, prof->tx_ppp,
+                                   prof->rx_pause, prof->rx_ppp);
        if (err) {
                en_err(priv, "Failed setting port general configurations "
                       "for port %d, with error %d\n", priv->port, err);
index ee6f4fe00837ea2ecd2324f076a7bfa60df2d5b7..a0313de122de2338684035d51f29bbc5a8ae4e34 100644 (file)
@@ -1975,6 +1975,8 @@ slave_start:
        if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
            !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
+               dev->caps.num_comp_vectors = 1;
+               dev->caps.comp_pool        = 0;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }
index 6ae350921b1afa460a85e42f8aa498e33881a26b..225c20d47900b488770298ffa34a6defe6d06844 100644 (file)
@@ -495,6 +495,7 @@ struct mlx4_en_priv {
        int vids[128];
        bool wol;
        struct device *ddev;
+       int base_tx_qpn;
 
 #ifdef CONFIG_MLX4_EN_DCB
        struct ieee_ets ets;
index 1fe2c7a8b40c44883fe3eb4d8def444686e64c06..a8fb52992c6403618806d6bcc1744f5c2c22ed0e 100644 (file)
@@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
        if (slave != dev->caps.function)
                memset(inbox->buf, 0, 256);
        if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
-               *(u8 *) inbox->buf         = !!reset_qkey_viols << 6;
+               *(u8 *) inbox->buf         |= !!reset_qkey_viols << 6;
                ((__be32 *) inbox->buf)[2] = agg_cap_mask;
        } else {
-               ((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+               ((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
                ((__be32 *) inbox->buf)[1] = agg_cap_mask;
        }
 
index 8d2666fcffd7eea6e2d913497be0f7ab080439c3..083d6715335cdb76200b905af3ee3ff4f25495f7 100644 (file)
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
                        /* Update stats */
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
-
-                       /* Free buffer */
-                       dev_kfree_skb_irq(skb);
                }
+               dev_kfree_skb_irq(skb);
 
                txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
        }
 
-       if (netif_queue_stopped(ndev))
-               netif_wake_queue(ndev);
+       if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+               if (netif_queue_stopped(ndev))
+                       netif_wake_queue(ndev);
+       }
 }
 
 static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
        .ndo_set_rx_mode        = lpc_eth_set_multicast_list,
        .ndo_do_ioctl           = lpc_eth_ioctl,
        .ndo_set_mac_address    = lpc_set_mac_address,
+       .ndo_change_mtu         = eth_change_mtu,
 };
 
 static int lpc_eth_drv_probe(struct platform_device *pdev)
index 46e77a2c51219223909f40d6fb223c0981d37a26..ad98f4d7919deaa73154810ceedf24949e98beeb 100644 (file)
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
                pfn = pci_info[i].id;
-               if (pfn > QLCNIC_MAX_PCI_FUNC) {
+               if (pfn >= QLCNIC_MAX_PCI_FUNC) {
                        ret = QL_STATUS_INVALID_PARAM;
                        goto err_eswitch;
                }
index 9757ce3543a08746150e73cc933bf25537f71b12..d7a04e0911012649f48b85bbf5521ee2dc05ddbe 100644 (file)
@@ -3894,6 +3894,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_34:
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        default:
@@ -5889,11 +5890,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
        if (status & LinkChg)
                __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-       napi_disable(&tp->napi);
-       rtl_irq_disable(tp);
-
-       napi_enable(&tp->napi);
-       napi_schedule(&tp->napi);
+       rtl_irq_enable_all(tp);
 }
 
 static void rtl_task(struct work_struct *work)
index 667169b825263d96402b44ba22784a1696f80c82..79bf09b419715773a33dca0d43161e911d0c818a 100644 (file)
@@ -1011,7 +1011,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;
@@ -1102,9 +1102,11 @@ static int sh_eth_rx(struct net_device *ndev)
        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
-               /* fix the values for the next receiving */
-               mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
-                                              sh_eth_read(ndev, RDLAR)) >> 4;
+               /* fix the values for the next receiving if RDE is set */
+               if (intr_status & EESR_RDE)
+                       mdp->cur_rx = mdp->dirty_rx =
+                               (sh_eth_read(ndev, RDFAR) -
+                                sh_eth_read(ndev, RDLAR)) >> 4;
                sh_eth_write(ndev, EDRRR_R, EDRRR);
        }
 
@@ -1273,7 +1275,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                        EESR_RTSF | /* short frame recv */
                        EESR_PRE  | /* PHY-LSI recv error */
                        EESR_CERF)){ /* recv frame CRC error */
-               sh_eth_rx(ndev);
+               sh_eth_rx(ndev, intr_status);
        }
 
        /* Tx Check */
index 036428348faa3e5b58cd261b9f0ec45e0ad3e4fc..9f448279e12a52ea7965c3500bd910c859afbb6b 100644 (file)
@@ -13,7 +13,7 @@ config STMMAC_ETH
 if STMMAC_ETH
 
 config STMMAC_PLATFORM
-       tristate "STMMAC platform bus support"
+       bool "STMMAC Platform bus support"
        depends on STMMAC_ETH
        default y
        ---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
          If unsure, say N.
 
 config STMMAC_PCI
-       tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+       bool "STMMAC PCI bus support (EXPERIMENTAL)"
        depends on STMMAC_ETH && PCI && EXPERIMENTAL
        ---help---
          This is to select the Synopsys DWMAC available on PCI devices,
index fb8377da1687c2166d3ebda44f5a7fa24c6bd785..4b785e10f2ed7d0994d17df944f01907480a767e 100644 (file)
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
                                                csum);
-
+               wmb();
                entry = (++priv->cur_tx) % txsize;
                desc = priv->dma_tx + entry;
 
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
                                            len, DMA_TO_DEVICE);
                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+               wmb();
                priv->hw->desc->set_tx_owner(desc);
                priv->tx_skbuff[entry] = NULL;
        } else {
index 6b5d060ee9def7dd5fb5a8edac7037cc71bd869b..dc20c56efc9d6dcca84d8981d6648af10e5d66d3 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
+#include <linux/pci.h>
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
-
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 static inline int stmmac_clk_enable(struct stmmac_priv *priv)
 {
        if (!IS_ERR(priv->stmmac_clk))
-               return clk_enable(priv->stmmac_clk);
+               return clk_prepare_enable(priv->stmmac_clk);
 
        return 0;
 }
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
        if (IS_ERR(priv->stmmac_clk))
                return;
 
-       clk_disable(priv->stmmac_clk);
+       clk_disable_unprepare(priv->stmmac_clk);
 }
 static inline int stmmac_clk_get(struct stmmac_priv *priv)
 {
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
        return 0;
 }
 #endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+       int err;
+
+       err = platform_driver_register(&stmmac_pltfr_driver);
+       if (err)
+               pr_err("stmmac: failed to register the platform driver\n");
+
+       return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+       platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+       pr_debug("stmmac: not registering the platform driver\n");
+
+       return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+       int err;
+
+       err = pci_register_driver(&stmmac_pci_driver);
+       if (err)
+               pr_err("stmmac: failed to register the PCI driver\n");
+
+       return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+       pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+       pr_debug("stmmac: not registering the PCI driver\n");
+
+       return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
index 70966330f44eca825d9456f5cea6723328c5e71b..ea3003edde18671ac13fd887fda9abc5d8a018ce 100644 (file)
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
 
 /**
  * stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
        if (priv->plat->enh_desc) {
@@ -1211,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
                wmb();
                priv->hw->desc->set_tx_owner(desc);
+               wmb();
        }
 
        /* Interrupt on completion only for the latest segment */
@@ -1226,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* To avoid a race condition */
        priv->hw->desc->set_tx_owner(first);
+       wmb();
 
        priv->cur_tx++;
 
@@ -1289,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                }
                wmb();
                priv->hw->desc->set_rx_owner(p + entry);
+               wmb();
        }
 }
 
@@ -1861,6 +1865,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 /**
  * stmmac_dvr_probe
  * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  */
@@ -2090,6 +2096,34 @@ int stmmac_restore(struct net_device *ndev)
 }
 #endif /* CONFIG_PM */
 
+/* The driver can be built with either or both of the PCI and platform
+ * drivers, depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+       int err_plt = 0;
+       int err_pci = 0;
+
+       err_plt = stmmac_register_platform();
+       err_pci = stmmac_register_pci();
+
+       if ((err_pci) && (err_plt)) {
+               pr_err("stmmac: driver registration failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+       stmmac_unregister_platform();
+       stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
index 58fab5303e9cb45daaf4c134900dbb4ea47b60d8..cf826e6b6aa1d21eee5703d1b7edbcaef7b8e1c2 100644 (file)
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
 
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
        .name = STMMAC_RESOURCE_NAME,
        .id_table = stmmac_id_table,
        .probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
 #endif
 };
 
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-       int ret;
-
-       ret = pci_register_driver(&stmmac_driver);
-       if (ret < 0)
-               pr_err("%s: ERROR: driver registration failed\n", __func__);
-
-       return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-       pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
 MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
index 3dd8f08038086896121e9e0031b7ea5af32778b1..680d2b8dfe27990852849744426e115b27e41e4f 100644 (file)
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
 
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
        .probe = stmmac_pltfr_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
                   },
 };
 
-module_platform_driver(stmmac_driver);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
index 703c8cce2a2cfae470546a3043fa69dc676ea84f..8c726b7004d32a3ab1c8ba3c677135e82b2e07dc 100644 (file)
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
        struct netdev_queue *txq;
-       unsigned int tx_bytes;
        u16 pkt_cnt, tmp;
        int cons, index;
        u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
        netif_printk(np, tx_done, KERN_DEBUG, np->dev,
                     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-       tx_bytes = 0;
-       tmp = pkt_cnt;
-       while (tmp--) {
-               tx_bytes += rp->tx_buffs[cons].skb->len;
+       while (pkt_cnt--)
                cons = release_tx_packet(np, rp, cons);
-       }
 
        rp->cons = cons;
        smp_mb();
 
-       netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
 out:
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
                        struct tx_ring_info *rp = &np->tx_rings[i];
 
                        niu_free_tx_ring_info(np, rp);
-                       netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
                }
                kfree(np->tx_rings);
                np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                prod = NEXT_TX(rp, prod);
        }
 
-       netdev_tx_sent_queue(txq, skb->len);
-
        if (prod < rp->prod)
                rp->wrap_bit ^= TX_RING_KICK_WRAP;
        rp->prod = prod;
index d614c374ed9d2ada5b6923e288528b8201a09807..3b5c4571b55e3c922a4b6e5a94a0a4a8adcb2fce 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
index 2d9218f86bca7fdbb71e61515efc0974b28fa6ad..098b1c42b39368faef868e50fdbb3174a6ccf8d9 100644 (file)
@@ -7,6 +7,8 @@ config TILE_NET
        depends on TILE
        default y
        select CRC32
+       select TILE_GXIO_MPIPE if TILEGX
+       select HIGH_RES_TIMERS if TILEGX
        ---help---
          This is a standard Linux network device driver for the
          on-chip Tilera Gigabit Ethernet and XAUI interfaces.
index f634f142cab417b48e846de6a464032ee858733c..0ef9eefd32110560be6a51c141154aed71e758cc 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_TILE_NET) += tile_net.o
 ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
 else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
 endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644 (file)
index 0000000..83b4b38
--- /dev/null
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>      /* printk() */
+#include <linux/slab.h>        /* kmalloc() */
+#include <linux/errno.h>       /* error codes */
+#include <linux/types.h>       /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>   /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header.  We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
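+/* Worked out from the note above: 2 + 14 + 60 = 76 bytes, which rounds
+ * up to 128 assuming 64-byte cache lines (two lines per slot).
+ */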
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here?  If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
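+/* The power-of-two requirement comes from the way completions are
+ * indexed: "comp_next" and "comp_last" are free-running counters reduced
+ * with "% TILE_NET_MAX_COMPS", which only stays consistent if the
+ * counters ever wrap when the divisor is a power of two (and it also
+ * compiles down to a cheap mask).
+ */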
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
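+/* Rough arithmetic behind the 30 usec choice: at 10 Gb/s a 1500-byte
+ * payload is 12,000 bits, i.e. about 1.2 usec on the wire, so 30 usec
+ * corresponds to roughly 25-30 such packets (ignoring framing overhead).
+ */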
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+       void *buf;
+       size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+       /* The "complete_count" when the completion will be complete. */
+       s64 when;
+       /* The buffer to be freed when the completion is complete. */
+       struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+       /* The completions. */
+       struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+       /* The number of completions used. */
+       unsigned long comp_next;
+       /* The number of completions freed. */
+       unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+       struct hrtimer timer;
+       struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+       /* The NAPI struct. */
+       struct napi_struct napi;
+       /* Packet queue. */
+       gxio_mpipe_iqueue_t iqueue;
+       /* Our cpu. */
+       int my_cpu;
+       /* True if iqueue is valid. */
+       bool has_iqueue;
+       /* NAPI flags. */
+       bool napi_added;
+       bool napi_enabled;
+       /* Number of small sk_buffs which must still be provided. */
+       unsigned int num_needed_small_buffers;
+       /* Number of large sk_buffs which must still be provided. */
+       unsigned int num_needed_large_buffers;
+       /* A timer for handling egress completions. */
+       struct hrtimer egress_timer;
+       /* True if "egress_timer" is scheduled. */
+       bool egress_timer_scheduled;
+       /* Comps for each egress channel. */
+       struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+       /* Transmit wake timer for each egress channel. */
+       struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+       /* The "equeue". */
+       gxio_mpipe_equeue_t *equeue;
+       /* The headers for TSO. */
+       unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+       /* Our network device. */
+       struct net_device *dev;
+       /* The primary link. */
+       gxio_mpipe_link_t link;
+       /* The primary channel, if open, else -1. */
+       int channel;
+       /* The "loopify" egress link, if needed. */
+       gxio_mpipe_link_t loopify_link;
+       /* The "loopify" egress channel, if open, else -1. */
+       int loopify_channel;
+       /* The egress channel (channel or loopify_channel). */
+       int echannel;
+       /* Total stats. */
+       struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integers naming cpus; none of the named cpus may be
+ * a dedicated cpu or a dataplane cpu.
+ */
+static bool network_cpus_init(void)
+{
+       char buf[1024];
+       int rc;
+
+       if (network_cpus_string == NULL)
+               return false;
+
+       rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+       if (rc != 0) {
+               pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+                       network_cpus_string);
+               return false;
+       }
+
+       /* Remove dedicated cpus. */
+       cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+       if (cpumask_empty(&network_cpus_map)) {
+               pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+                       network_cpus_string);
+               return false;
+       }
+
+       cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+       pr_info("Linux network CPUs: %s\n", buf);
+       return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
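+/* For example (illustrative only), booting with "tile_net.cpus=1-3,6"
+ * asks cpus 1, 2, 3 and 6 to handle network interrupts, provided those
+ * cpus exist and are neither dedicated nor dataplane cpus.
+ */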
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress.  This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+       BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+       atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+       int stack = small ? small_buffer_stack : large_buffer_stack;
+       const unsigned long buffer_alignment = 128;
+       struct sk_buff *skb;
+       int len;
+
+       len = sizeof(struct sk_buff **) + buffer_alignment;
+       len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+       skb = dev_alloc_skb(len);
+       if (skb == NULL)
+               return false;
+
+       /* Make room for a back-pointer to 'skb' and guarantee alignment. */
+       skb_reserve(skb, sizeof(struct sk_buff **));
+       skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+       /* Save a back-pointer to 'skb'. */
+       *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+       /* Make sure "skb" and the back-pointer have been flushed. */
+       wmb();
+
+       gxio_mpipe_push_buffer(&context, stack,
+                              (void *)va_to_tile_io_addr(skb->data));
+
+       return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+       /* Acquire the associated "skb". */
+       struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+       struct sk_buff *skb = *skb_ptr;
+
+       /* Paranoia. */
+       if (skb->data != va) {
+               /* Panic here since there's a reasonable chance
+                * that corrupt buffers means generic memory
+                * corruption, with unpredictable system effects.
+                */
+               panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+                     va, skb, skb->data);
+       }
+
+       return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+       for (;;) {
+               tile_io_addr_t addr =
+                       (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+               if (addr == 0)
+                       break;
+               dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+       }
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+       while (info->num_needed_small_buffers != 0) {
+               if (!tile_net_provide_buffer(true))
+                       goto oops;
+               info->num_needed_small_buffers--;
+       }
+
+       while (info->num_needed_large_buffers != 0) {
+               if (!tile_net_provide_buffer(false))
+                       goto oops;
+               info->num_needed_large_buffers--;
+       }
+
+       return;
+
+oops:
+       /* Add a description to the page allocation failure dump. */
+       pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+       /* Filter packets received before we're up. */
+       if (dev == NULL || !(dev->flags & IFF_UP))
+               return true;
+
+       /* Filter out packets that aren't for us. */
+       if (!(dev->flags & IFF_PROMISC) &&
+           !is_multicast_ether_addr(buf) &&
+           compare_ether_addr(dev->dev_addr, buf) != 0)
+               return true;
+
+       return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+                                gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+
+       /* Encode the actual packet length. */
+       skb_put(skb, len);
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       /* Acknowledge "good" hardware checksums. */
+       if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       netif_receive_skb(skb);
+
+       /* Update stats. */
+       tile_net_stats_add(1, &priv->stats.rx_packets);
+       tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+       /* Need a new buffer. */
+       if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+               info->num_needed_small_buffers++;
+       else
+               info->num_needed_large_buffers++;
+}
+
+/* Handle a packet.  Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+       uint8_t l2_offset;
+       void *va;
+       void *buf;
+       unsigned long len;
+       bool filter;
+
+       /* Drop packets for which no buffer was available.
+        * NOTE: This happens under heavy load.
+        */
+       if (idesc->be) {
+               struct tile_net_priv *priv = netdev_priv(dev);
+               tile_net_stats_add(1, &priv->stats.rx_dropped);
+               gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+               if (net_ratelimit())
+                       pr_info("Dropping packet (insufficient buffers).\n");
+               return false;
+       }
+
+       /* Get the "l2_offset", if allowed. */
+       l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+       /* Get the raw buffer VA (includes "headroom"). */
+       va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+       /* Get the actual packet start/length. */
+       buf = va + l2_offset;
+       len = idesc->l2_size - l2_offset;
+
+       /* Point "va" at the raw buffer. */
+       va -= NET_IP_ALIGN;
+
+       filter = filter_packet(dev, buf);
+       if (filter) {
+               gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+       } else {
+               struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+               /* Skip headroom, and any custom header. */
+               skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+               tile_net_receive_skb(dev, skb, idesc, len);
+       }
+
+       gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+       return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       unsigned int work = 0;
+       gxio_mpipe_idesc_t *idesc;
+       int i, n;
+
+       /* Process packets. */
+       while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+               for (i = 0; i < n; i++) {
+                       if (i == TILE_NET_BATCH)
+                               goto done;
+                       if (tile_net_handle_packet(idesc + i)) {
+                               if (++work >= budget)
+                                       goto done;
+                       }
+               }
+       }
+
+       /* There are no packets left. */
+       napi_complete(&info->napi);
+
+       /* Re-enable hypervisor interrupts. */
+       gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+       /* HACK: Avoid the "rotting packet" problem. */
+       if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+               napi_schedule(&info->napi);
+
+       /* ISSUE: Handle completions? */
+
+done:
+       tile_net_provide_needed_buffers();
+
+       return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       napi_schedule(&info->napi);
+       return IRQ_HANDLED;
+}
+
+/* Free some completions.  This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+                               struct tile_net_comps *comps,
+                               int limit, bool force_update)
+{
+       int n = 0;
+       while (comps->comp_last < comps->comp_next) {
+               unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+               struct tile_net_comp *comp = &comps->comp_queue[cid];
+               if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+                                                  force_update || n == 0))
+                       break;
+               dev_kfree_skb_irq(comp->skb);
+               comps->comp_last++;
+               if (++n == limit)
+                       break;
+       }
+       return n;
+}
+
+/* Add a completion.  This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+                    struct tile_net_comps *comps,
+                    uint64_t when, struct sk_buff *skb)
+{
+       int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+       comps->comp_queue[cid].when = when;
+       comps->comp_queue[cid].skb = skb;
+       comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+
+       hrtimer_start(&info->tx_wake[priv->echannel].timer,
+                     ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+                     HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+       struct tile_net_tx_wake *tx_wake =
+               container_of(t, struct tile_net_tx_wake, timer);
+       netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+       return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+       if (!info->egress_timer_scheduled) {
+               hrtimer_start(&info->egress_timer,
+                             ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+                             HRTIMER_MODE_REL_PINNED);
+               info->egress_timer_scheduled = true;
+       }
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       unsigned long irqflags;
+       bool pending = false;
+       int i;
+
+       local_irq_save(irqflags);
+
+       /* The timer is no longer scheduled. */
+       info->egress_timer_scheduled = false;
+
+       /* Free all possible comps for this tile. */
+       for (i = 0; i < TILE_NET_CHANNELS; i++) {
+               struct tile_net_egress *egress = &egress_for_echannel[i];
+               struct tile_net_comps *comps = info->comps_for_echannel[i];
+               if (comps->comp_last >= comps->comp_next)
+                       continue;
+               tile_net_free_comps(egress->equeue, comps, -1, true);
+               pending = pending || (comps->comp_last < comps->comp_next);
+       }
+
+       /* Reschedule timer if needed. */
+       if (pending)
+               tile_net_schedule_egress_timer();
+
+       local_irq_restore(irqflags);
+
+       return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct net_device *dev = arg;
+
+       if (!info->has_iqueue)
+               return;
+
+       if (dev != NULL) {
+               if (!info->napi_added) {
+                       netif_napi_add(dev, &info->napi,
+                                      tile_net_poll, TILE_NET_WEIGHT);
+                       info->napi_added = true;
+               }
+               if (!info->napi_enabled) {
+                       napi_enable(&info->napi);
+                       info->napi_enabled = true;
+               }
+               enable_percpu_irq(ingress_irq, 0);
+       } else {
+               disable_percpu_irq(ingress_irq);
+               if (info->napi_enabled) {
+                       napi_disable(&info->napi);
+                       info->napi_enabled = false;
+               }
+               /* FIXME: Drain the iqueue. */
+       }
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+       static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
+       bool saw_channel = false;
+       int channel;
+       int rc;
+       int cpu;
+
+       gxio_mpipe_rules_init(&rules, &context);
+
+       for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+               if (tile_net_devs_for_channel[channel] == NULL)
+                       continue;
+               if (!saw_channel) {
+                       saw_channel = true;
+                       gxio_mpipe_rules_begin(&rules, first_bucket,
+                                              num_buckets, NULL);
+                       gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+               }
+               gxio_mpipe_rules_add_channel(&rules, channel);
+       }
+
+       /* NOTE: This can fail if there is no classifier.
+        * ISSUE: Can anything else cause it to fail?
+        */
+       rc = gxio_mpipe_rules_commit(&rules);
+       if (rc != 0) {
+               netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+               return -EIO;
+       }
+
+       /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, tile_net_update_cpu,
+                                        (saw_channel ? dev : NULL), 1);
+
+       /* HACK: Allow packets to flow in the simulator. */
+       if (saw_channel)
+               sim_enable_mpipe_links(0, -1);
+
+       return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
+       pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+       int rc;
+
+       /* Compute stack bytes; we round up to 64KB and then use
+        * alloc_pages_exact() so we get the required 64KB alignment as well.
+        */
+       buffer_stack_size =
+               ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+                     64 * 1024);
+
+       /* Allocate two buffer stack indices. */
+       rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+                          rc);
+               return rc;
+       }
+       small_buffer_stack = rc;
+       large_buffer_stack = rc + 1;
+
+       /* Allocate the small memory stack. */
+       small_buffer_stack_va =
+               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+       if (small_buffer_stack_va == NULL) {
+               netdev_err(dev,
+                          "Could not alloc %zd bytes for buffer stacks\n",
+                          buffer_stack_size);
+               return -ENOMEM;
+       }
+       rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+                                         BUFFER_SIZE_SMALL_ENUM,
+                                         small_buffer_stack_va,
+                                         buffer_stack_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+               return rc;
+       }
+       rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+                                              hash_pte, 0);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_register_client_memory failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       /* Allocate the large buffer stack. */
+       large_buffer_stack_va =
+               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+       if (large_buffer_stack_va == NULL) {
+               netdev_err(dev,
+                          "Could not alloc %zd bytes for buffer stacks\n",
+                          buffer_stack_size);
+               return -ENOMEM;
+       }
+       rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+                                         BUFFER_SIZE_LARGE_ENUM,
+                                         large_buffer_stack_va,
+                                         buffer_stack_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+                          rc);
+               return rc;
+       }
+       rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+                                              hash_pte, 0);
+       if (rc != 0) {
+               netdev_err(dev,
+                          "gxio_mpipe_register_client_memory failed: %d\n",
+                          rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+                                       int cpu, int ring)
+{
+       struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+       int order, i, rc;
+       struct page *page;
+       void *addr;
+
+       /* Allocate the "comps". */
+       order = get_order(COMPS_SIZE);
+       page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+       if (page == NULL) {
+               netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+                          COMPS_SIZE);
+               return -ENOMEM;
+       }
+       addr = pfn_to_kaddr(page_to_pfn(page));
+       memset(addr, 0, COMPS_SIZE);
+       for (i = 0; i < TILE_NET_CHANNELS; i++)
+               info->comps_for_echannel[i] =
+                       addr + i * sizeof(struct tile_net_comps);
+
+       /* If this is a network cpu, create an iqueue. */
+       if (cpu_isset(cpu, network_cpus_map)) {
+               order = get_order(NOTIF_RING_SIZE);
+               page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+               if (page == NULL) {
+                       netdev_err(dev,
+                                  "Failed to alloc %zd bytes iqueue memory\n",
+                                  NOTIF_RING_SIZE);
+                       return -ENOMEM;
+               }
+               addr = pfn_to_kaddr(page_to_pfn(page));
+               rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+                                           addr, NOTIF_RING_SIZE, 0);
+               if (rc < 0) {
+                       netdev_err(dev,
+                                  "gxio_mpipe_iqueue_init failed: %d\n", rc);
+                       return rc;
+               }
+               info->has_iqueue = true;
+       }
+
+       return ring;
+}
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+                                       int ring, int network_cpus_count)
+{
+       int group, rc;
+
+       /* Allocate one NotifGroup. */
+       rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+                          rc);
+               return rc;
+       }
+       group = rc;
+
+       /* Initialize global num_buckets value. */
+       if (network_cpus_count > 4)
+               num_buckets = 256;
+       else if (network_cpus_count > 1)
+               num_buckets = 16;
+
+       /* Allocate some buckets, and set global first_bucket value. */
+       rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+               return rc;
+       }
+       first_bucket = rc;
+
+       /* Init group and buckets. */
+       rc = gxio_mpipe_init_notif_group_and_buckets(
+               &context, group, ring, network_cpus_count,
+               first_bucket, num_buckets,
+               GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+       if (rc != 0) {
+               netdev_err(
+                       dev,
+                       "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+                       rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores.  Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+       int cpu, rc;
+
+       rc = create_irq();
+       if (rc < 0) {
+               netdev_err(dev, "create_irq failed: %d\n", rc);
+               return rc;
+       }
+       ingress_irq = rc;
+       tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+       rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+                        0, NULL, NULL);
+       if (rc != 0) {
+               netdev_err(dev, "request_irq failed: %d\n", rc);
+               destroy_irq(ingress_irq);
+               ingress_irq = -1;
+               return rc;
+       }
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               if (info->has_iqueue) {
+                       gxio_mpipe_request_notif_ring_interrupt(
+                               &context, cpu_x(cpu), cpu_y(cpu),
+                               1, ingress_irq, info->iqueue.ring);
+               }
+       }
+
+       return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+       int cpu;
+
+       /* Do cleanups that require the mpipe context first. */
+       if (small_buffer_stack >= 0)
+               tile_net_pop_all_buffers(small_buffer_stack);
+       if (large_buffer_stack >= 0)
+               tile_net_pop_all_buffers(large_buffer_stack);
+
+       /* Destroy mpipe context so the hardware no longer owns any memory. */
+       gxio_mpipe_destroy(&context);
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               free_pages((unsigned long)(info->comps_for_echannel[0]),
+                          get_order(COMPS_SIZE));
+               info->comps_for_echannel[0] = NULL;
+               free_pages((unsigned long)(info->iqueue.idescs),
+                          get_order(NOTIF_RING_SIZE));
+               info->iqueue.idescs = NULL;
+       }
+
+       if (small_buffer_stack_va)
+               free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+       if (large_buffer_stack_va)
+               free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+       small_buffer_stack_va = NULL;
+       large_buffer_stack_va = NULL;
+       large_buffer_stack = -1;
+       small_buffer_stack = -1;
+       first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state.  If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+       int i, num_buffers, rc;
+       int cpu;
+       int first_ring, ring;
+       int network_cpus_count = cpus_weight(network_cpus_map);
+
+       if (!hash_default) {
+               netdev_err(dev, "Networking requires hash_default!\n");
+               return -EIO;
+       }
+
+       rc = gxio_mpipe_init(&context, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+               return -EIO;
+       }
+
+       /* Set up the buffer stacks. */
+       num_buffers =
+               network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+       rc = init_buffer_stacks(dev, num_buffers);
+       if (rc != 0)
+               goto fail;
+
+       /* Provide initial buffers. */
+       rc = -ENOMEM;
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(true)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       goto fail;
+               }
+       }
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(false)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       goto fail;
+               }
+       }
+
+       /* Allocate one NotifRing for each network cpu. */
+       rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+       if (rc < 0) {
+               netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+                          rc);
+               goto fail;
+       }
+
+       /* Init NotifRings per-cpu. */
+       first_ring = rc;
+       ring = first_ring;
+       for_each_online_cpu(cpu) {
+               rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+               if (rc < 0)
+                       goto fail;
+               ring = rc;
+       }
+
+       /* Initialize NotifGroup and buckets. */
+       rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+       if (rc != 0)
+               goto fail;
+
+       /* Create and enable interrupts. */
+       rc = tile_net_setup_interrupts(dev);
+       if (rc != 0)
+               goto fail;
+
+       return 0;
+
+fail:
+       tile_net_init_mpipe_fail();
+       return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+       struct page *headers_page, *edescs_page, *equeue_page;
+       gxio_mpipe_edesc_t *edescs;
+       gxio_mpipe_equeue_t *equeue;
+       unsigned char *headers;
+       int headers_order, edescs_order, equeue_order;
+       size_t edescs_size;
+       int edma;
+       int rc = -ENOMEM;
+
+       /* Only initialize once. */
+       if (egress_for_echannel[echannel].equeue != NULL)
+               return 0;
+
+       /* Allocate memory for the "headers". */
+       headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+       headers_page = alloc_pages(GFP_KERNEL, headers_order);
+       if (headers_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for TSO headers.\n",
+                           PAGE_SIZE << headers_order);
+               goto fail;
+       }
+       headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+       /* Allocate memory for the "edescs". */
+       edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+       edescs_order = get_order(edescs_size);
+       edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+       if (edescs_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for eDMA ring.\n",
+                           edescs_size);
+               goto fail_headers;
+       }
+       edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+       /* Allocate memory for the "equeue". */
+       equeue_order = get_order(sizeof(*equeue));
+       equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+       if (equeue_page == NULL) {
+               netdev_warn(dev,
+                           "Could not alloc %zd bytes for equeue info.\n",
+                           PAGE_SIZE << equeue_order);
+               goto fail_edescs;
+       }
+       equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+       /* Allocate an edma ring.  Note that in practice this can't
+        * fail, which is good, because the error paths below do not
+        * free the ring, so a later failure would leak it.
+        */
+       rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+       if (rc < 0) {
+               netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+                           rc);
+               goto fail_equeue;
+       }
+       edma = rc;
+
+       /* Initialize the equeue. */
+       rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+                                   edescs, edescs_size, 0);
+       if (rc != 0) {
+               netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+               goto fail_equeue;
+       }
+
+       /* Done. */
+       egress_for_echannel[echannel].equeue = equeue;
+       egress_for_echannel[echannel].headers = headers;
+       return 0;
+
+fail_equeue:
+       __free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+       __free_pages(edescs_page, edescs_order);
+
+fail_headers:
+       __free_pages(headers_page, headers_order);
+
+fail:
+       return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+                             const char *link_name)
+{
+       int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+       if (rc < 0) {
+               netdev_err(dev, "Failed to open '%s'\n", link_name);
+               return rc;
+       }
+       rc = gxio_mpipe_link_channel(link);
+       if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+               netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+               gxio_mpipe_link_close(link);
+               return -EINVAL;
+       }
+       return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int cpu, rc;
+
+       mutex_lock(&tile_net_devs_for_channel_mutex);
+
+       /* Do one-time initialization the first time any device is opened. */
+       if (ingress_irq < 0) {
+               rc = tile_net_init_mpipe(dev);
+               if (rc != 0)
+                       goto fail;
+       }
+
+       /* Determine if this is the "loopify" device. */
+       if (unlikely((loopify_link_name != NULL) &&
+                    !strcmp(dev->name, loopify_link_name))) {
+               rc = tile_net_link_open(dev, &priv->link, "loop0");
+               if (rc < 0)
+                       goto fail;
+               priv->channel = rc;
+               rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+               if (rc < 0)
+                       goto fail;
+               priv->loopify_channel = rc;
+               priv->echannel = rc;
+       } else {
+               rc = tile_net_link_open(dev, &priv->link, dev->name);
+               if (rc < 0)
+                       goto fail;
+               priv->channel = rc;
+               priv->echannel = rc;
+       }
+
+       /* Initialize egress info (if needed).  Once ever, per echannel. */
+       rc = tile_net_init_egress(dev, priv->echannel);
+       if (rc != 0)
+               goto fail;
+
+       tile_net_devs_for_channel[priv->channel] = dev;
+
+       rc = tile_net_update(dev);
+       if (rc != 0)
+               goto fail;
+
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       /* Initialize the transmit wake timer for this device for each cpu. */
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               struct tile_net_tx_wake *tx_wake =
+                       &info->tx_wake[priv->echannel];
+
+               hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_REL);
+               tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+               tx_wake->dev = dev;
+       }
+
+       for_each_online_cpu(cpu)
+               netif_start_subqueue(dev, cpu);
+       netif_carrier_on(dev);
+       return 0;
+
+fail:
+       if (priv->loopify_channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+                       netdev_warn(dev, "Failed to close loopify link!\n");
+               priv->loopify_channel = -1;
+       }
+       if (priv->channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->link) != 0)
+                       netdev_warn(dev, "Failed to close link!\n");
+               /* Clear the channel table entry while the channel is
+                * still valid; indexing with -1 would be out of bounds.
+                */
+               tile_net_devs_for_channel[priv->channel] = NULL;
+               priv->channel = -1;
+       }
+       priv->echannel = -1;
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       /* Don't return raw gxio error codes to generic Linux. */
+       return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+               struct tile_net_tx_wake *tx_wake =
+                       &info->tx_wake[priv->echannel];
+
+               hrtimer_cancel(&tx_wake->timer);
+               netif_stop_subqueue(dev, cpu);
+       }
+
+       mutex_lock(&tile_net_devs_for_channel_mutex);
+       tile_net_devs_for_channel[priv->channel] = NULL;
+       (void)tile_net_update(dev);
+       if (priv->loopify_channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+                       netdev_warn(dev, "Failed to close loopify link!\n");
+               priv->loopify_channel = -1;
+       }
+       if (priv->channel >= 0) {
+               if (gxio_mpipe_link_close(&priv->link) != 0)
+                       netdev_warn(dev, "Failed to close link!\n");
+               priv->channel = -1;
+       }
+       priv->echannel = -1;
+       mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+       return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+       unsigned long pfn = page_to_pfn(skb_frag_page(f));
+       return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+                                      struct tile_net_comps *comps,
+                                      gxio_mpipe_equeue_t *equeue,
+                                      int num_edescs)
+{
+       /* Try to acquire a completion entry. */
+       if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+           tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+               /* Try to acquire an egress slot. */
+               s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+               if (slot >= 0)
+                       return slot;
+
+               /* Freeing some completions gives the equeue time to drain. */
+               tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+               slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+               if (slot >= 0)
+                       return slot;
+       }
+
+       /* Still nothing; give up and stop the queue for a short while. */
+       netif_stop_subqueue(dev, smp_processor_id());
+       tile_net_schedule_tx_wake_timer(dev);
+       return -1;
+}
+
+/* Determine how many edescs are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments.  This requires special care.
+ */
+static int tso_count_edescs(struct sk_buff *skb)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int data_len = skb->data_len;
+       unsigned int p_len = sh->gso_size;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int num_edescs = 0;
+       int segment;
+
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+
+               unsigned int p_used = 0;
+
+               /* One edesc for header and for each piece of the payload. */
+               for (num_edescs++; p_used < p_len; num_edescs++) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+               }
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       return num_edescs;
+}
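
/* A worked illustration of the counting loop above, as a standalone
 * userspace sketch (all sizes hypothetical, not taken from the driver):
 * gso_size = 1000, 3 segments, and 3000 bytes of payload split across
 * fragments of 1400 and 1600 bytes.  Each segment costs one edesc for
 * its header plus one per contiguous payload piece; the middle segment
 * straddles the fragment boundary, so the total is 7.
 */
#include <stdio.h>

int main(void)
{
	long frag_sizes[] = { 1400, 1600 };	/* hypothetical fragment sizes */
	long p_len = 1000;			/* hypothetical gso_size */
	long data_len = 3000;			/* total payload (3 segments) */
	long f_id = -1, f_size = -1, f_used = -1, n;
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < 3; segment++) {
		long p_used = 0;

		/* One edesc for the header, one per payload piece. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance to the next fragment when needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = frag_sizes[f_id];
				f_used = 0;
			}
			/* Take what this fragment can supply. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be shorter than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	printf("%d\n", num_edescs);	/* prints 7: 3 headers + 4 payload pieces */
	return 0;
}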
+
+/* Prepare modified copies of the skbuff headers.
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+                               s64 slot)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       struct iphdr *ih;
+       struct tcphdr *th;
+       unsigned int data_len = skb->data_len;
+       unsigned char *data = skb->data;
+       unsigned int ih_off, th_off, sh_len, p_len;
+       unsigned int isum_seed, tsum_seed, id, seq;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int segment;
+
+       /* Locate original headers and compute various lengths. */
+       ih = ip_hdr(skb);
+       th = tcp_hdr(skb);
+       ih_off = skb_network_offset(skb);
+       th_off = skb_transport_offset(skb);
+       sh_len = th_off + tcp_hdrlen(skb);
+       p_len = sh->gso_size;
+
+       /* Set up seed values for IP and TCP csum and initialize id and seq. */
+       isum_seed = ((0xFFFF - ih->check) +
+                    (0xFFFF - ih->tot_len) +
+                    (0xFFFF - ih->id));
+       tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+       id = ntohs(ih->id);
+       seq = ntohl(th->seq);
+
+       /* Prepare all the headers. */
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               unsigned char *buf;
+               unsigned int p_used = 0;
+
+               /* Copy to the header memory for this segment. */
+               buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+                       NET_IP_ALIGN;
+               memcpy(buf, data, sh_len);
+
+               /* Update copied ip header. */
+               ih = (struct iphdr *)(buf + ih_off);
+               ih->tot_len = htons(sh_len + p_len - ih_off);
+               ih->id = htons(id);
+               ih->check = csum_long(isum_seed + ih->tot_len +
+                                     ih->id) ^ 0xffff;
+
+               /* Update copied tcp header. */
+               th = (struct tcphdr *)(buf + th_off);
+               th->seq = htonl(seq);
+               th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+               if (segment != sh->gso_segs - 1) {
+                       th->fin = 0;
+                       th->psh = 0;
+               }
+
+               /* Skip past the header. */
+               slot++;
+
+               /* Skip past the payload. */
+               while (p_used < p_len) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+
+                       slot++;
+               }
+
+               id++;
+               seq += p_len;
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* Flush the headers so they are ready for hardware DMA. */
+       wmb();
+}
+
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+                      struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int data_len = skb->data_len;
+       unsigned int p_len = sh->gso_size;
+       gxio_mpipe_edesc_t edesc_head = { { 0 } };
+       gxio_mpipe_edesc_t edesc_body = { { 0 } };
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = -1;  /* size of the current fragment */
+       long f_used = -1;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       unsigned long tx_packets = 0, tx_bytes = 0;
+       unsigned int csum_start, sh_len;
+       int segment;
+
+       /* Prepare to egress the headers: set up header edesc. */
+       csum_start = skb_checksum_start_offset(skb);
+       sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       edesc_head.csum = 1;
+       edesc_head.csum_start = csum_start;
+       edesc_head.csum_dest = csum_start + skb->csum_offset;
+       edesc_head.xfer_size = sh_len;
+
+       /* This is only used to specify the TLB. */
+       edesc_head.stack_idx = large_buffer_stack;
+       edesc_body.stack_idx = large_buffer_stack;
+
+       /* Egress all the edescs. */
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               void *va;
+               unsigned char *buf;
+               unsigned int p_used = 0;
+
+               /* Egress the header. */
+               buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+                       NET_IP_ALIGN;
+               edesc_head.va = va_to_tile_io_addr(buf);
+               gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+               slot++;
+
+               /* Egress the payload. */
+               while (p_used < p_len) {
+
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = sh->frags[f_id].size;
+                               f_used = 0;
+                       }
+
+                       va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+
+                       /* Egress a piece of the payload. */
+                       edesc_body.va = va_to_tile_io_addr(va);
+                       edesc_body.xfer_size = n;
+                       edesc_body.bound = !(p_used < p_len);
+                       gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+                       slot++;
+               }
+
+               tx_packets++;
+               tx_bytes += sh_len + p_len;
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* Update stats. */
+       tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+       tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance to around 7.5 Gbps on the 10G interfaces, although it
+ * also drops cpu utilization considerably, to under 8%.  But
+ * implementing "TSO" in the driver brings performance back up to line
+ * rate, while dropping cpu usage even further, to less than 4%.  In
+ * practice, profiling of GSO shows that skb_segment() is what causes
+ * the performance overheads; we benefit in the driver from using
+ * preallocated memory to duplicate the TCP/IP headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+       int channel = priv->echannel;
+       struct tile_net_egress *egress = &egress_for_echannel[channel];
+       struct tile_net_comps *comps = info->comps_for_echannel[channel];
+       gxio_mpipe_equeue_t *equeue = egress->equeue;
+       unsigned long irqflags;
+       int num_edescs;
+       s64 slot;
+
+       /* Determine how many mpipe edescs are needed. */
+       num_edescs = tso_count_edescs(skb);
+
+       local_irq_save(irqflags);
+
+       /* Try to acquire a completion entry and an egress slot. */
+       slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+       if (slot < 0) {
+               local_irq_restore(irqflags);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Set up copies of header data properly. */
+       tso_headers_prepare(skb, egress->headers, slot);
+
+       /* Actually pass the data to the network hardware. */
+       tso_egress(dev, equeue, skb, egress->headers, slot);
+
+       /* Add a completion record. */
+       add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+       local_irq_restore(irqflags);
+
+       /* Make sure the egress timer is scheduled. */
+       tile_net_schedule_egress_timer();
+
+       return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+                                      struct sk_buff *skb,
+                                      void *b_data, unsigned int b_len)
+{
+       unsigned int i, n = 0;
+
+       struct skb_shared_info *sh = skb_shinfo(skb);
+
+       if (b_len != 0) {
+               frags[n].buf = b_data;
+               frags[n++].length = b_len;
+       }
+
+       for (i = 0; i < sh->nr_frags; i++) {
+               skb_frag_t *f = &sh->frags[i];
+               frags[n].buf = tile_net_frag_buf(f);
+               frags[n++].length = skb_frag_size(f);
+       }
+
+       return n;
+}
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct tile_net_priv *priv = netdev_priv(dev);
+       struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+       gxio_mpipe_equeue_t *equeue = egress->equeue;
+       struct tile_net_comps *comps =
+               info->comps_for_echannel[priv->echannel];
+       unsigned int len = skb->len;
+       unsigned char *data = skb->data;
+       unsigned int num_edescs;
+       struct frag frags[MAX_FRAGS];
+       gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+       unsigned long irqflags;
+       gxio_mpipe_edesc_t edesc = { { 0 } };
+       unsigned int i;
+       s64 slot;
+
+       if (skb_is_gso(skb))
+               return tile_net_tx_tso(skb, dev);
+
+       num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+       /* This is only used to specify the TLB. */
+       edesc.stack_idx = large_buffer_stack;
+
+       /* Prepare the edescs. */
+       for (i = 0; i < num_edescs; i++) {
+               edesc.xfer_size = frags[i].length;
+               edesc.va = va_to_tile_io_addr(frags[i].buf);
+               edescs[i] = edesc;
+       }
+
+       /* Mark the final edesc. */
+       edescs[num_edescs - 1].bound = 1;
+
+       /* Add checksum info to the initial edesc, if needed. */
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               unsigned int csum_start = skb_checksum_start_offset(skb);
+               edescs[0].csum = 1;
+               edescs[0].csum_start = csum_start;
+               edescs[0].csum_dest = csum_start + skb->csum_offset;
+       }
+
+       local_irq_save(irqflags);
+
+       /* Try to acquire a completion entry and an egress slot. */
+       slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+       if (slot < 0) {
+               local_irq_restore(irqflags);
+               return NETDEV_TX_BUSY;
+       }
+
+       for (i = 0; i < num_edescs; i++)
+               gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+       /* Add a completion record. */
+       add_comp(equeue, comps, slot - 1, skb);
+
+       /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
+       tile_net_stats_add(1, &priv->stats.tx_packets);
+       tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+                          &priv->stats.tx_bytes);
+
+       local_irq_restore(irqflags);
+
+       /* Make sure the egress timer is scheduled. */
+       tile_net_schedule_egress_timer();
+
+       return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+       return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if ((new_mtu < 68) || (new_mtu > 1500))
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing the MAC address.  However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+       disable_percpu_irq(ingress_irq);
+       tile_net_handle_ingress_irq(ingress_irq, NULL);
+       enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+       .ndo_open = tile_net_open,
+       .ndo_stop = tile_net_stop,
+       .ndo_start_xmit = tile_net_tx,
+       .ndo_select_queue = tile_net_select_queue,
+       .ndo_do_ioctl = tile_net_ioctl,
+       .ndo_get_stats = tile_net_get_stats,
+       .ndo_change_mtu = tile_net_change_mtu,
+       .ndo_tx_timeout = tile_net_tx_timeout,
+       .ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+       ether_setup(dev);
+       dev->netdev_ops = &tile_net_ops;
+       dev->watchdog_timeo = TILE_NET_TIMEOUT;
+       dev->features |= NETIF_F_LLTX;
+       dev->features |= NETIF_F_HW_CSUM;
+       dev->features |= NETIF_F_SG;
+       dev->features |= NETIF_F_TSO;
+       dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+       int ret;
+       int i;
+       int nz_addr = 0;
+       struct net_device *dev;
+       struct tile_net_priv *priv;
+
+       /* HACK: Ignore "loop" links. */
+       if (strncmp(name, "loop", 4) == 0)
+               return;
+
+       /* Allocate the device structure.  Normally, "name" is a
+        * template, instantiated by register_netdev(), but not for us.
+        */
+       dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+                              NR_CPUS, 1);
+       if (!dev) {
+               pr_err("alloc_netdev_mqs(%s) failed\n", name);
+               return;
+       }
+
+       /* Initialize "priv". */
+       priv = netdev_priv(dev);
+       memset(priv, 0, sizeof(*priv));
+       priv->dev = dev;
+       priv->channel = -1;
+       priv->loopify_channel = -1;
+       priv->echannel = -1;
+
+       /* Get the MAC address and set it in the device struct; this must
+        * be done before the device is opened.  If the MAC is all zeroes,
+        * we use a random address, since we're probably on the simulator.
+        */
+       for (i = 0; i < 6; i++)
+               nz_addr |= mac[i];
+
+       if (nz_addr) {
+               memcpy(dev->dev_addr, mac, 6);
+               dev->addr_len = 6;
+       } else {
+               random_ether_addr(dev->dev_addr);
+       }
+
+       /* Register the network device. */
+       ret = register_netdev(dev);
+       if (ret) {
+               netdev_err(dev, "register_netdev failed %d\n", ret);
+               free_netdev(dev);
+               return;
+       }
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       int my_cpu = smp_processor_id();
+
+       info->has_iqueue = false;
+
+       info->my_cpu = my_cpu;
+
+       /* Initialize the egress timer. */
+       hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+       int i;
+       char name[GXIO_MPIPE_LINK_NAME_LEN];
+       uint8_t mac[6];
+
+       pr_info("Tilera Network Driver\n");
+
+       mutex_init(&tile_net_devs_for_channel_mutex);
+
+       /* Initialize each CPU. */
+       on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+       /* Find out what devices we have, and initialize them. */
+       for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+               tile_net_dev_init(name, mac);
+
+       if (!network_cpus_init())
+               network_cpus_map = *cpu_online_mask;
+
+       return 0;
+}
+
+module_init(tile_net_init_module);
index 4ffcd57b011b142fd367530ba032b3495a74a657..2857ab078aac1f9940f406f49916706dc3255441 100644 (file)
@@ -478,6 +478,7 @@ struct netvsc_device {
        u32 nvsp_version;
 
        atomic_t num_outstanding_sends;
+       wait_queue_head_t wait_drain;
        bool start_remove;
        bool destroy;
        /*
index 8b919471472fb1dba4d34ffcf0bfe5b4c723af7b..0c569831db5aeb0de77ef9a4d81d6d7e7f3281b8 100644 (file)
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
        if (!net_device)
                return NULL;
 
+       init_waitqueue_head(&net_device->wait_drain);
        net_device->start_remove = false;
        net_device->destroy = false;
        net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
 
        /* Wait for all send completions */
-       while (atomic_read(&net_device->num_outstanding_sends)) {
-               dev_info(&device->device,
-                       "waiting for %d requests to complete...\n",
-                       atomic_read(&net_device->num_outstanding_sends));
-               udelay(100);
-       }
+       wait_event(net_device->wait_drain,
+                  atomic_read(&net_device->num_outstanding_sends) == 0);
 
        netvsc_disconnect_vsp(net_device);
 
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
                num_outstanding_sends =
                        atomic_dec_return(&net_device->num_outstanding_sends);
 
+               if (net_device->destroy && num_outstanding_sends == 0)
+                       wake_up(&net_device->wait_drain);
+
                if (netif_queue_stopped(ndev) && !net_device->start_remove &&
                        (hv_ringbuf_avail_percent(&device->channel->outbound)
                        > RING_AVAIL_PERCENT_HIWATER ||
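
The hunk above replaces a udelay() polling loop with the standard waitqueue drain idiom: the send-completion path wakes the waiter once the outstanding count drops to zero while a teardown is pending. A minimal generic sketch of that idiom (structure and function names here are hypothetical, not the netvsc code):

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical container mirroring the fields used above. */
struct drain_example {
	wait_queue_head_t wait_drain;
	atomic_t outstanding;
	bool destroy;
};

static void drain_example_init(struct drain_example *d)
{
	init_waitqueue_head(&d->wait_drain);
	atomic_set(&d->outstanding, 0);
	d->destroy = false;
}

/* Completion side: drop the count, wake the waiter once fully drained. */
static void drain_example_complete(struct drain_example *d)
{
	if (atomic_dec_return(&d->outstanding) == 0 && d->destroy)
		wake_up(&d->wait_drain);
}

/* Teardown side: sleep until every outstanding send has completed. */
static void drain_example_remove(struct drain_example *d)
{
	d->destroy = true;
	wait_event(d->wait_drain, atomic_read(&d->outstanding) == 0);
}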
index 5ac46f5226f3c5b4b1d35e3450ec922326902896..47f8e8939266fd64ce097e49362a8e13a7019ef1 100644 (file)
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
 #define IP1001_APS_ON                  11      /* IP1001 APS Mode  bit */
 #define IP101A_G_APS_ON                        2       /* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS       0x11    /* Conf Info IRQ & Status Reg */
+#define        IP101A_G_IRQ_PIN_USED           (1<<15) /* INTR pin used */
+#define        IP101A_G_IRQ_DEFAULT            IP101A_G_IRQ_PIN_USED
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
        if (c < 0)
                return c;
 
+       /* INTR pin used: speed/link/duplex will cause an interrupt */
+       c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+       if (c < 0)
+               return c;
+
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
                /* Additional delay (2ns) used to adjust RX clock phase
                 * at RGMII interface */
index 39ea0674dcde8cde6b31d71202b9b81873897ba6..5c120189ec86d419866b7320c36f4d618dc5df64 100644 (file)
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
        struct mdio_mux_parent_bus *pb = cb->parent;
        int r;
 
-       mutex_lock(&pb->mii_bus->mdio_lock);
+       /* In theory multiple mdio_mux could be stacked, thus creating
+        * more than a single level of nesting.  But in practice,
+        * SINGLE_DEPTH_NESTING will cover the vast majority of use
+        * cases.  We use it, instead of trying to handle the general
+        * case.
+        */
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
        int r;
 
-       mutex_lock(&pb->mii_bus->mdio_lock);
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
        r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
        if (r)
                goto out;
index 683ef1ce55196315a90f69f35f015b4773899830..5061608f408c67a41ab6a0432f10a206032af56a 100644 (file)
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
 }
 /**
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
  *
  * Returns a pointer to the mii_bus, or NULL if none found.
  *
index 590f902deb6ba29f348c78dad429275cac23a276..9d6c80c8a0cf7693eacf92356c107be15fe78760 100644 (file)
@@ -161,7 +161,7 @@ static struct phy_driver ks8051_driver = {
 static struct phy_driver ks8001_driver = {
        .phy_id         = PHY_ID_KS8001,
        .name           = "Micrel KS8001 or KS8721",
-       .phy_id_mask    = 0x00fffff0,
+       .phy_id_mask    = 0x00ffffff,
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
@@ -174,7 +174,7 @@ static struct phy_driver ks8001_driver = {
 
 static struct phy_driver ksz9021_driver = {
        .phy_id         = PHY_ID_KSZ9021,
-       .phy_id_mask    = 0x000fff10,
+       .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
        .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
@@ -240,8 +240,8 @@ MODULE_AUTHOR("David J. Choi");
 MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
-       { PHY_ID_KSZ9021, 0x000fff10 },
-       { PHY_ID_KS8001, 0x00fffff0 },
+       { PHY_ID_KSZ9021, 0x000ffffe },
+       { PHY_ID_KS8001, 0x00ffffff },
        { PHY_ID_KS8737, 0x00fffff0 },
        { PHY_ID_KS8041, 0x00fffff0 },
        { PHY_ID_KS8051, 0x00fffff0 },
index 964031e3da877f64c54cde0fa92f7cc6b7b06f69..a28a983d465e6d434a3272a36a9ab35a7b5b3eee 100644 (file)
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPAD 0x129a
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S  0x12a0
 
@@ -100,6 +101,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { USB_DEVICE_AND_INTERFACE_INFO(
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
index 3b206786b5e7d8196d2fd1e63060f17cd7101c6e..a051cedd64bde58179a1cb539b94a888fcd29416 100644 (file)
@@ -197,6 +197,10 @@ err:
 static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
+
+       /* can be called while disconnecting */
+       if (!dev)
+               return 0;
        return qmi_wwan_manage_power(dev, on);
 }
 
@@ -257,29 +261,6 @@ err:
        return rv;
 }
 
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- *   a) hardcoding interface number, or
- *   b) use the fact that the wwan interface is the only one lacking additional
- *      (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
-{
-       int rv = -EINVAL;
-
-       /* ignore any interface with additional descriptors */
-       if (intf->cur_altsetting->extralen)
-               goto err;
-
-       rv = qmi_wwan_bind_shared(dev, intf);
-err:
-       return rv;
-}
-
 static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
 {
        struct usb_driver *subdriver = (void *)dev->data[0];
@@ -347,15 +328,15 @@ static const struct driver_info   qmi_wwan_shared = {
        .manage_power   = qmi_wwan_manage_power,
 };
 
-static const struct driver_info        qmi_wwan_gobi = {
-       .description    = "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info        qmi_wwan_force_int0 = {
+       .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_gobi,
+       .bind           = qmi_wwan_bind_shared,
        .unbind         = qmi_wwan_unbind_shared,
        .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(0), /* interface whitelist bitmap */
 };
 
-/* ZTE suck at making USB descriptors */
 static const struct driver_info        qmi_wwan_force_int1 = {
        .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
@@ -365,6 +346,24 @@ static const struct driver_info    qmi_wwan_force_int1 = {
        .data           = BIT(1), /* interface whitelist bitmap */
 };
 
+static const struct driver_info qmi_wwan_force_int2 = {
+       .description    = "Qualcomm WWAN/QMI device",
+       .flags          = FLAG_WWAN,
+       .bind           = qmi_wwan_bind_shared,
+       .unbind         = qmi_wwan_unbind_shared,
+       .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(2), /* interface whitelist bitmap */
+};
+
+static const struct driver_info        qmi_wwan_force_int3 = {
+       .description    = "Qualcomm WWAN/QMI device",
+       .flags          = FLAG_WWAN,
+       .bind           = qmi_wwan_bind_shared,
+       .unbind         = qmi_wwan_unbind_shared,
+       .manage_power   = qmi_wwan_manage_power,
+       .data           = BIT(3), /* interface whitelist bitmap */
+};
+
 static const struct driver_info        qmi_wwan_force_int4 = {
        .description    = "Qualcomm WWAN/QMI device",
        .flags          = FLAG_WWAN,
@@ -390,16 +389,23 @@ static const struct driver_info   qmi_wwan_force_int4 = {
 static const struct driver_info        qmi_wwan_sierra = {
        .description    = "Sierra Wireless wwan/QMI device",
        .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_gobi,
+       .bind           = qmi_wwan_bind_shared,
        .unbind         = qmi_wwan_unbind_shared,
        .manage_power   = qmi_wwan_manage_power,
        .data           = BIT(8) | BIT(19), /* interface whitelist bitmap */
 };
 
 #define HUAWEI_VENDOR_ID       0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+       USB_DEVICE(vend, prod), \
+       .driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
 #define QMI_GOBI_DEVICE(vend, prod) \
        USB_DEVICE(vend, prod), \
-       .driver_info = (unsigned long)&qmi_wwan_gobi
+       .driver_info = (unsigned long)&qmi_wwan_force_int0
 
 static const struct usb_device_id products[] = {
        {       /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -501,6 +507,15 @@ static const struct usb_device_id products[] = {
                .bInterfaceProtocol = 0xff,
                .driver_info        = (unsigned long)&qmi_wwan_force_int4,
        },
+       {       /* ZTE MF60 */
+               .match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor           = 0x19d2,
+               .idProduct          = 0x1402,
+               .bInterfaceClass    = 0xff,
+               .bInterfaceSubClass = 0xff,
+               .bInterfaceProtocol = 0xff,
+               .driver_info        = (unsigned long)&qmi_wwan_force_int2,
+       },
        {       /* Sierra Wireless MC77xx in QMI mode */
                .match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
                .idVendor           = 0x1199,
@@ -510,20 +525,24 @@ static const struct usb_device_id products[] = {
                .bInterfaceProtocol = 0xff,
                .driver_info        = (unsigned long)&qmi_wwan_sierra,
        },
-       {QMI_GOBI_DEVICE(0x05c6, 0x9212)},      /* Acer Gobi Modem Device */
-       {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)},      /* HP un2400 Gobi Modem Device */
-       {QMI_GOBI_DEVICE(0x03f0, 0x371d)},      /* HP un2430 Mobile Broadband Module */
-       {QMI_GOBI_DEVICE(0x04da, 0x250d)},      /* Panasonic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x413c, 0x8172)},      /* Dell Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x1410, 0xa001)},      /* Novatel Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x0b05, 0x1776)},      /* Asus Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x19d2, 0xfff3)},      /* ONDA Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9001)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9002)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9202)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9203)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9222)},      /* Generic Gobi Modem device */
-       {QMI_GOBI_DEVICE(0x05c6, 0x9009)},      /* Generic Gobi Modem device */
+
+       /* Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+       {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},    /* HP un2400 Gobi Modem Device */
+       {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},    /* HP un2430 Mobile Broadband Module */
+       {QMI_GOBI1K_DEVICE(0x04da, 0x250d)},    /* Panasonic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x413c, 0x8172)},    /* Dell Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa001)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},    /* Asus Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},    /* ONDA Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},    /* Generic Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},    /* Generic Gobi Modem device */
+
+       /* Gobi 2000 and 3000 devices */
        {QMI_GOBI_DEVICE(0x413c, 0x8186)},      /* Dell Gobi 2000 Modem device (N0218, VU936) */
        {QMI_GOBI_DEVICE(0x05c6, 0x920b)},      /* Generic Gobi 2000 Modem device */
        {QMI_GOBI_DEVICE(0x05c6, 0x9225)},      /* Sony Gobi 2000 Modem device (N0279, VU730) */
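
The Gobi entries above select the QMI/wwan function purely by interface number, via the interface whitelist bitmaps (.data = BIT(0), BIT(3), and so on) in the driver_info structures. A minimal sketch of how such a bitmap can be consulted at bind time (hypothetical helper, not the actual qmi_wwan code):

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical check: accept an interface only when its number is set in
 * the whitelist bitmap carried in driver_info->data; an empty bitmap
 * means "no filtering".
 */
static bool example_intf_whitelisted(unsigned long whitelist, u8 intf_num)
{
	if (!whitelist)
		return true;
	return !!(whitelist & BIT(intf_num));
}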
index 3faef5670d1ff60547ec9fcbb42eb956fa7b58df..d75d1f56becff95ae9cf7b8f8dc08990d142d06c 100644 (file)
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 }
 
 static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
        .rx_urb_size = 8 * 1024,
        .whitelist = {
                .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
        }
 };
 
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
        .description = "Sierra Wireless USB-to-WWAN Modem",
        .flags = FLAG_WWAN | FLAG_SEND_ZLP,
        .bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
        .status = sierra_net_status,
        .rx_fixup = sierra_net_rx_fixup,
        .tx_fixup = sierra_net_tx_fixup,
-       .data = (unsigned long)&sierra_net_info_data_68A3,
+       .data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 static const struct usb_device_id products[] = {
        {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
-       .driver_info = (unsigned long) &sierra_net_info_68A3},
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
 
        {}, /* last item */
 };
index 9f58330f1312059d37deeb072b2d5c32d3d6df03..aba769d77459b61e6cc42deff0a101acd709af89 100644 (file)
@@ -796,11 +796,13 @@ int usbnet_open (struct net_device *net)
        if (info->manage_power) {
                retval = info->manage_power(dev, 1);
                if (retval < 0)
-                       goto done;
+                       goto done_manage_power_error;
                usb_autopm_put_interface(dev->intf);
        }
        return retval;
 
+done_manage_power_error:
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
 done:
        usb_autopm_put_interface(dev->intf);
 done_nopm:
@@ -876,9 +878,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
 {
        struct usbnet *dev = netdev_priv(net);
 
-       strncpy (info->driver, dev->driver_name, sizeof info->driver);
-       strncpy (info->version, DRIVER_VERSION, sizeof info->version);
-       strncpy (info->fw_version, dev->driver_info->description,
+       strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+       strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+       strlcpy (info->fw_version, dev->driver_info->description,
                sizeof info->fw_version);
        usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
 }
@@ -1202,6 +1204,21 @@ deferred:
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
 
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+       struct urb      *urb;
+       int             i;
+
+       /* don't refill the queue all at once */
+       for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+               urb = usb_alloc_urb(0, flags);
+               if (urb != NULL) {
+                       if (rx_submit(dev, urb, flags) == -ENOLINK)
+                               return;
+               }
+       }
+}
+
 /*-------------------------------------------------------------------------*/
 
 // tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1258,14 @@ static void usbnet_bh (unsigned long param)
                   !timer_pending (&dev->delay) &&
                   !test_bit (EVENT_RX_HALT, &dev->flags)) {
                int     temp = dev->rxq.qlen;
-               int     qlen = RX_QLEN (dev);
-
-               if (temp < qlen) {
-                       struct urb      *urb;
-                       int             i;
-
-                       // don't refill the queue all at once
-                       for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
-                               urb = usb_alloc_urb (0, GFP_ATOMIC);
-                               if (urb != NULL) {
-                                       if (rx_submit (dev, urb, GFP_ATOMIC) ==
-                                           -ENOLINK)
-                                               return;
-                               }
-                       }
+
+               if (temp < RX_QLEN(dev)) {
+                       rx_alloc_submit(dev, GFP_ATOMIC);
                        if (temp != dev->rxq.qlen)
                                netif_dbg(dev, link, dev->net,
                                          "rxqlen %d --> %d\n",
                                          temp, dev->rxq.qlen);
-                       if (dev->rxq.qlen < qlen)
+                       if (dev->rxq.qlen < RX_QLEN(dev))
                                tasklet_schedule (&dev->bh);
                }
                if (dev->txq.qlen < TX_QLEN (dev))
@@ -1513,6 +1518,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
                spin_lock_irq(&dev->txq.lock);
                /* don't autosuspend while transmitting */
                if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+                       dev->suspend_count--;
                        spin_unlock_irq(&dev->txq.lock);
                        return -EBUSY;
                } else {
@@ -1569,6 +1575,13 @@ int usbnet_resume (struct usb_interface *intf)
                spin_unlock_irq(&dev->txq.lock);
 
                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       /* handle remote wakeup ASAP */
+                       if (!dev->wait &&
+                               netif_device_present(dev->net) &&
+                               !timer_pending(&dev->delay) &&
+                               !test_bit(EVENT_RX_HALT, &dev->flags))
+                                       rx_alloc_submit(dev, GFP_KERNEL);
+
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
                        tasklet_schedule (&dev->bh);
index 5214b1eceb9516282cb9ae8b38f79a606da0ecb7..f18149ae2588e682e661100f026f7bcf60a16d06 100644 (file)
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
-       struct u64_stats_sync syncp;
+       struct u64_stats_sync tx_syncp;
+       struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;
 
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 
        hdr = skb_vnet_hdr(skb);
 
-       u64_stats_update_begin(&stats->syncp);
+       u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_end(&stats->rx_syncp);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
        while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
 
-               u64_stats_update_begin(&stats->syncp);
+               u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_end(&stats->tx_syncp);
 
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                u64 tpackets, tbytes, rpackets, rbytes;
 
                do {
-                       start = u64_stats_fetch_begin(&stats->syncp);
+                       start = u64_stats_fetch_begin(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
+               } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+
+               do {
+                       start = u64_stats_fetch_begin(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
-               } while (u64_stats_fetch_retry(&stats->syncp, start));
+               } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
 
                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
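
The split above gives each direction its own u64_stats_sync, so the transmit and receive paths never update a shared sequence counter, and the reader fetches each pair of counters under its own retry loop. A minimal sketch of the writer side under that layout (hypothetical stats structure, mirroring the pattern in the hunk):

#include <linux/u64_stats_sync.h>
#include <linux/types.h>

struct example_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes, tx_packets;
	u64 rx_bytes, rx_packets;
};

/* Transmit-side writer: only touches the tx sequence counter. */
static void example_count_tx(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->tx_syncp);
	s->tx_bytes += len;
	s->tx_packets++;
	u64_stats_update_end(&s->tx_syncp);
}

/* Receive-side writer: only touches the rx sequence counter. */
static void example_count_rx(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->rx_syncp);
	s->rx_bytes += len;
	s->rx_packets++;
	u64_stats_update_end(&s->rx_syncp);
}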
index 520a4b2eb9cc8b2d6c2a1e1281cf243b8ec8a9f4..a747c632597aab52e9e78f8f8c21a614f44b6fb4 100644 (file)
@@ -7233,8 +7233,8 @@ static int airo_get_aplist(struct net_device *dev,
                }
        } else {
                dwrq->flags = 1; /* Should be define'd */
-               memcpy(extra + sizeof(struct sockaddr)*i,
-                      &qual,  sizeof(struct iw_quality)*i);
+               memcpy(extra + sizeof(struct sockaddr) * i, qual,
+                      sizeof(struct iw_quality) * i);
        }
        dwrq->length = i;
 
index c54b7d37bff159f1fc8a7482a4d8bdfe806b9beb..420d69b2674ceae1282fcc9d8cbbc38d8c5d00af 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index fbaa309300764ef791faff16d06f85a28d7c431d..44ad6fe0278f410ce51019a24c13ee996a0f71bb 100644 (file)
@@ -1045,11 +1045,11 @@ ath5k_drain_tx_buffs(struct ath5k_hw *ah)
 
                                ath5k_txbuf_free_skb(ah, bf);
 
-                               spin_lock_bh(&ah->txbuflock);
+                               spin_lock(&ah->txbuflock);
                                list_move_tail(&bf->list, &ah->txbuf);
                                ah->txbuf_len++;
                                txq->txq_len--;
-                               spin_unlock_bh(&ah->txbuflock);
+                               spin_unlock(&ah->txbuflock);
                        }
                        txq->link = NULL;
                        txq->txq_poll_mark = false;
index a277cf6f339d4364413548f95e2f7fa5823ad45f..4866550ddd965eb0370133bc21ad55b22e29b240 100644 (file)
@@ -214,6 +214,7 @@ struct ath_frame_info {
        enum ath9k_key_type keytype;
        u8 keyix;
        u8 retries;
+       u8 rtscts_rate;
 };
 
 struct ath_buf_state {
index 2b8f61c210e1c2dd74ef5a779a5282f96253b856..abbd6effd60d31eb6a871e101ac10d0bbfa16f2e 100644 (file)
@@ -1496,6 +1496,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                        priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
 
                if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+                       ath9k_htc_choose_set_bssid(priv);
                        if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
                                ath9k_htc_start_ani(priv);
                        else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1504,11 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if (changed & BSS_CHANGED_IBSS) {
                if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
                        common->curaid = bss_conf->aid;
                        memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
                        ath9k_htc_set_bssid(priv);
-               } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
-                       ath9k_htc_choose_set_bssid(priv);
                }
        }
 
index 7db1890448f20fbfe85ccccea1966633de0d84ad..995ca8e1302efc7562ff9905d8e91d00ffb25f74 100644 (file)
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
@@ -784,13 +784,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
 
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
 {
+       struct ath_common *common = ath9k_hw_common(ah);
+       int i = 0;
+
        REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
        udelay(100);
        REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 
-       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
                udelay(100);
 
+               if (WARN_ON_ONCE(i >= 100)) {
+                       ath_err(common, "PLL4 measurement not done\n");
+                       break;
+               }
+
+               i++;
+       }
+
        return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 }
 EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
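
The loop above now gives up after 100 polls instead of spinning forever if PLL4_MEAS_DONE never asserts. The same bounded-poll shape in isolation, a sketch with a hypothetical register and DONE bit rather than the driver's real accessors:

/* needs <linux/io.h>, <linux/delay.h>, <linux/bitops.h>, <linux/bug.h>, <linux/errno.h> */
#define EXAMPLE_MAX_POLLS	100

static int example_wait_for_done(void __iomem *status_reg)
{
	int i;

	for (i = 0; i < EXAMPLE_MAX_POLLS; i++) {
		if (readl(status_reg) & BIT(3))	/* hypothetical DONE bit */
			return 0;
		udelay(100);
	}

	WARN_ON_ONCE(1);	/* hardware never reported completion */
	return -ETIMEDOUT;
}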
index 4de4473776acd808f52eeed3994af2858897796d..dac1a2709e3cb7aa961240a088ef15fe3311bec2 100644 (file)
@@ -971,6 +971,15 @@ void ath_hw_pll_work(struct work_struct *work)
                                            hw_pll_work.work);
        u32 pll_sqsum;
 
+       /*
+        * Ensure that the PLL WAR is executed only after the STA has
+        * associated, or after beaconing has started on interfaces that
+        * use beacons.
+        */
+       if (!(sc->sc_flags & SC_OP_BEACONS))
+               return;
+
        if (AR_SREV_9485(sc->sc_ah)) {
 
                ath9k_ps_wakeup(sc);
@@ -1443,15 +1452,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
-           ((vif->type == NL80211_IFTYPE_ADHOC) &&
-            sc->nvifs > 0)) {
-               ath_err(common, "Cannot create ADHOC interface when other"
-                       " interfaces already exist.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
 
        sc->nvifs++;
@@ -1476,15 +1476,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
 
-       /* See if new interface type is valid. */
-       if ((new_type == NL80211_IFTYPE_ADHOC) &&
-           (sc->nvifs > 1)) {
-               ath_err(common, "When using ADHOC, it must be the only"
-                       " interface.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        if (ath9k_uses_beacons(new_type) &&
            !ath9k_uses_beacons(vif->type)) {
                if (sc->nbcnvifs >= ATH_BCBUF) {
index e1fcc68124dc3bc78359cf72e2da459ddf9c4560..0735aeb3b26cefcd15c2847a1775718d36cecbc6 100644 (file)
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
-               } else {
-                       bf = NULL;
                }
+
+               bf = NULL;
        }
 
        *dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen) {
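
The extra test_bit() here relies on a per-cipher ccmp_keymap, which the ath_key_config()/ath_key_delete() hunks further down keep in sync with the hardware key table, so a spurious KEYMISS is only honoured for slots that really hold a CCMP key. The bookkeeping pattern on its own, sketched with an illustrative bitmap:

/* needs <linux/bitmap.h> and <linux/ieee80211.h>; size and names are illustrative */
static DECLARE_BITMAP(example_ccmp_map, 128);

static void example_key_add(int idx, u32 cipher)
{
	if (cipher == WLAN_CIPHER_SUITE_CCMP)
		set_bit(idx, example_ccmp_map);
}

static void example_key_del(int idx)
{
	clear_bit(idx, example_ccmp_map);
}

static bool example_keymiss_is_real(int rx_keyix)
{
	/* only trust a KEYMISS report for slots holding a CCMP key */
	return test_bit(rx_keyix, example_ccmp_map);
}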
index d59dd01d6cdeda200280f0f856fe9ecf26e5fbcc..4d571394c7a82523404db2d9ae3ef0ba0af760c6 100644 (file)
@@ -938,6 +938,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
+       struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        int i;
        u8 rix = 0;
 
@@ -948,18 +949,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
 
        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
-
-       /*
-        * We check if Short Preamble is needed for the CTS rate by
-        * checking the BSS's global flag.
-        * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
-        */
-       rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
-       info->rtscts_rate = rate->hw_value;
-
-       if (tx_info->control.vif &&
-           tx_info->control.vif->bss_conf.use_short_preamble)
-               info->rtscts_rate |= rate->hw_value_short;
+       info->rtscts_rate = fi->rtscts_rate;
 
        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
@@ -1001,13 +991,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                }
 
                /* legacy rates */
+               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;
 
-               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1776,10 +1766,22 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       const struct ieee80211_rate *rate;
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_node *an = NULL;
        enum ath9k_key_type keytype;
+       bool short_preamble = false;
+
+       /*
+        * We check if Short Preamble is needed for the CTS rate by
+        * checking the BSS's global flag.
+        * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+        */
+       if (tx_info->control.vif &&
+           tx_info->control.vif->bss_conf.use_short_preamble)
+               short_preamble = true;
 
+       rate = ieee80211_get_rts_cts_rate(hw, tx_info);
        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 
        if (sta)
@@ -1794,6 +1796,9 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
+       fi->rtscts_rate = rate->hw_value;
+       if (short_preamble)
+               fi->rtscts_rate |= rate->hw_value_short;
 }
 
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
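
These xmit.c hunks compute the RTS/CTS rate once in setup_frame_info() and cache it in the per-frame ath_frame_info, so ath_buf_set_rate() no longer re-derives it for every descriptor fill. The compute-once, read-later shape, sketched with hypothetical fields:

/* needs <net/mac80211.h>; struct and function names are illustrative */
struct example_frame_info {
	u8 rtscts_rate;		/* cached at setup time */
};

static void example_setup_frame(struct example_frame_info *fi,
				const struct ieee80211_rate *rate,
				bool short_preamble)
{
	/* assumes the hw rate codes fit in 8 bits, as they do in ath9k */
	fi->rtscts_rate = rate->hw_value;
	if (short_preamble)
		fi->rtscts_rate |= rate->hw_value_short;
}

static void example_fill_desc(const struct example_frame_info *fi,
			      u8 *desc_rtscts_rate)
{
	*desc_rtscts_rate = fi->rtscts_rate;	/* no rate lookup in the hot path */
}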
index 0e81904956cf1c04fa37ad01e65282b55e2bf8f9..5c54aa43ca2d81b3936d917aeb68f7331aabb34b 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
index 67c13af6f206e04475996642b00698ac9e9d12f5..c06b6cb5c91ea6c64c14a4b38faf7c5a2dd74e1b 100644 (file)
@@ -877,6 +877,10 @@ struct b43_wl {
         * from the mac80211 subsystem. */
        u16 mac80211_initially_registered_queues;
 
+       /* Set when we call ieee80211_register_hw() and checked before we
+        * call ieee80211_unregister_hw(). */
+       bool hw_registred;
+
        /* We can only have one operating interface (802.11 core)
         * at a time. General information about this interface follows.
         */
index 5a39b226b2e3193958bf29472c3112c7361de0e5..1b988f26bdf1114b8fb24af9626d13599c329d08 100644 (file)
@@ -2437,6 +2437,7 @@ start_ieee80211:
        err = ieee80211_register_hw(wl->hw);
        if (err)
                goto err_one_core_detach;
+       wl->hw_registred = true;
        b43_leds_register(wl->current_dev);
        goto out;
 
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
        if (prev_status >= B43_STAT_STARTED) {
                err = b43_wireless_core_start(up_dev);
                if (err) {
-                       b43err(wl, "Fatal: Coult not start device for "
+                       b43err(wl, "Fatal: Could not start device for "
                               "selected %s-GHz band\n",
                               band_to_string(chan->band));
                        b43_wireless_core_exit(up_dev);
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 
        hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
        wl->mac80211_initially_registered_queues = hw->queues;
+       wl->hw_registred = false;
        hw->max_rates = 2;
        SET_IEEE80211_DEV(hw, dev->dev);
        if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
         * as the ieee80211 unreg will destroy the workqueue. */
        cancel_work_sync(&wldev->restart_work);
 
-       /* Restore the queues count before unregistering, because firmware detect
-        * might have modified it. Restoring is important, so the networking
-        * stack can properly free resources. */
-       wl->hw->queues = wl->mac80211_initially_registered_queues;
-       b43_leds_stop(wldev);
-       ieee80211_unregister_hw(wl->hw);
+       B43_WARN_ON(!wl);
+       if (wl->current_dev == wldev && wl->hw_registred) {
+               /* Restore the queues count before unregistering, because firmware detect
+                * might have modified it. Restoring is important, so the networking
+                * stack can properly free resources. */
+               wl->hw->queues = wl->mac80211_initially_registered_queues;
+               b43_leds_stop(wldev);
+               ieee80211_unregister_hw(wl->hw);
+       }
 
        b43_one_core_detach(wldev->dev);
 
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
        cancel_work_sync(&wldev->restart_work);
 
        B43_WARN_ON(!wl);
-       if (wl->current_dev == wldev) {
+       if (wl->current_dev == wldev && wl->hw_registred) {
                /* Restore the queues count before unregistering, because firmware detect
                 * might have modified it. Restoring is important, so the networking
                 * stack can properly free resources. */
index f1f8bd09bd87fd378cb405d6aa56a38eafe4fb3c..c8baf020c20f7a4fd73104bd56a87cae375b935e 100644 (file)
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
        if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-               bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+               bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
index cd9c9bc186d93099f56b8b912e905255ee7411c9..eae691e2f7dd4abcd1d08c5bf8bcf3e6872e97fd 100644 (file)
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
        if (prev_status >= B43legacy_STAT_STARTED) {
                err = b43legacy_wireless_core_start(up_dev);
                if (err) {
-                       b43legacyerr(wl, "Fatal: Coult not start device for "
+                       b43legacyerr(wl, "Fatal: Could not start device for "
                               "newly selected %s-PHY mode\n",
                               phymode_to_string(new_mode));
                        b43legacy_wireless_core_exit(up_dev);
index e2480d19627679c89cb166f9a4954db74dd21ff8..8e7e6928c93699bf9b7df35f9efc4c9b26928481 100644 (file)
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
        data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
        brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
-       /* redirect, configure ane enable io for interrupt signal */
+       /* redirect, configure and enable io for interrupt signal */
        data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
-       if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
+       if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
                data |= SDIO_SEPINT_ACT_HI;
        brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
 
index 9cfae0c08707d95e21a68bdd8822bc4f966e223f..95aa8e1683ecb4bdeb663e0cf605d699407b96f0 100644 (file)
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
        netif_stop_queue(priv->net_dev);
 }
 
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
-       struct ipw2100_priv *priv = libipw_priv(dev);
-
-       return ipw2100_up(priv, 1);
-}
-
 static int ipw2100_wdev_init(struct net_device *dev)
 {
        struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
        .ndo_stop               = ipw2100_close,
        .ndo_start_xmit         = libipw_xmit,
        .ndo_change_mtu         = libipw_change_mtu,
-       .ndo_init               = ipw2100_net_init,
        .ndo_tx_timeout         = ipw2100_tx_timeout,
        .ndo_set_mac_address    = ipw2100_set_address,
        .ndo_validate_addr      = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        printk(KERN_INFO DRV_NAME
               ": Detected Intel PRO/Wireless 2100 Network Connection\n");
 
+       err = ipw2100_up(priv, 1);
+       if (err)
+               goto fail;
+
        err = ipw2100_wdev_init(dev);
        if (err)
                goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
         * network device we would call ipw2100_up.  This introduced a race
         * condition with newer hotplug configurations (network was coming
         * up and making calls before the device was initialized).
-        *
-        * If we called ipw2100_up before we registered the device, then the
-        * device name wasn't registered.  So, we instead use the net_dev->init
-        * member to call a function that then just turns and calls ipw2100_up.
-        * net_dev->init is called after name allocation but before the
-        * notifier chain is called */
+        */
        err = register_netdev(dev);
        if (err) {
                printk(KERN_WARNING DRV_NAME
index 509301a5e7e264ae079201a246ded0252582308a..ff5d689e13f34884222b58fe06291140db1f9682 100644 (file)
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
                return 0;
        }
 
-       if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+       if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
                IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
                        key_flags);
                spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
        memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
        il->stations[sta_id].sta.key.key_flags =
            STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-       il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
        il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
index cbf2dc18341f58eec6141158e0c6d3aaad02d12b..5d4807c2b56d80aaafe47089a2498b29856ba40c 100644 (file)
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
                return;
 
        /* monitor and check for other stuck queues */
-       if (il_is_any_associated(il)) {
-               for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
-                       /* skip as we already checked the command queue */
-                       if (cnt == il->cmd_queue)
-                               continue;
-                       if (il_check_stuck_queue(il, cnt))
-                               return;
-               }
+       for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+               /* skip as we already checked the command queue */
+               if (cnt == il->cmd_queue)
+                       continue;
+               if (il_check_stuck_queue(il, cnt))
+                       return;
        }
 
        mod_timer(&il->watchdog,
index 19f7ee84ae89e2b76ba493016cb508745eaae173..e5e8ada4aaf6ad93cd109ee2112921ed136b3c00 100644 (file)
 #define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
 #define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 #define IWL6050_UCODE_API_OK 5
 #define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
 
 /* EEPROM versions */
 #define EEPROM_6000_TX_POWER_VERSION   (4)
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
        IWL_DEVICE_6030,
 };
 
+#define IWL_DEVICE_6035                                                \
+       .fw_name_pre = IWL6030_FW_PRE,                          \
+       .ucode_api_max = IWL6035_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL6035_UCODE_API_OK,                   \
+       .ucode_api_min = IWL6035_UCODE_API_MIN,                 \
+       .device_family = IWL_DEVICE_FAMILY_6030,                \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
+       .eeprom_ver = EEPROM_6030_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,       \
+       .base_params = &iwl6000_g2_base_params,                 \
+       .bt_params = &iwl6000_bt_params,                        \
+       .need_temp_offset_calib = true,                         \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .adv_pm = true
+
 const struct iwl_cfg iwl6035_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
-       IWL_DEVICE_6030,
+       IWL_DEVICE_6035,
        .ht_params = &iwl6000_ht_params,
 };
 
index aea07aab3c9e82c44f6b417d20794f4523e6ae70..eb6a8eaf42fc54b54ddfc57926f6f29c60371bdc 100644 (file)
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
                key_flags |= STA_KEY_MULTICAST_MSK;
 
        sta_cmd.key.key_flags = key_flags;
-       sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+       sta_cmd.key.key_offset = keyconf->hw_key_idx;
        sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
        sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
 
index e7c157e5ebebdb2c181d1473a02de769e570e3e0..7f97dec8534db1f60ba843bfa317b5cf303a6285 100644 (file)
@@ -2239,6 +2239,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
        return count;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 static ssize_t iwl_dbgfs_log_event_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
@@ -2276,6 +2277,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
 
        return count;
 }
+#endif
 
 static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
                                         char __user *user_buf,
@@ -2345,7 +2347,9 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
 DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
 DEBUGFS_READ_FILE_OPS(reply_tx_error);
 DEBUGFS_WRITE_FILE_OPS(echo_test);
+#ifdef CONFIG_IWLWIFI_DEBUG
 DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
 DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
 
 /*
@@ -2405,7 +2409,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
        DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
        DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+#endif
 
        if (iwl_advanced_bt_coexist(priv))
                DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
index d742900969eabc913feb5e3643b3cfdc0ddf3840..fac67a526a30880199bb12b8fb7310e6a3471a75 100644 (file)
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
 
        /* We have our copies now, allow OS release its copies */
        release_firmware(ucode_raw);
-       complete(&drv->request_firmware_complete);
 
        drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
 
        if (!drv->op_mode)
-               goto out_free_fw;
+               goto out_unbind;
 
+       /*
+        * Complete the firmware request last so that
+        * a driver unbind (stop) doesn't run while we
+        * are doing the start() above.
+        */
+       complete(&drv->request_firmware_complete);
        return;
 
  try_again:
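
Moving complete() after iwl_dvm_ops.start() matters because the unbind path waits on request_firmware_complete before stopping the op_mode; completing early would let stop() race with a start() still in flight. A stripped-down sketch of that ordering (the structure and example_start() are hypothetical stand-ins):

/* needs <linux/completion.h> */
struct example_drv {
	struct completion fw_done;
	void *op_mode;
};

static void *example_start(struct example_drv *drv)
{
	/* stand-in for the real device start-up */
	return drv;
}

static void example_fw_callback(struct example_drv *drv)
{
	drv->op_mode = example_start(drv);

	/*
	 * Unbind waits on fw_done before tearing the op_mode down, so
	 * signalling it only after start() has returned avoids the race.
	 */
	complete(&drv->fw_done);
}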
index 50c58911e7188c3a7a2f29a6d4bf55f7a9e05953..b8e2b223ac36b6c1634e78256253d6095bcce08a 100644 (file)
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
  * iwl_get_max_txpower_avg - get the highest tx power from all chains.
  *     find the highest tx power from all chains for the channel
  */
-static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
                struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
                int element, s8 *max_txpower_in_half_dbm)
 {
        s8 max_txpower_avg = 0; /* (dBm) */
 
        /* Take the highest tx power from any valid chains */
-       if ((cfg->valid_tx_ant & ANT_A) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_A) &&
            (enhanced_txpower[element].chain_a_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_a_max;
-       if ((cfg->valid_tx_ant & ANT_B) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_B) &&
            (enhanced_txpower[element].chain_b_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_b_max;
-       if ((cfg->valid_tx_ant & ANT_C) &&
+       if ((priv->hw_params.valid_tx_ant & ANT_C) &&
            (enhanced_txpower[element].chain_c_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_c_max;
-       if (((cfg->valid_tx_ant == ANT_AB) |
-           (cfg->valid_tx_ant == ANT_BC) |
-           (cfg->valid_tx_ant == ANT_AC)) &&
+       if (((priv->hw_params.valid_tx_ant == ANT_AB) |
+           (priv->hw_params.valid_tx_ant == ANT_BC) |
+           (priv->hw_params.valid_tx_ant == ANT_AC)) &&
            (enhanced_txpower[element].mimo2_max > max_txpower_avg))
                max_txpower_avg =  enhanced_txpower[element].mimo2_max;
-       if ((cfg->valid_tx_ant == ANT_ABC) &&
+       if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
            (enhanced_txpower[element].mimo3_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
                                 ((txp->delta_20_in_40 & 0xf0) >> 4),
                                 (txp->delta_20_in_40 & 0x0f));
 
-               max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
+               max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
                                                      &max_txp_avg_halfdbm);
 
                /*
index ab2f4d7500a40df03d68293986d8c6ae46fc9969..013680332f075be85a9035bcec33eaec0c2e9285 100644 (file)
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
                            WIPHY_FLAG_IBSS_RSN;
 
+#ifdef CONFIG_PM_SLEEP
        if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
            priv->trans->ops->wowlan_suspend &&
            device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                hw->wiphy->wowlan.pattern_max_len =
                                        IWLAGN_WOWLAN_MAX_PATTERN_LEN;
        }
+#endif
 
        if (iwlwifi_mod_params.power_save)
                hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               iwl_leds_exit(priv);
                return ret;
        }
        priv->mac80211_registered = 1;
@@ -793,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
        switch (op) {
        case ADD:
                ret = iwlagn_mac_sta_add(hw, vif, sta);
+               if (ret)
+                       break;
+               /*
+                * Clear the in-progress flag, the AP station entry was added
+                * but we'll initialize LQ only when we've associated (which
+                * would also clear the in-progress flag). This is necessary
+                * in case we never initialize LQ because association fails.
+                */
+               spin_lock_bh(&priv->sta_lock);
+               priv->stations[iwl_sta_id(sta)].used &=
+                       ~IWL_STA_UCODE_INPROGRESS;
+               spin_unlock_bh(&priv->sta_lock);
                break;
        case REMOVE:
                ret = iwlagn_mac_sta_remove(hw, vif, sta);
index 3b1069290fa9a9cf646a2f04325d6c1862b70a7a..dfd54662e3e675bc2c560894cd47d002cbf5ff76 100644 (file)
 #define SCD_TXFACT             (SCD_BASE + 0x10)
 #define SCD_ACTIVE             (SCD_BASE + 0x14)
 #define SCD_QUEUECHAIN_SEL     (SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN                (SCD_BASE + 0x244)
 #define SCD_AGGR_SEL           (SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK     (SCD_BASE + 0x108)
 
index ec6fb395b84d0aca4e7d7bfcf2e1729c3959dc5a..79c6b91417f9430c2982b4e51aefd8f3f7dc47b3 100644 (file)
@@ -1058,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
        iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
                       trans_pcie->scd_bc_tbls.dma >> 10);
 
+       /* The chain extension of the SCD doesn't work well. This feature is
+        * enabled by default by the HW, so we need to disable it manually.
+        */
+       iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
                iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
index fb787df0166699f9c31e7adf8ff25c924c08f7a8..a0b7cfd3468532e705eeedaf4cf93cdb1e5be536 100644 (file)
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
                        hdr = (struct ieee80211_hdr *) skb->data;
                        mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
                }
+               txi->flags |= IEEE80211_TX_STAT_ACK;
        }
        ieee80211_tx_status_irqsafe(data2->hw, skb);
        return 0;
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void)
                       "unregister family %i\n", ret);
 }
 
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+       { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+       { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+                                BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb = {
+       .limits = hwsim_if_limits,
+       .n_limits = ARRAY_SIZE(hwsim_if_limits),
+       .max_interfaces = 2048,
+       .num_different_channels = 1,
+};
+
 static int __init init_mac80211_hwsim(void)
 {
        int i, err = 0;
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
                hw->wiphy->n_addresses = 2;
                hw->wiphy->addresses = data->addresses;
 
+               hw->wiphy->iface_combinations = &hwsim_if_comb;
+               hw->wiphy->n_iface_combinations = 1;
+
                if (fake_hw_scan) {
                        hw->wiphy->max_scan_ssids = 255;
                        hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
index 9c44088054dd90bd967626d8416e63c083225fc8..900ee129e825987dfb2dbe6b485bfa0bb40dbc74 100644 (file)
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index f1bffebabc60a6ab1059276664bc3f3f71e129bb..6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
index 87671446e24b00203e0398142894e8fec547d308..5c7fd185373cfb37c0ee57e62b9b54135af505b7 100644 (file)
@@ -948,6 +948,20 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
                bss_cfg->ssid.ssid_len = params->ssid_len;
        }
 
+       switch (params->hidden_ssid) {
+       case NL80211_HIDDEN_SSID_NOT_IN_USE:
+               bss_cfg->bcast_ssid_ctl = 1;
+               break;
+       case NL80211_HIDDEN_SSID_ZERO_LEN:
+               bss_cfg->bcast_ssid_ctl = 0;
+               break;
+       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+               /* firmware doesn't support this type of hidden SSID */
+       default:
+               kfree(bss_cfg);
+               return -EINVAL;
+       }
+
        if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
                kfree(bss_cfg);
                wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
@@ -1471,7 +1485,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        struct wireless_dev *wdev;
 
        if (!adapter)
-               return NULL;
+               return ERR_PTR(-EFAULT);
 
        switch (type) {
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -1481,12 +1495,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                if (priv->bss_mode) {
                        wiphy_err(wiphy,
                                  "cannot create multiple sta/adhoc ifaces\n");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
                if (!wdev)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                wdev->wiphy = wiphy;
                priv->wdev = wdev;
@@ -1509,12 +1523,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                if (priv->bss_mode) {
                        wiphy_err(wiphy, "Can't create multiple AP interfaces");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
                if (!wdev)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                priv->wdev = wdev;
                wdev->wiphy = wiphy;
@@ -1531,14 +1545,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                break;
        default:
                wiphy_err(wiphy, "type not supported\n");
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
                              ether_setup, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
-               goto error;
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-ENOMEM);
        }
 
        mwifiex_init_priv_params(priv, dev);
@@ -1569,7 +1584,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        /* Register network device */
        if (register_netdevice(dev)) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
-               goto error;
+               free_netdev(dev);
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-EFAULT);
        }
 
        sema_init(&priv->async_sem, 1);
@@ -1581,12 +1598,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        mwifiex_dev_debugfs_init(priv);
 #endif
        return dev;
-error:
-       if (dev && (dev->reg_state == NETREG_UNREGISTERED))
-               free_netdev(dev);
-       priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
-       return NULL;
 }
 EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
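
Returning ERR_PTR() values instead of NULL lets callers see why interface creation failed (-EINVAL, -ENOMEM, -EFAULT) rather than just that it failed. The usual caller-side handling, sketched with a hypothetical constructor:

/* needs <linux/err.h> and <net/cfg80211.h>; example_create_intf() is hypothetical */
static struct net_device *example_create_intf(struct wiphy *wiphy)
{
	/* stand-in: a real constructor would allocate and register a netdev */
	return ERR_PTR(-ENOMEM);
}

static int example_add_intf(struct wiphy *wiphy)
{
	struct net_device *dev;

	dev = example_create_intf(wiphy);
	if (IS_ERR(dev))
		return PTR_ERR(dev);	/* propagates -EINVAL, -ENOMEM, -EFAULT, ... */

	return 0;
}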
 
index 9f674bbebe65afc2f3fa2edf001af0d414cd8b4d..561452a5c818f4a3d6ea653d029cd32fdab80587 100644 (file)
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
 #define TLV_TYPE_UAP_BEACON_PERIOD  (PROPRIETARY_TLV_BASE_ID + 44)
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
 #define TLV_TYPE_UAP_RTS_THRESHOLD  (PROPRIETARY_TLV_BASE_ID + 51)
 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid {
        u8 ssid[0];
 } __packed;
 
+struct host_cmd_tlv_bcast_ssid {
+       struct host_cmd_tlv tlv;
+       u8 bcast_ctl;
+} __packed;
+
 struct host_cmd_tlv_beacon_period {
        struct host_cmd_tlv tlv;
        __le16 period;
index ceb82cd749cc7ba1ca2054ae0b675802d2194190..383820a52beba0d62428614131adbe42def1e5db 100644 (file)
@@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
                /* save assoc resp ie index after auto-indexing */
                *assoc_idx = *((u16 *)pos);
 
+       kfree(ap_custom_ie);
        return ret;
 }
 
index e0377473282f05fab01835578cb9b2579a40217f..fc8a9bfa1248305fababa37b59ca90110a57afaa 100644 (file)
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
                adapter->event_cause = *(u32 *) skb->data;
 
-               skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
-                       memcpy(adapter->event_body, skb->data, skb->len);
+                       memcpy(adapter->event_body,
+                              skb->data + MWIFIEX_EVENT_HEADER_LEN,
+                              skb->len);
 
                /* event cause has been saved to adapter->event_cause */
                adapter->event_received = true;
index 4ace5a3dcd237c5aada48223e224a7f943ef2692..11e731f3581c2dbf4856d8676ca6ee66c12ccabe 100644 (file)
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_UAP_STA_ASSOC:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
                if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
                        len = -1;
 
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                 GFP_KERNEL);
                break;
        case EVENT_UAP_STA_DEAUTH:
-               skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
-                                GFP_KERNEL);
+               cfg80211_del_sta(priv->netdev, adapter->event_body +
+                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
index e2faec4db10863c28c7156e922b14b2ca4fa02c9..cecb27283196150afdcecc56c451fa396456891f 100644 (file)
@@ -161,15 +161,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                goto done;
 
        for (i = 0; i < adapter->priv_num; i++) {
-
                tpriv = adapter->priv[i];
 
-               if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
-                   (tpriv->media_connected)) {
-                       if (netif_queue_stopped(tpriv->netdev))
-                               mwifiex_wake_up_net_dev_queue(tpriv->netdev,
-                                                             adapter);
-               }
+               if (tpriv->media_connected &&
+                   netif_queue_stopped(tpriv->netdev))
+                       mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
        }
 done:
        dev_kfree_skb_any(skb);
index 76dfbc42a732fc92530fc68a7b8573ea0f54d437..89f9a2a45de3f772e15cbd4084735e9bc7013d54 100644 (file)
@@ -27,6 +27,17 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
                              struct cfg80211_ap_settings *params) {
        int i;
 
+       if (!params->privacy) {
+               bss_config->protocol = PROTOCOL_NO_SECURITY;
+               bss_config->key_mgmt = KEY_MGMT_NONE;
+               bss_config->wpa_cfg.length = 0;
+               priv->sec_info.wep_enabled = 0;
+               priv->sec_info.wpa_enabled = 0;
+               priv->sec_info.wpa2_enabled = 0;
+
+               return 0;
+       }
+
        switch (params->auth_type) {
        case NL80211_AUTHTYPE_OPEN_SYSTEM:
                bss_config->auth_mode = WLAN_AUTH_OPEN;
@@ -132,6 +143,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        struct host_cmd_tlv_dtim_period *dtim_period;
        struct host_cmd_tlv_beacon_period *beacon_period;
        struct host_cmd_tlv_ssid *ssid;
+       struct host_cmd_tlv_bcast_ssid *bcast_ssid;
        struct host_cmd_tlv_channel_band *chan_band;
        struct host_cmd_tlv_frag_threshold *frag_threshold;
        struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +165,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                cmd_size += sizeof(struct host_cmd_tlv) +
                            bss_cfg->ssid.ssid_len;
                tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+
+               bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
+               bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+               bcast_ssid->tlv.len =
+                               cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
+               bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
+               cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
+               tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
        }
        if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
                chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +436,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
        if (!bss_cfg)
                return -ENOMEM;
 
+       mwifiex_set_sys_config_invalid_data(bss_cfg);
        bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
        bss_cfg->channel = channel;
 
index 49ebf20c56ebc0ac98dc8d5b25bc3a1164847020..22a5916564b84099576e1c554b37d1fcbcd420b2 100644 (file)
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
+       int ret;
 
        if (adapter->hs_activated)
                mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
                                dev_err(dev, "CMD: skb->len too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
                                dev_dbg(dev, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
                                                        skb->len);
-                                       return 0;
+                                       ret = 0;
+                                       goto exit_restore_skb;
                                }
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
                        adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
                                dev_err(dev, "EVENT: skb->len too small\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       skb_pull(skb, sizeof(u32));
                        dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
                                dev_err(dev, "EVENT: event body too large\n");
-                               return -1;
+                               ret = -1;
+                               goto exit_restore_skb;
                        }
 
-                       skb_copy_from_linear_data(skb, adapter->event_body,
-                                                 skb->len);
+                       memcpy(adapter->event_body, skb->data +
+                              MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
                        adapter->event_received = true;
                        adapter->event_skb = skb;
                        break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
        }
 
        return -EINPROGRESS;
+
+exit_restore_skb:
+       /* The buffer will be reused for further cmds/events */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       return ret;
 }
 
 static void mwifiex_usb_rx_complete(struct urb *urb)
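
Because the same URB buffer is resubmitted for later commands and events, every early return now undoes the interface-header pull with skb_push() so the next pass sees an intact frame. The pull/restore pairing on its own, as a sketch (the 4-byte header length is illustrative):

/* needs <linux/skbuff.h> and <linux/errno.h>; header length is illustrative */
#define EXAMPLE_INTF_HDR_LEN	4

static int example_parse(struct sk_buff *skb)
{
	int ret;

	skb_pull(skb, EXAMPLE_INTF_HDR_LEN);

	if (skb->len < sizeof(u32)) {
		ret = -EINVAL;
		goto restore;
	}

	/* ... consume the payload ... */
	return 0;

restore:
	/* the buffer is reused for further cmds/events, put the header back */
	skb_push(skb, EXAMPLE_INTF_HDR_LEN);
	return ret;
}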
index f3fc6551585780b08724bb8112457caafeef220e..3fa4d417699381225e853a56238e0d8506a2f99b 100644 (file)
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
            ((priv->sec_info.wpa_enabled ||
              priv->sec_info.wpa2_enabled) &&
             !priv->wpa_is_gtk_set)) {
index 2e9e6af21362c1fe302d68e61cb4a83d229f5b24..dfcd02ab6cae5add70a2eb47c703d54b32c324ce 100644 (file)
@@ -2110,7 +2110,7 @@ resize_buf:
        while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
                if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
                    matched) {
-                       if (!ether_addr_equal(bssid->mac, match_bssid))
+                       if (ether_addr_equal(bssid->mac, match_bssid))
                                *matched = true;
                }
 
index ca36cccaba31d87b1920f4238aad0be364fce060..8f754025b06ead9b9a2c9d5be62d5d19b871690f 100644 (file)
@@ -396,8 +396,7 @@ struct rt2x00_intf {
         * for hardware which doesn't support hardware
         * sequence counting.
         */
-       spinlock_t seqlock;
-       u16 seqno;
+       atomic_t seqno;
 };
 
 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
index b49773ef72f2d0c32e3e4e5f55f2ebac3bd540bc..dd24b2663b5e528e04a0814726ec0f06ceae6cf3 100644 (file)
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
        else
                rt2x00dev->intf_sta_count++;
 
-       spin_lock_init(&intf->seqlock);
        mutex_init(&intf->beacon_skb_mutex);
        intf->beacon = entry;
 
index 4c662eccf53c60e32e352ac96259268103b539f7..2fd830103415dca94a8d582ec1de91c93b074cb0 100644 (file)
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
+       u16 seqno;
 
        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
-       spin_lock(&intf->seqlock);
-
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-               intf->seqno += 0x10;
-       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-       hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
-
-       spin_unlock(&intf->seqlock);
+               seqno = atomic_add_return(0x10, &intf->seqno);
+       else
+               seqno = atomic_read(&intf->seqno);
 
+       hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+       hdr->seq_ctrl |= cpu_to_le16(seqno);
 }
 
 static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
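
With an atomic_t the per-interface sequence counter no longer needs intf->seqlock in the TX hot path: only the first fragment advances the counter (by 0x10, one step of the 12-bit sequence field), and later fragments reuse the current value. A stripped-down sketch with illustrative names:

/* needs <linux/atomic.h> and <linux/ieee80211.h> */
static atomic_t example_seqno = ATOMIC_INIT(0);

static __le16 example_seq_ctrl(bool first_fragment, u8 frag_number)
{
	u16 seqno;

	if (first_fragment)
		seqno = atomic_add_return(0x10, &example_seqno);
	else
		seqno = atomic_read(&example_seqno);

	return cpu_to_le16((seqno & IEEE80211_SCTL_SEQ) |
			   (frag_number & IEEE80211_SCTL_FRAG));
}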
index d357d1ed92f6ddc363f0babca95652d8f446b3c2..74ecc33fdd90c249d5fcd3c46d68d646113e0985 100644 (file)
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
        case QID_RX:
                if (!rt2x00queue_full(queue))
                        rt2x00queue_for_each_entry(queue,
-                                                  Q_INDEX_DONE,
                                                   Q_INDEX,
+                                                  Q_INDEX_DONE,
                                                   NULL,
                                                   rt2x00usb_kick_rx_entry);
                break;
index 2e0de2f5f0f92d22a1e384dab601c72912216f9a..c2d5b495c179a1021dd4cd4221c0032f3a99e34a 100644 (file)
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
                        radio_on = true;
                } else if (radio_on) {
                        radio_on = false;
-                       cancel_delayed_work_sync(&priv->led_on);
+                       cancel_delayed_work(&priv->led_on);
                        ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
                }
        } else if (radio_on) {
index d228358e6a403b9c8eee56d63ce4991058975dc9..9970c2b1b19979dc3577472c7c21e27703a38a76 100644 (file)
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index ad87a1ac6462aab40befe12db72eef99fcc52f5b..db6430c1a08414650a894d3e8a6d41c57cba1711 100644 (file)
@@ -869,7 +869,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
        }
 
        *mactime = tsf_info->current_tsf_lsb |
-               (tsf_info->current_tsf_msb << 31);
+               ((u64)tsf_info->current_tsf_msb << 32);
 
 out:
        kfree(tsf_info);
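
The fix casts the most-significant half to u64 before shifting; shifting a 32-bit value left by 31, as the old code did, both used the wrong shift count and discarded the high word entirely. The correct assembly in isolation, with illustrative variable names:

static u64 example_tsf(u32 tsf_msb, u32 tsf_lsb)
{
	/* promote before shifting so bits 32..63 survive */
	return ((u64)tsf_msb << 32) | tsf_lsb;
}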
index 9f15ccaf8f05aec733882e98a3ff443fdb5737d0..5ec50a476a69c084ccba378813db16fca7087db1 100644 (file)
@@ -76,8 +76,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
-           wl->station_mode != STATION_ACTIVE_MODE) {
+       if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
                wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
                /* indicate to the stack, that beacons have been lost */
index 87f6305bda2cc5ced7f6377da9a3a3f9a16b72a4..567660cd2fcd3f3ead3a5c82baa38f6eed6d3690 100644 (file)
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
        spi_sync(wl_to_spi(wl), &m);
 
        wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+       kfree(cmd);
 }
 
 static void wl1251_spi_reset_wake(struct wl1251 *wl)
index 54156b0b5c2d7defe6b42791d022d4c982fd945f..d7b907e67170348e70302e6a1cfe70642a8c6f7e 100644 (file)
@@ -1,7 +1,6 @@
 config WLCORE
        tristate "TI wlcore support"
        depends on WL_TI && GENERIC_HARDIRQS && MAC80211
-       depends on INET
        select FW_LOADER
        ---help---
          This module contains the main code for TI WLAN chips.  It abstracts
index 2027afe405fed60de8d0bf2dbe85a06456edcf28..30899901aef56b00e05c5f062c86cabcb54127d8 100644 (file)
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
-       unregister_netdev(info->netdev);
-
        xennet_disconnect_backend(info);
 
-       del_timer_sync(&info->rx_refill_timer);
-
        xennet_sysfs_delif(info->netdev);
 
+       unregister_netdev(info->netdev);
+
+       del_timer_sync(&info->rx_refill_timer);
+
        free_percpu(info->stats);
 
        free_netdev(info->netdev);
index 343ad29e211c66768491e325046ff0b58bcb15ec..e44f8c2d239d253afc045164834f0476b94cf932 100644 (file)
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
-               if (of_address_to_resource(np, 0, &res))
-                       continue;
-               if (res.start != lookup->phys_addr)
-                       continue;
+               if (!of_address_to_resource(np, 0, &res))
+                       if (res.start != lookup->phys_addr)
+                               continue;
                pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
                return lookup;
        }
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
        of_node_put(root);
        return rc;
 }
+EXPORT_SYMBOL_GPL(of_platform_populate);
 #endif /* CONFIG_OF_ADDRESS */
index da14432806c6e1662e536c2c18c21763094a5329..efc4b7f308cfd6e1c37a44a0407c6e070f9d514e 100644 (file)
@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
 static int num_counters;
 
 /*
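The array above is switched to the compile-time constant NR_CPUS because a static array cannot be sized by a value that may only be known at run time (nr_cpumask_bits can be a runtime value with CONFIG_CPUMASK_OFFSTACK=y). A small userspace illustration with stand-in names:

#include <stdio.h>

#define MAX_CPUS 64			/* compile-time constant, like NR_CPUS */

static int per_cpu_slot[MAX_CPUS];	/* OK: bound is a constant expression */
/* static int bad_slot[nr_online];	   would not compile: not a constant */

int main(void)
{
	int nr_online = 8;		/* only known at run time */
	int cpu;

	for (cpu = 0; cpu < nr_online && cpu < MAX_CPUS; cpu++)
		per_cpu_slot[cpu] = cpu * cpu;

	printf("slot[3] = %d\n", per_cpu_slot[3]);
	return 0;
}
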
index bf0cee629b60f8bb763b4407f631e7ba7e778aa8..099f46cd8e87a4ca823dc24f707458857f26eaa5 100644 (file)
@@ -748,6 +748,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        pci_pm_set_unknown_state(pci_dev);
 
+       /*
+        * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+        * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+        * hasn't been quiesced and tries to turn it off.  If the controller
+        * is already in D3, this can hang or cause memory corruption.
+        *
+        * Since the value of the COMMAND register doesn't matter once the
+        * device has been suspended, we can safely set it to 0 here.
+        */
+       if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+               pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
        return 0;
 }
 
index c3b331b74fa0a5702300745b0d6423a19d88d30a..0cc053af70bd361e806a94993f64bb4077e69b88 100644 (file)
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
        list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
                for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
                        _i_ < _maps_node_->num_maps; \
-                       i++, _map_ = &_maps_node_->maps[_i_])
+                       _i_++, _map_ = &_maps_node_->maps[_i_])
 
 /**
  * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
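The one-character fix above matters because everything inside a macro body must refer to the macro's own parameters; a stray "i" silently binds to whatever variable of that name is in scope at the expansion site. A stand-alone sketch of a hygienic for_each-style macro (names invented):

#include <stdio.h>

/* Every index reference in the expansion uses the macro parameter _i_;
 * writing a bare "i" by mistake is the bug fixed in the hunk above. */
#define for_each_entry(_arr_, _n_, _i_, _p_)		\
	for ((_i_) = 0, (_p_) = &(_arr_)[0];		\
	     (_i_) < (_n_);				\
	     (_i_)++, (_p_) = &(_arr_)[(_i_)])

int main(void)
{
	int vals[] = { 3, 5, 8 };
	int idx;
	int *p;

	for_each_entry(vals, 3, idx, p)
		printf("vals[%d] = %d\n", idx, *p);

	return 0;
}
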
index f6e7c670906cb16644f61ec83e99bba88f30e666..90c837f469a612be02304415a85c384d7b9363af 100644 (file)
 #include "core.h"
 #include "pinctrl-imx.h"
 
-#define IMX_PMX_DUMP(info, p, m, c, n)         \
-{                                              \
-       int i, j;                               \
-       printk("Format: Pin Mux Config\n");     \
-       for (i = 0; i < n; i++) {               \
-               j = p[i];                       \
-               printk("%s %d 0x%lx\n",         \
-                       info->pins[j].name,     \
-                       m[i], c[i]);            \
-       }                                       \
+#define IMX_PMX_DUMP(info, p, m, c, n)                 \
+{                                                      \
+       int i, j;                                       \
+       printk(KERN_DEBUG "Format: Pin Mux Config\n");  \
+       for (i = 0; i < n; i++) {                       \
+               j = p[i];                               \
+               printk(KERN_DEBUG "%s %d 0x%lx\n",      \
+                       info->pins[j].name,             \
+                       m[i], c[i]);                    \
+       }                                               \
 }
 
 /* The bits in CONFIG cell defined in binding doc*/
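The hunk above gives the debug dump an explicit log level; a printk() without one is emitted at the default message level rather than as debug output. A minimal module-style sketch (not taken from the driver) showing the two usual spellings; it compiles against kernel headers but is illustrative only:

#include <linux/module.h>
#include <linux/kernel.h>

static int __init loglevel_demo_init(void)
{
	/* explicit level ... */
	printk(KERN_DEBUG "loglevel_demo: raw value 0x%x\n", 0x1f);
	/* ... or the helper, which also hooks into dynamic debug */
	pr_debug("loglevel_demo: same message via pr_debug()\n");
	return 0;
}

static void __exit loglevel_demo_exit(void)
{
}

module_init(loglevel_demo_init);
module_exit(loglevel_demo_exit);
MODULE_LICENSE("GPL");
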
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
 
        /* create mux map */
        parent = of_get_parent(np);
-       if (!parent)
+       if (!parent) {
+               kfree(new_map);
                return -EINVAL;
+       }
        new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
        new_map[0].data.mux.function = parent->name;
        new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
        }
 
        dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
-               new_map->data.mux.function, new_map->data.mux.group, map_num);
+               (*map)->data.mux.function, (*map)->data.mux.group, map_num);
 
        return 0;
 }
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
 static void imx_dt_free_map(struct pinctrl_dev *pctldev,
                                struct pinctrl_map *map, unsigned num_maps)
 {
-       int i;
-
-       for (i = 0; i < num_maps; i++)
-               kfree(map);
+       kfree(map);
 }
 
 static struct pinctrl_ops imx_pctrl_ops = {
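Both hunks above rely on the maps being carved out of one allocation: the error path now frees new_map when the parent lookup fails, and imx_dt_free_map() releases the whole array with a single kfree() instead of freeing the same pointer once per entry. A userspace sketch of that single-allocation/single-free pattern (all names invented):

#include <stdlib.h>
#include <stdio.h>

struct map { const char *group; int config; };

static int build_maps(int nmaps, int have_parent, struct map **out)
{
	/* one allocation for the whole array ... */
	struct map *maps = calloc(nmaps, sizeof(*maps));

	if (!maps)
		return -1;

	if (!have_parent) {		/* failure discovered after the allocation */
		free(maps);		/* ... so one free releases it */
		return -1;
	}

	maps[0].group = "demo-group";
	*out = maps;
	return 0;
}

static void free_maps(struct map *maps)
{
	free(maps);	/* a per-entry free() loop here would double-free */
}

int main(void)
{
	struct map *maps;

	if (build_maps(4, 1, &maps) == 0) {
		printf("first group: %s\n", maps[0].group);
		free_maps(maps);
	}
	return 0;
}
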
@@ -478,6 +477,7 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
 #ifdef DEBUG
        IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
 #endif
+
        return 0;
 }
 
index 7737d4d71a3cc7fa8541a603615a29b96b9f173f..e9bf71fbedcafc9a14f14d662d6ec6ca1d7436bc 100644 (file)
@@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
        IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+       IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
+       IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
 };
 
 /* Pad names for the pinmux subsystem */
index 556e45a213eb1997f0ce351e884f4bf958dbe3cf..4ba4636b6a4ac6b67d30b366299b2452bb983b76 100644 (file)
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
 
                /* Compose group name */
                group = kzalloc(length, GFP_KERNEL);
-               if (!group)
-                       return -ENOMEM;
+               if (!group) {
+                       ret = -ENOMEM;
+                       goto free;
+               }
                snprintf(group, length, "%s.%d", np->name, reg);
                new_map[i].data.mux.group = group;
                i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
                pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
                if (!pconfig) {
                        ret = -ENOMEM;
-                       goto free;
+                       goto free_group;
                }
 
                new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
 
        return 0;
 
+free_group:
+       if (!purecfg)
+               kfree(group);
 free:
        kfree(new_map);
        return ret;
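The new free_group label above follows the usual goto-ladder convention: each later failure jumps to a label that releases everything acquired so far (here only when !purecfg, mirroring the allocation condition), and the labels run from most to least recently acquired. A stand-alone userspace version of the ladder (all names invented):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static int setup(void)
{
	char *maps, *group, *config;
	int ret = -1;

	maps = malloc(64);
	if (!maps)
		return -1;

	group = strdup("gpio.0");
	if (!group)
		goto free_maps;

	config = malloc(16);
	if (!config)
		goto free_group;

	printf("all three allocations succeeded\n");
	free(config);
	free(group);
	free(maps);
	return 0;

free_group:			/* reached only after group was allocated */
	free(group);
free_maps:			/* reached whenever maps was allocated */
	free(maps);
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}
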
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
        return 0;
 
 err:
+       platform_set_drvdata(pdev, NULL);
        iounmap(d->base);
        return ret;
 }
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
 {
        struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
 
+       platform_set_drvdata(pdev, NULL);
        pinctrl_unregister(d->pctl);
        iounmap(d->base);
 
index b26395d16347db6dcecf40d8aa438d2409c02bd3..3e7e47d6b38526178b0985bf0e7162f8fab16d70 100644 (file)
@@ -673,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
         * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
         */
        if (nmk_chip->sleepmode && on) {
-               __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base,
+               __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
                                    NMK_GPIO_SLPM_WAKEUP_ENABLE);
        }
 
@@ -1246,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
                ret = PTR_ERR(clk);
                goto out_unmap;
        }
+       clk_prepare(clk);
 
        nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
        if (!nmk_chip) {
@@ -1437,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
 
        dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
 
-       /* Handle this special glitch on altfunction C */
+       /*
+        * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+        * we may pass through an undesired state. In this case we take
+        * some extra care.
+        *
+        * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+        *  - Save SLPM registers (since we have a shadow register in the
+        *    nmk_chip we're using that as backup)
+        *  - Set SLPM=0 for the IOs you want to switch and others to 1
+        *  - Configure the GPIO registers for the IOs that are being switched
+        *  - Set IOFORCE=1
+        *  - Modify the AFLSA/B registers for the IOs that are being switched
+        *  - Set IOFORCE=0
+        *  - Restore SLPM registers
+        *  - Any spurious wake up event during switch sequence to be ignored
+        *    and cleared
+        *
+        * We REALLY need to save ALL slpm registers, because the external
+        * IOFORCE will switch *all* ports to their sleepmode setting so as

+        * to avoid glitches. (Not just one port!)
+        */
        glitch = (g->altsetting == NMK_GPIO_ALT_C);
 
        if (glitch) {
index ba15b1a29e524103563a6035ebd4250e01417d0f..e9f8e7d110018c14d6011ccf8903e086af983ce3 100644 (file)
@@ -1184,7 +1184,7 @@ out_no_gpio_remap:
        return ret;
 }
 
-static const struct of_device_id pinmux_ids[]  = {
+static const struct of_device_id pinmux_ids[] __devinitconst = {
        { .compatible = "sirf,prima2-gpio-pinmux" },
        {}
 };
index 5ae50aadf88548ef9267414427bc76c14b0c7b68..b3f6b2873fdd4cb0ebba5e249913846c2ce51985 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * Inspired from:
  * - U300 Pinctl drivers
index 9155783bb47fbd332648fecff577b67a0519e1f1..d950eb78d939679af768c754f45adea5ea2a7f23 100644 (file)
@@ -2,7 +2,7 @@
  * Driver header file for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fff168be7f0062ae4bd0049460aaef831a37f48a..d6cca8c81b92cf9cd9cb63f9d972965e46670249 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
 }
 module_exit(spear1310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
index a8ab2a6f51bf313eb51421f15870b545a6e0cd7b..a0eb057e55bd3f91e390364a9187ee1f4e0768ab 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1340 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
 }
 module_exit(spear1340_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
index 9c82a35e4e788212be656e98101b954f661e2dce..4dfc2849b1728d33a913958fbe0bda5fc67c0001 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr300 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
 }
 module_exit(spear300_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
index 1a97076051254f87095dbda35a1f49dadbb76e84..96883693fb7eeb9d2b5b403ce0a17b10a2bcc94e 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
 }
 module_exit(spear310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
index de726e6c283a611afe19095d2f1ae7c7af7191f8..020b1e0bdb3ee77c0e4fd254e91467c3436604e9 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr320 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
 }
 module_exit(spear320_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
index 91c883bc46a6b1e0ab40f6446a736936210a3c8b..0242378f7cb86c677a055d9c2c6bcc93797efad7 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 5d5fdd8df7b8e8e99ad7387e91cb420ece60cbcd..31f44347f17ccfd32e3ca0f0913d85b2998aaa05 100644 (file)
@@ -2,7 +2,7 @@
  * Header file for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 639db4d0aa768ef70f3be5293fc5209c8ba60cb8..39abb150bdd4d9f28469b7c7aff5e561dd11d207 100644 (file)
@@ -5,7 +5,7 @@
  *
  * (C) 2009 - Peter Feuerer     peter (a) piie.net
  *                              http://piie.net
- *     2009 Borislav Petkov <petkovbb@gmail.com>
+ *     2009 Borislav Petkov    bp (a) alien8.de
  *
  * Inspired by and many thanks to:
  *  o acerfand   - Rachel Greenham
@@ -660,7 +660,7 @@ static int acerhdf_register_thermal(void)
        if (IS_ERR(cl_dev))
                return -EINVAL;
 
-       thz_dev = thermal_zone_device_register("acerhdf", 1, NULL,
+       thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL,
                                              &acerhdf_dev_ops, 0, 0, 0,
                                              (kernelmode) ? interval*1000 : 0);
        if (IS_ERR(thz_dev))
index 4f20f8dd3d7cd054d2b3ef4c72cda6c75e265094..17f6dfd8dbfb093a332d7d9d9ebd572df9b850e7 100644 (file)
@@ -694,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
 static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
 {
        int ret, i;
-       unsigned long cfg;
+       int cfg;
        struct ideapad_private *priv;
 
-       if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
+       if (read_method_int(adevice->handle, "_CFG", &cfg))
                return -ENODEV;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -721,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
                goto input_failed;
 
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
-               if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
+               if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
                        ideapad_register_rfkill(adevice, i);
                else
                        priv->rfk[i] = NULL;
index 0ffdb3cde2bbc3ff569fee774dcd842333585031..9af4257d49018443b19e447455969787b480a944 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/string.h>
 #include <linux/tick.h>
 #include <linux/timer.h>
+#include <linux/dmi.h>
 #include <drm/i915_drm.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, ips_id_table);
 
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+       pr_info("Blacklisted intel_ips for %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+       {
+               .callback = ips_blacklist_callback,
+               .ident = "HP ProBook",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+               },
+       },
+       { }     /* terminating entry */
+};
+
 static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        u16 htshi, trc, trc_required_mask;
        u8 tse;
 
+       if (dmi_check_system(ips_blacklist))
+               return -ENODEV;
+
        ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
        if (!ips)
                return -ENOMEM;
index 5ae9cd9c7e6e10f4da2dc254725f397286598731..2b2c212ad37d50252f3663a48787599d58484b91 100644 (file)
@@ -499,7 +499,7 @@ static int mid_thermal_probe(struct platform_device *pdev)
                        goto err;
                }
                pinfo->tzd[i] = thermal_zone_device_register(name[i],
-                               0, td_info, &tzd_ops, 0, 0, 0, 0);
+                               0, 0, td_info, &tzd_ops, 0, 0, 0, 0);
                if (IS_ERR(pinfo->tzd[i])) {
                        kfree(td_info);
                        ret = PTR_ERR(pinfo->tzd[i]);
index 210d4ae547c201eafd438ca896357f10f22a3502..d456ff0c73b73614339d4109c1bc282037c64e08 100644 (file)
@@ -973,7 +973,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buffer, size_t count)
 {
-       unsigned long value = 0;
+       int value;
        int ret = 0;
        struct sony_nc_value *item =
            container_of(attr, struct sony_nc_value, devattr);
@@ -984,7 +984,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
        if (count > 31)
                return -EINVAL;
 
-       if (kstrtoul(buffer, 10, &value))
+       if (kstrtoint(buffer, 10, &value))
                return -EINVAL;
 
        if (item->validate)
@@ -994,7 +994,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
                return value;
 
        ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
-                       (int *)&value, NULL);
+                              &value, NULL);
        if (ret < 0)
                return -EIO;
 
@@ -1010,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
 struct sony_backlight_props {
        struct backlight_device *dev;
        int                     handle;
+       int                     cmd_base;
        u8                      offset;
        u8                      maxlvl;
 };
@@ -1037,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
        struct sony_backlight_props *sdev =
                (struct sony_backlight_props *)bl_get_data(bd);
 
-       sony_call_snc_handle(sdev->handle, 0x0200, &result);
+       sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
 
        return (result & 0xff) - sdev->offset;
 }
@@ -1049,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
                (struct sony_backlight_props *)bl_get_data(bd);
 
        value = bd->props.brightness + sdev->offset;
-       if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+       if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+                               &result))
                return -EIO;
 
        return value;
@@ -1172,6 +1174,11 @@ static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
 /*
  * ACPI callbacks
  */
+enum event_types {
+       HOTKEY = 1,
+       KILLSWITCH,
+       GFX_SWITCH
+};
 static void sony_nc_notify(struct acpi_device *device, u32 event)
 {
        u32 real_ev = event;
@@ -1196,7 +1203,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                /* hotkey event */
                case 0x0100:
                case 0x0127:
-                       ev_type = 1;
+                       ev_type = HOTKEY;
                        real_ev = sony_nc_hotkeys_decode(event, handle);
 
                        if (real_ev > 0)
@@ -1216,7 +1223,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                         * update the rfkill device status when the
                         * switch is moved.
                         */
-                       ev_type = 2;
+                       ev_type = KILLSWITCH;
                        sony_call_snc_handle(handle, 0x0100, &result);
                        real_ev = result & 0x03;
 
@@ -1226,6 +1233,24 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
 
                        break;
 
+               case 0x0128:
+               case 0x0146:
+                       /* Hybrid GFX switching */
+                       sony_call_snc_handle(handle, 0x0000, &result);
+                       dprintk("GFX switch event received (reason: %s)\n",
+                                       (result & 0x01) ?
+                                       "switch change" : "unknown");
+
+                       /* verify the switch state
+                        * 1: discrete GFX
+                        * 0: integrated GFX
+                        */
+                       sony_call_snc_handle(handle, 0x0100, &result);
+
+                       ev_type = GFX_SWITCH;
+                       real_ev = result & 0xff;
+                       break;
+
                default:
                        dprintk("Unknown event 0x%x for handle 0x%x\n",
                                        event, handle);
@@ -1238,7 +1263,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
 
        } else {
                /* old style event */
-               ev_type = 1;
+               ev_type = HOTKEY;
                sony_laptop_report_input_event(real_ev);
        }
 
@@ -1893,32 +1918,33 @@ static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
         *  bits 4,5: store the limit into the EC
         *  bits 6,7: store the limit into the battery
         */
+       cmd = 0;
 
-       /*
-        * handle 0x0115 should allow storing on battery too;
-        * handle 0x0136 same as 0x0115 + health status;
-        * handle 0x013f, same as 0x0136 but no storing on the battery
-        *
-        * Store only inside the EC for now, regardless the handle number
-        */
-       if (value == 0)
-               /* disable limits */
-               cmd = 0x0;
+       if (value > 0) {
+               if (value <= 50)
+                       cmd = 0x20;
 
-       else if (value <= 50)
-               cmd = 0x21;
+               else if (value <= 80)
+                       cmd = 0x10;
 
-       else if (value <= 80)
-               cmd = 0x11;
+               else if (value <= 100)
+                       cmd = 0x30;
 
-       else if (value <= 100)
-               cmd = 0x31;
+               else
+                       return -EINVAL;
 
-       else
-               return -EINVAL;
+               /*
+                * handle 0x0115 should allow storing on battery too;
+                * handle 0x0136 same as 0x0115 + health status;
+                * handle 0x013f, same as 0x0136 but no storing on the battery
+                */
+               if (bcare_ctl->handle != 0x013f)
+                       cmd = cmd | (cmd << 2);
 
-       if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
-                               &result))
+               cmd = (cmd | 0x1) << 0x10;
+       }
+
+       if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
                return -EIO;
 
        return count;
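A worked example of the new command encoding, using only values that appear in the hunk: an 80% limit selects bit 4 (0x10); mirroring it into bits 6-7 with cmd | (cmd << 2) additionally asks for the limit to be stored in the battery (skipped for handle 0x013f); the low bit, set only when a non-zero limit is requested, appears to enable the limiter; the result is shifted into the upper half-word before being OR'ed with the 0x0100 sub-command.

#include <stdio.h>

int main(void)
{
	unsigned int cmd = 0x10;	/* 51..80%: bit 4, as in the hunk */

	cmd |= cmd << 2;		/* mirror into bits 6-7 -> 0x50 */
	cmd = (cmd | 0x1) << 0x10;	/* set the low bit, move to the high half */

	printf("SNC argument: 0x%08x\n", cmd | 0x0100);	/* prints 0x00510100 */
	return 0;
}
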
@@ -2113,7 +2139,7 @@ static ssize_t sony_nc_thermal_mode_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
 {
        ssize_t count = 0;
-       unsigned int mode = sony_nc_thermal_mode_get();
+       int mode = sony_nc_thermal_mode_get();
 
        if (mode < 0)
                return mode;
@@ -2472,6 +2498,7 @@ static void sony_nc_backlight_ng_read_limits(int handle,
 {
        u64 offset;
        int i;
+       int lvl_table_len = 0;
        u8 min = 0xff, max = 0x00;
        unsigned char buffer[32] = { 0 };
 
@@ -2480,8 +2507,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
        props->maxlvl = 0xff;
 
        offset = sony_find_snc_handle(handle);
-       if (offset < 0)
-               return;
 
        /* try to read the boundaries from ACPI tables, if we fail the above
         * defaults should be reasonable
@@ -2491,11 +2516,21 @@ static void sony_nc_backlight_ng_read_limits(int handle,
        if (i < 0)
                return;
 
+       switch (handle) {
+       case 0x012f:
+       case 0x0137:
+               lvl_table_len = 9;
+               break;
+       case 0x143:
+               lvl_table_len = 16;
+               break;
+       }
+
        /* the buffer lists brightness levels available, brightness levels are
         * from position 0 to 8 in the array, other values are used by ALS
         * control.
         */
-       for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
+       for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
 
                dprintk("Brightness level: %d\n", buffer[i]);
 
@@ -2520,16 +2555,24 @@ static void sony_nc_backlight_setup(void)
        const struct backlight_ops *ops = NULL;
        struct backlight_properties props;
 
-       if (sony_find_snc_handle(0x12f) != -1) {
+       if (sony_find_snc_handle(0x12f) >= 0) {
                ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x0100;
                sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
-       } else if (sony_find_snc_handle(0x137) != -1) {
+       } else if (sony_find_snc_handle(0x137) >= 0) {
                ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x0100;
                sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
+       } else if (sony_find_snc_handle(0x143) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x3000;
+               sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
        } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
                                                &unused))) {
                ops = &sony_backlight_ops;
@@ -2597,6 +2640,12 @@ static int sony_nc_add(struct acpi_device *device)
                }
        }
 
+       result = sony_laptop_setup_input(device);
+       if (result) {
+               pr_err("Unable to create input devices\n");
+               goto outplatform;
+       }
+
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
                int arg = 1;
@@ -2614,12 +2663,6 @@ static int sony_nc_add(struct acpi_device *device)
        }
 
        /* setup input devices and helper fifo */
-       result = sony_laptop_setup_input(device);
-       if (result) {
-               pr_err("Unable to create input devices\n");
-               goto outsnc;
-       }
-
        if (acpi_video_backlight_support()) {
                pr_info("brightness ignored, must be controlled by ACPI video driver\n");
        } else {
@@ -2667,22 +2710,21 @@ static int sony_nc_add(struct acpi_device *device)
 
        return 0;
 
-      out_sysfs:
+out_sysfs:
        for (item = sony_nc_values; item->name; ++item) {
                device_remove_file(&sony_pf_device->dev, &item->devattr);
        }
        sony_nc_backlight_cleanup();
-
-       sony_laptop_remove_input();
-
-      outsnc:
        sony_nc_function_cleanup(sony_pf_device);
        sony_nc_handles_cleanup(sony_pf_device);
 
-      outpresent:
+outplatform:
+       sony_laptop_remove_input();
+
+outpresent:
        sony_pf_remove();
 
-      outwalk:
+outwalk:
        sony_nc_rfkill_cleanup();
        return result;
 }
index e1b8c54ace5a2a68e0d4f3ee28f36ff626f06645..a739f5ca936a878481f408d67e0a7edf8688a42e 100644 (file)
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
 }
 
 static struct of_regulator_match ab8500_regulator_matches[] = {
-       { .name = "LDO-AUX1",    .driver_data = (void *) AB8500_LDO_AUX1, },
-       { .name = "LDO-AUX2",    .driver_data = (void *) AB8500_LDO_AUX2, },
-       { .name = "LDO-AUX3",    .driver_data = (void *) AB8500_LDO_AUX3, },
-       { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
-       { .name = "LDO-TVOUT",   .driver_data = (void *) AB8500_LDO_TVOUT, },
-       { .name = "LDO-USB",     .driver_data = (void *) AB8500_LDO_USB, },
-       { .name = "LDO-AUDIO",   .driver_data = (void *) AB8500_LDO_AUDIO, },
-       { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
-       { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
-       { .name = "LDO-DMIC",    .driver_data = (void *) AB8500_LDO_DMIC, },
-       { .name = "LDO-ANA",     .driver_data = (void *) AB8500_LDO_ANA, },
+       { .name = "ab8500_ldo_aux1",    .driver_data = (void *) AB8500_LDO_AUX1, },
+       { .name = "ab8500_ldo_aux2",    .driver_data = (void *) AB8500_LDO_AUX2, },
+       { .name = "ab8500_ldo_aux3",    .driver_data = (void *) AB8500_LDO_AUX3, },
+       { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+       { .name = "ab8500_ldo_tvout",   .driver_data = (void *) AB8500_LDO_TVOUT, },
+       { .name = "ab8500_ldo_usb",     .driver_data = (void *) AB8500_LDO_USB, },
+       { .name = "ab8500_ldo_audio",   .driver_data = (void *) AB8500_LDO_AUDIO, },
+       { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+       { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+       { .name = "ab8500_ldo_dmic",    .driver_data = (void *) AB8500_LDO_DMIC, },
+       { .name = "ab8500_ldo_ana",     .driver_data = (void *) AB8500_LDO_ANA, },
 };
 
 static __devinit int
index 3660bace123c97adc3f3ce4e1165216655578a3d..e82e7eaac0f18fd2a50ba3d5506c5620e3e50183 100644 (file)
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = {
                .of_match_table = of_anatop_regulator_match_tbl,
        },
        .probe  = anatop_regulator_probe,
-       .remove = anatop_regulator_remove,
+       .remove = __devexit_p(anatop_regulator_remove),
 };
 
 static int __init anatop_regulator_init(void)
index 7584a74eec8a4f7706a59e9013e40bab34d30ef8..8b4b3829d9e719f1acb57ef0d5b158beeefe914d 100644 (file)
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
+       if (min_uV < rdev->desc->min_uV)
+               min_uV = rdev->desc->min_uV;
+
        ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
        if (ret < 0)
                return ret;
@@ -2516,9 +2519,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 {
        struct regulator_dev *rdev = regulator->rdev;
        struct regulator *consumer;
-       int ret, output_uV, input_uV, total_uA_load = 0;
+       int ret, output_uV, input_uV = 0, total_uA_load = 0;
        unsigned int mode;
 
+       if (rdev->supply)
+               input_uV = regulator_get_voltage(rdev->supply);
+
        mutex_lock(&rdev->mutex);
 
        /*
@@ -2551,10 +2557,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
                goto out;
        }
 
-       /* get input voltage */
-       input_uV = 0;
-       if (rdev->supply)
-               input_uV = regulator_get_voltage(rdev->supply);
+       /* No supply? Use constraint voltage */
        if (input_uV <= 0)
                input_uV = rdev->constraints->input_uV;
        if (input_uV <= 0) {
index 968f97f3cb3d5ec8cc299a55aafbdd4bcc09e705..9dbb491b6efa827a64198baccfc42f6fe6612f79 100644 (file)
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
 }
 
 static struct of_regulator_match db8500_regulator_matches[] = {
-       { .name = "db8500-vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
-       { .name = "db8500-varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
-       { .name = "db8500-vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
-       { .name = "db8500-vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
-       { .name = "db8500-vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
-       { .name = "db8500-vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
-       { .name = "db8500-vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
-       { .name = "db8500-vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
-       { .name = "db8500-sva-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
-       { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
-       { .name = "db8500-sva-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
-       { .name = "db8500-sia-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
-       { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
-       { .name = "db8500-sia-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
-       { .name = "db8500-sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
-       { .name = "db8500-b2r2-mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
-       { .name = "db8500-esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
-       { .name = "db8500-esram12-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
-       { .name = "db8500-esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
-       { .name = "db8500-esram34-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+       { .name = "db8500_vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+       { .name = "db8500_varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
+       { .name = "db8500_vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+       { .name = "db8500_vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+       { .name = "db8500_vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+       { .name = "db8500_vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+       { .name = "db8500_vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+       { .name = "db8500_vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+       { .name = "db8500_sva_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+       { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+       { .name = "db8500_sva_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+       { .name = "db8500_sia_mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+       { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+       { .name = "db8500_sia_pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+       { .name = "db8500_sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+       { .name = "db8500_b2r2_mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+       { .name = "db8500_esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+       { .name = "db8500_esram12_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+       { .name = "db8500_esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+       { .name = "db8500_esram34_ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
 };
 
 static __devinit int
index 9997d7aaca84294d03462f428b606d93de39a9df..242851a4c1a606d69b41efad88f74dc1060817db 100644 (file)
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
 }
 
 static int gpio_regulator_set_value(struct regulator_dev *dev,
-                                       int min, int max)
+                                       int min, int max, unsigned *selector)
 {
        struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-       int ptr, target, state, best_val = INT_MAX;
+       int ptr, target = 0, state, best_val = INT_MAX;
 
        for (ptr = 0; ptr < data->nr_states; ptr++)
                if (data->states[ptr].value < best_val &&
                    data->states[ptr].value >= min &&
-                   data->states[ptr].value <= max)
+                   data->states[ptr].value <= max) {
                        target = data->states[ptr].gpios;
+                       best_val = data->states[ptr].value;
+                       if (selector)
+                               *selector = ptr;
+               }
 
        if (best_val == INT_MAX)
                return -EINVAL;
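The fix above repairs a best-match scan: target is now initialized, best_val is updated as matches are found (previously it never changed), and the chosen index is reported back through the selector out-parameter. A stand-alone sketch of the corrected pattern with made-up voltages:

#include <stdio.h>
#include <limits.h>

static int pick_lowest_in_range(const int *vals, int n, int min, int max,
				unsigned *selector)
{
	int i, best = INT_MAX, target = -1;

	for (i = 0; i < n; i++)
		if (vals[i] < best && vals[i] >= min && vals[i] <= max) {
			best = vals[i];		/* remember the new best ... */
			target = i;
			if (selector)
				*selector = i;	/* ... and report its index */
		}

	return (best == INT_MAX) ? -1 : target;
}

int main(void)
{
	int uV[] = { 1200000, 1800000, 3300000 };
	unsigned sel;
	int idx = pick_lowest_in_range(uV, 3, 1500000, 3400000, &sel);

	if (idx >= 0)
		printf("picked %d uV (selector %u)\n", uV[idx], sel);
	return 0;
}
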
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
                                        int min_uV, int max_uV,
                                        unsigned *selector)
 {
-       return gpio_regulator_set_value(dev, min_uV, max_uV);
+       return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
 }
 
 static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
 static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
                                        int min_uA, int max_uA)
 {
-       return gpio_regulator_set_value(dev, min_uA, max_uA);
+       return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
 }
 
 static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
 
        cfg.dev = &pdev->dev;
        cfg.init_data = config->init_data;
-       cfg.driver_data = &drvdata;
+       cfg.driver_data = drvdata;
 
        drvdata->dev = regulator_register(&drvdata->desc, &cfg);
        if (IS_ERR(drvdata->dev)) {
index 1f4bb80457b3a8a153a4ae9c44f46196ec8733db..9d540cd02dab5f4de264dc35599322b773d6c484 100644 (file)
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
        config.dev = &client->dev;
        config.init_data = pdata->regulator;
        config.driver_data = info;
+       config.regmap = info->regmap;
 
        info->regulator = regulator_register(&dcdc_desc, &config);
        if (IS_ERR(info->regulator)) {
index c4435f608df7e2210dedbbaa3e3bfd0ac2f63de9..795f75a6ac3342537a18600e2d0414b1f011caaf 100644 (file)
@@ -673,7 +673,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
                        pmic->desc[id].ops = &palmas_ops_smps10;
                        pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
                        pmic->desc[id].vsel_mask = SMPS10_VSEL;
-                       pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+                       pmic->desc[id].enable_reg =
+                                       PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+                                                       PALMAS_SMPS10_STATUS);
                        pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
                }
 
@@ -739,7 +741,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
 
                pmic->desc[id].type = REGULATOR_VOLTAGE;
                pmic->desc[id].owner = THIS_MODULE;
-               pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+               pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+                                               palmas_regs_info[id].ctrl_addr);
                pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
 
                if (pdata && pdata->reg_data)
@@ -775,9 +778,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
 err_unregister_regulator:
        while (--id >= 0)
                regulator_unregister(pmic->rdev[id]);
-       kfree(pmic->rdev);
-       kfree(pmic->desc);
-       kfree(pmic);
        return ret;
 }
 
@@ -788,10 +788,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
 
        for (id = 0; id < PALMAS_NUM_REGS; id++)
                regulator_unregister(pmic->rdev[id]);
-
-       kfree(pmic->rdev);
-       kfree(pmic->desc);
-       kfree(pmic);
        return 0;
 }
 
index 290d6fc01029a92cbb4a9e7a4783d6cebaa0da02..9caadb4821786308aa624e9ef03cd7408dc2083f 100644 (file)
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
 
        desc = reg_voltage_map[reg_id];
 
-       if (old_sel < new_sel)
+       if ((old_sel < new_sel) && s5m8767->ramp_delay)
                return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
                                        s5m8767->ramp_delay * 1000);
        return 0;
index f841bd0db6aabe523d5c4c8089743c448c72138f..8f1be8586c724a99061092d787273573eda5644e 100644 (file)
@@ -71,7 +71,7 @@
 
 /* LDO_CTRL bitfields */
 #define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id)   ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)    (0x0F << ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)    (0x07 << ((ldo_id)*4))
 
 /* Number of step-down converters available */
 #define TPS65023_NUM_DCDC              3
index b88b3df82381dc15bb81288c32874480a3eaad94..1b299aacf22ffa2023652c5a45cf61ac1a1c1416 100644 (file)
@@ -482,7 +482,7 @@ static int get_voltage_sel(struct regulator_dev *rdev)
        info    = &supply_info[rdev_get_id(rdev)];
 
        if (info->flags & FIXED_VOLTAGE)
-               return info->fixed_voltage;
+               return 0;
 
        ret = read_field(hw, &info->voltage);
        if (ret < 0)
index 24d880e78ec6704e8b84e6d0f6547602f1f639bb..f8d818abf98caf4dc8275a65e18695c41c639537 100644 (file)
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
 config REMOTEPROC
        tristate
        depends on EXPERIMENTAL
+       select FW_CONFIG
 
 config OMAP_REMOTEPROC
        tristate "OMAP remoteproc support"
+       depends on EXPERIMENTAL
        depends on ARCH_OMAP4
        depends on OMAP_IOMMU
        select REMOTEPROC
index 69425c4e86f30323e765d59151c0ab6d7517355c..de138e30d3e6ca0417ef444732721c14290f959c 100644 (file)
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
 
        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (ret) {
-               dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+               dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
                return ret;
        }
 
index 8ea7bccc71007fd94132cf7c65da5f5ed5095a12..66324ee4678f45984348519813119dc41d71ade9 100644 (file)
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
                }
 
                if (offset + filesz > len) {
-                       dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+                       dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
                                        offset + filesz, len);
                        ret = -EINVAL;
                        break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
                unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
                if (unmapped != entry->len) {
                        /* nothing much to do besides complaining */
-                       dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+                       dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
                                                                unmapped);
                }
 
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        ehdr = (struct elf32_hdr *)fw->data;
 
-       dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+       dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
 
        /*
         * if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        /* look for the resource table */
        table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
-       if (!table)
+       if (!table) {
+               ret = -EINVAL;
                goto clean_up;
+       }
 
        /* handle fw resources which are required to boot rproc */
        ret = rproc_handle_boot_rsc(rproc, table, tablesz);
index 75506ec2840e2ec4d1e331f209377d874f604386..f56c8ba3a861cda16870e7456391fe564ec741c6 100644 (file)
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
                                        rpdev->id.name);
 }
 
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+       struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+                                                 refcount);
+       /*
+        * At this point no one holds a reference to ept anymore,
+        * so we can directly free it
+        */
+       kfree(ept);
+}
+
 /* for more info, see below documentation of rpmsg_create_ept() */
 static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                return NULL;
        }
 
+       kref_init(&ept->refcount);
+       mutex_init(&ept->cb_lock);
+
        ept->rpdev = rpdev;
        ept->cb = cb;
        ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
        idr_remove(&vrp->endpoints, request);
 free_ept:
        mutex_unlock(&vrp->endpoints_lock);
-       kfree(ept);
+       kref_put(&ept->refcount, __ept_release);
        return NULL;
 }
 
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
 static void
 __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
 {
+       /* make sure new inbound messages can't find this ept anymore */
        mutex_lock(&vrp->endpoints_lock);
        idr_remove(&vrp->endpoints, ept->addr);
        mutex_unlock(&vrp->endpoints_lock);
 
-       kfree(ept);
+       /* make sure in-flight inbound messages won't invoke cb anymore */
+       mutex_lock(&ept->cb_lock);
+       ept->cb = NULL;
+       mutex_unlock(&ept->cb_lock);
+
+       kref_put(&ept->refcount, __ept_release);
 }
 
 /**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
 
        /* use the dst addr to fetch the callback of the appropriate user */
        mutex_lock(&vrp->endpoints_lock);
+
        ept = idr_find(&vrp->endpoints, msg->dst);
+
+       /* let's make sure no one deallocates ept while we use it */
+       if (ept)
+               kref_get(&ept->refcount);
+
        mutex_unlock(&vrp->endpoints_lock);
 
-       if (ept && ept->cb)
-               ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
-       else
+       if (ept) {
+               /* make sure ept->cb doesn't go away while we use it */
+               mutex_lock(&ept->cb_lock);
+
+               if (ept->cb)
+                       ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+                               msg->src);
+
+               mutex_unlock(&ept->cb_lock);
+
+               /* farewell, ept, we don't need you anymore */
+               kref_put(&ept->refcount, __ept_release);
+       } else
                dev_warn(dev, "msg received with no recepient\n");
 
        /* publish the real size of the buffer */
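The hunks above give each endpoint a kref and a cb_lock so the lookup lock only has to cover finding the object and taking a reference; the reference then keeps the endpoint alive while its callback runs outside the lock. A module-style sketch of that lookup-get/use/put shape (all names invented, not the rpmsg API); it is meant to compile against kernel headers, not to be loaded as-is:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_ept {
	struct kref refcount;
	int addr;
};

static DEFINE_MUTEX(lookup_lock);
static struct demo_ept *the_ept;	/* stand-in for the idr lookup */

static void demo_ept_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ept, refcount));
}

static void demo_deliver(void)
{
	struct demo_ept *ept;

	mutex_lock(&lookup_lock);
	ept = the_ept;				/* idr_find() in the real driver */
	if (ept)
		kref_get(&ept->refcount);	/* pin it before dropping the lock */
	mutex_unlock(&lookup_lock);

	if (!ept)
		return;

	pr_info("delivering to ept %d\n", ept->addr);	/* use outside the lock */

	kref_put(&ept->refcount, demo_ept_release);	/* drop our pin */
}

static int __init demo_init(void)
{
	struct demo_ept *ept = kzalloc(sizeof(*ept), GFP_KERNEL);

	if (!ept)
		return -ENOMEM;
	kref_init(&ept->refcount);	/* the creation reference */
	ept->addr = 53;
	the_ept = ept;

	demo_deliver();
	return 0;
}

static void __exit demo_exit(void)
{
	kref_put(&the_ept->refcount, demo_ept_release);	/* drop the creation ref */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
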
@@ -1040,7 +1085,7 @@ static int __init rpmsg_init(void)
 
        return ret;
 }
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);
 
 static void __exit rpmsg_fini(void)
 {
index 4bcf9ca2818ae740eadd016df48cc0461da23f6e..370889d0489bf9e4f196abcbbcd6f11b25e6f5a8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 
 #define AB8500_RTC_SOFF_STAT_REG       0x00
 #define AB8500_RTC_CC_CONF_REG         0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
        }
 
        err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
-               IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+               IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
        if (err < 0) {
                rtc_device_unregister(rtc);
                return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, rtc);
 
-
        err = ab8500_sysfs_rtc_register(&pdev->dev);
        if (err) {
                dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id ab8500_rtc_match[] = {
+       { .compatible = "stericsson,ab8500-rtc", },
+       {}
+};
+
 static struct platform_driver ab8500_rtc_driver = {
        .driver = {
                .name = "ab8500-rtc",
                .owner = THIS_MODULE,
+               .of_match_table = ab8500_rtc_match,
        },
        .probe  = ab8500_rtc_probe,
        .remove = __devexit_p(ab8500_rtc_remove),
index 7d5f56edb8efc30e8beb70cbad177a0cca32dc35..4267789ca9959413e90df5ea053154e07481d3ce 100644 (file)
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
 
 static u32 rtc_handler(void *context)
 {
+       struct device *dev = context;
+
+       pm_wakeup_event(dev, 0);
        acpi_clear_event(ACPI_EVENT_RTC);
        acpi_disable_event(ACPI_EVENT_RTC, 0);
        return ACPI_INTERRUPT_HANDLED;
 }
 
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
 {
-       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
        /*
         * After the RTC handler is installed, the Fixed_RTC event should
         * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
        if (acpi_disabled)
                return;
 
-       rtc_wake_setup();
+       rtc_wake_setup(dev);
        acpi_rtc_info.wake_on = rtc_wake_on;
        acpi_rtc_info.wake_off = rtc_wake_off;
 
index 5e1d64ee52289b9e7a47990f8c7de0b41c7fccaf..e3e50d69baf85e75966bdea6d7f7b95ae6d65af0 100644 (file)
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        struct platform_device *pdev = dev_id;
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
+       unsigned long flags;
        u32 status;
        u32 events = 0;
 
-       spin_lock_irq(&pdata->rtc->irq_lock);
+       spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
        status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
        /* clear interrupt sources */
        writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
                events |= (RTC_PF | RTC_IRQF);
 
        rtc_update_irq(pdata->rtc, 1, events);
-       spin_unlock_irq(&pdata->rtc->irq_lock);
+       spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
 
        return IRQ_HANDLED;
 }
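The change above switches the handler to the irqsave/irqrestore spin-lock variants: an interrupt handler cannot assume interrupts were enabled on entry, and spin_unlock_irq() would turn them back on unconditionally. A compile-oriented fragment of the same shape (handler body invented, not wired up with request_irq()):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_events;

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	unsigned long flags;

	/* save the current interrupt state, restore it afterwards */
	spin_lock_irqsave(&demo_lock, flags);
	demo_events++;			/* touch state shared with process context */
	spin_unlock_irqrestore(&demo_lock, flags);

	return IRQ_HANDLED;
}
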
index 1f76320e545b1cf15d563cc8996b8eed7122f14a..e2785479113ca06648949d6c6a78adee92c78367 100644 (file)
@@ -458,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
        clk_disable(config->clk);
        clk_put(config->clk);
        iounmap(config->ioaddr);
-       kfree(config);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
        platform_set_drvdata(pdev, NULL);
        rtc_device_unregister(config->rtc);
+       kfree(config);
 
        return 0;
 }
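The hunk above simply moves kfree(config) past the last access to config->rtc; freeing the structure first made rtc_device_unregister() read freed memory. A tiny userspace illustration of the rule (names invented):

#include <stdlib.h>
#include <stdio.h>

struct config { const char *rtc_name; };

int main(void)
{
	struct config *cfg = malloc(sizeof(*cfg));

	if (!cfg)
		return 1;
	cfg->rtc_name = "rtc0";

	/* wrong order would be: free(cfg); printf("%s", cfg->rtc_name); */

	printf("unregistering %s\n", cfg->rtc_name);	/* last use first ... */
	free(cfg);					/* ... then free */
	return 0;
}
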
index 258abeabf6246a1ed7e4db116713bb3128009fa0..c5d06fe83bba6274f2e6b944a43489395e04e01c 100644 (file)
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        }
 
        ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
-                                  IRQF_TRIGGER_RISING,
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                   dev_name(&rtc->dev), rtc);
        if (ret < 0) {
                dev_err(&pdev->dev, "IRQ is not free.\n");
index 532d212b6b2c7b633a9b4338da14c78439a1d3db..393e7ce8e95a15ef5907c820dcb2ff9bd3542ac8 100644 (file)
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
 
                if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
                        resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
-                       memcpy(&resp->ending_fis[0], r+16, 24);
+                       memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
                        ts->buf_valid_size = sizeof(*resp);
                }
        }
index 0c53c28dc3d379a5a09960e2e9ab8fae53cb8526..7e77cf62029190562d718fe7c365cb087c3e573e 100644 (file)
@@ -350,6 +350,7 @@ struct bnx2i_hba {
        struct pci_dev *pcidev;
        struct net_device *netdev;
        void __iomem *regview;
+       resource_size_t reg_base;
 
        u32 age;
        unsigned long cnic_dev_type;
index ece47e502282d389b91e5d170ca023688a22deb6..86a12b48e477e804e12765e7d535d7e70f44405e 100644 (file)
@@ -2724,7 +2724,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                goto arm_cq;
        }
 
-       reg_base = ep->hba->netdev->base_addr;
        if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
            (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
                config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2739,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                /* 5709 device in normal node and 5706/5708 devices */
                reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+       ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
                                          MB_KERNEL_CTX_SIZE);
        if (!ep->qp.ctx_base)
                return -ENOMEM;
index f8d516b531617426cc05d626188075facf4e5a6a..621538b8b5445fd3e3e2685213ae7118f7229fd0 100644 (file)
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
        bnx2i_identify_device(hba);
        bnx2i_setup_host_queue_size(hba, shost);
 
+       hba->reg_base = pci_resource_start(hba->pcidev, 0);
        if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr,
-                                              BNX2_MQ_CONFIG2);
+               hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
                if (!hba->regview)
                        goto ioreg_map_err;
        } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-               hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+               hba->regview = pci_iomap(hba->pcidev, 0, 4096);
                if (!hba->regview)
                        goto ioreg_map_err;
        }
@@ -884,7 +884,7 @@ cid_que_err:
        bnx2i_free_mp_bdt(hba);
 mp_bdt_mem_err:
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
 ioreg_map_err:
@@ -910,7 +910,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
        pci_dev_put(hba->pcidev);
 
        if (hba->regview) {
-               iounmap(hba->regview);
+               pci_iounmap(hba->pcidev, hba->regview);
                hba->regview = NULL;
        }
        bnx2i_free_mp_bdt(hba);
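
The bnx2i changes above stop trusting netdev->base_addr and instead record the BAR with pci_resource_start() and map it with pci_iomap(), pairing every map with pci_iounmap(). A rough sketch of that pattern (the my_* names are illustrative):

#include <linux/pci.h>

/* Remember the raw BAR 0 bus address for later offset calculations. */
static resource_size_t my_bar0_base(struct pci_dev *pdev)
{
        return pci_resource_start(pdev, 0);
}

/* Map the first maxlen bytes of BAR 0; the cookie must go to pci_iounmap(). */
static void __iomem *my_map_bar0(struct pci_dev *pdev, unsigned long maxlen)
{
        return pci_iomap(pdev, 0, maxlen);
}

static void my_unmap_bar0(struct pci_dev *pdev, void __iomem *regs)
{
        if (regs)
                pci_iounmap(pdev, regs);
}
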
index 441d88ad99a7bb3abadb8b1e9af25281ced8334b..d109cc3a17b64126ee9ff38a5da320a070ffc2c5 100644 (file)
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
        if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
            ((stat->stat == SAM_STAT_CHECK_CONDITION &&
              dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
-               ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+               memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
 
                if (!link->sactive) {
-                       qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                } else {
-                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                        if (unlikely(link->eh_info.err_mask))
                                qc->flags |= ATA_QCFLAG_FAILED;
                }
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
                                qc->flags |= ATA_QCFLAG_FAILED;
                        }
 
-                       dev->sata_dev.tf.feature = 0x04; /* status err */
-                       dev->sata_dev.tf.command = ATA_ERR;
+                       dev->sata_dev.fis[3] = 0x04; /* status err */
+                       dev->sata_dev.fis[2] = ATA_ERR;
                }
        }
 
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
        struct domain_device *dev = qc->ap->private_data;
 
-       memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+       ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
        return true;
 }
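
The libsas hunks above keep the raw 24-byte D2H FIS (ATA_RESP_FIS_SIZE) in the domain device and only convert it with ata_tf_from_fis() when the result taskfile is requested; byte 2 of a D2H Register FIS carries the status and byte 3 the error, which is why fis[2]/fis[3] replace tf.command/tf.feature. A compressed sketch of the same idea, using hypothetical my_* names:

#include <linux/libata.h>
#include <linux/string.h>
#include <scsi/libsas.h>        /* ATA_RESP_FIS_SIZE in this series */

struct my_sata_result {
        u8 fis[ATA_RESP_FIS_SIZE];
};

/* Completion path: just stash the FIS, no taskfile conversion yet. */
static void my_save_fis(struct my_sata_result *res, const u8 *resp_fis)
{
        memcpy(res->fis, resp_fis, ATA_RESP_FIS_SIZE);
}

/* qc_fill_rtf path: decode the stored FIS into a taskfile on demand. */
static void my_fill_result_tf(const struct my_sata_result *res,
                              struct ata_taskfile *tf)
{
        ata_tf_from_fis(res->fis, tf);
}
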
 
index 6102ef2cb2d863ff23712d2cc5e6db2032f878bb..9d46fcbe7755fd2aa258e3de91c5a8cd76e3079c 100644 (file)
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
 static inline u8
 _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
 {
-       return ioc->cpu_msix_table[smp_processor_id()];
+       return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
 /**
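
smp_processor_id() warns when called from preemptible context because the CPU can change under the caller. In the mpt2sas hunk above the value is only a hint for picking an MSI-X reply queue, so raw_smp_processor_id() is the appropriate, check-free variant. A sketch with a hypothetical helper:

#include <linux/smp.h>
#include <linux/types.h>

/* Any CPU's answer is acceptable here, so skip the preemption check. */
static u8 my_pick_reply_queue(const u8 *cpu_to_queue)
{
        return cpu_to_queue[raw_smp_processor_id()];
}
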
index 04f80ebf09ebcb691dd2764ea07882e93c9e01df..77759c78cc21167cb93c002f6f951efeb4cab0db 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/version.h>
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -2477,11 +2476,9 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
        }
 
        cmd = qlt_ctio_to_cmd(vha, handle, ctio);
-       if (cmd == NULL) {
-               if (status != CTIO_SUCCESS)
-                       qlt_term_ctio_exchange(vha, ctio, NULL, status);
+       if (cmd == NULL)
                return;
-       }
+
        se_cmd = &cmd->se_cmd;
        tfo = se_cmd->se_tfo;
 
@@ -2727,10 +2724,12 @@ static void qlt_do_work(struct work_struct *work)
 out_term:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
        /*
-        * cmd has not sent to target yet, so pass NULL as the second argument
+        * cmd has not been sent to the target yet, so pass NULL as the second
+        * argument to qlt_send_term_exchange() and free the memory here.
         */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+       kmem_cache_free(qla_tgt_cmd_cachep, cmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        if (sess)
                ha->tgt.tgt_ops->put_sess(sess);
@@ -3961,7 +3960,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
-       int reason_code;
+       int login_code;
 
        ql_dbg(ql_dbg_tgt, vha, 0xe039,
            "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
@@ -4004,9 +4003,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
                    "qla_target(%d): Async LOOP_UP occured "
-                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                if (tgt->link_reinit_iocb_pending) {
                        qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
                            0, 0, 0, 0, 0, 0);
@@ -4021,23 +4020,24 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        case MBA_RSCN_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
                    "qla_target(%d): Async event %#x occured "
-                   "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
 
        case MBA_PORT_UPDATE:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
                    "qla_target(%d): Port update async event %#x "
-                   "occured: updating the ports database (m[1]=%x, m[2]=%x, "
-                   "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
-                   le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
-               reason_code = le16_to_cpu(mailbox[2]);
-               if (reason_code == 0x4)
+                   "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+                   "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+                   le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+               login_code = le16_to_cpu(mailbox[2]);
+               if (login_code == 0x4)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
                            "Async MB 2: Got PLOGI Complete\n");
-               else if (reason_code == 0x7)
+               else if (login_code == 0x7)
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
                            "Async MB 2: Port Logged Out\n");
                break;
@@ -4045,9 +4045,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
        default:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
                    "qla_target(%d): Async event %#x occured: "
-                   "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
-                   code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-                   le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+                   "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+                   code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+                   le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
        }
 
index 9ec19bc2f0fe262720024e5b634a2ecb7131480e..9f9ef1644fd97d1fbcb5ba1f978c5080411f9991 100644 (file)
@@ -919,7 +919,6 @@ struct qla_tgt_srr_ctio {
 #define QLA_TGT_XMIT_STATUS            2
 #define QLA_TGT_XMIT_ALL               (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
 
-#include <linux/version.h>
 
 extern struct qla_tgt_data qla_target;
 /*
index 436598f57404738fd038f3a013b5d5ddad0e0fdf..6e64314dbbb3d33e70782e1506039f9480d1b5c3 100644 (file)
@@ -137,13 +137,15 @@ static char *tcm_qla2xxx_get_fabric_name(void)
  */
 static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
 {
-       unsigned int i, j, value;
+       unsigned int i, j;
        u8 wwn[8];
 
        memset(wwn, 0, sizeof(wwn));
 
        /* Validate and store the new name */
        for (i = 0, j = 0; i < 16; i++) {
+               int value;
+
                value = hex_to_bin(*ns++);
                if (value >= 0)
                        j = (j << 4) | value;
@@ -652,8 +654,8 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 /*
  * Called from qla_target.c:qlt_issue_task_mgmt()
  */
-int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
-                       uint8_t tmr_func, uint32_t tag)
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+       uint8_t tmr_func, uint32_t tag)
 {
        struct qla_tgt_sess *sess = mcmd->sess;
        struct se_cmd *se_cmd = &mcmd->se_cmd;
@@ -762,65 +764,8 @@ static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
 
-static int tcm_qla2xxx_setup_nacl_from_rport(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct tcm_qla2xxx_lport *lport,
-       struct tcm_qla2xxx_nacl *nacl,
-       u64 rport_wwnn)
-{
-       struct scsi_qla_host *vha = lport->qla_vha;
-       struct Scsi_Host *sh = vha->host;
-       struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
-       struct fc_rport *rport;
-       unsigned long flags;
-       void *node;
-       int rc;
-
-       /*
-        * Scan the existing rports, and create a session for the
-        * explict NodeACL is an matching rport->node_name already
-        * exists.
-        */
-       spin_lock_irqsave(sh->host_lock, flags);
-       list_for_each_entry(rport, &fc_host->rports, peers) {
-               if (rport_wwnn != rport->node_name)
-                       continue;
-
-               pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n",
-                   rport->node_name, rport->port_id);
-               nacl->nport_id = rport->port_id;
-
-               spin_unlock_irqrestore(sh->host_lock, flags);
-
-               spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-               node = btree_lookup32(&lport->lport_fcport_map, rport->port_id);
-               if (node) {
-                       rc = btree_update32(&lport->lport_fcport_map,
-                                           rport->port_id, se_nacl);
-               } else {
-                       rc = btree_insert32(&lport->lport_fcport_map,
-                                           rport->port_id, se_nacl,
-                                           GFP_ATOMIC);
-               }
-               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-
-               if (rc) {
-                       pr_err("Unable to insert se_nacl into fcport_map");
-                       WARN_ON(rc > 0);
-                       return rc;
-               }
-
-               pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n",
-                   se_nacl, rport_wwnn, nacl->nport_id);
-
-               return 1;
-       }
-       spin_unlock_irqrestore(sh->host_lock, flags);
-
-       return 0;
-}
-
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+                       struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
 /*
  * Expected to be called with struct qla_hw_data->hardware_lock held
  */
@@ -842,11 +787,40 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
 
        pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
            se_nacl, nacl->nport_wwnn, nacl->nport_id);
+       /*
+        * Now clear the se_nacl and session pointers from our HW lport lookup
+        * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+        *
+        * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+        * target_wait_for_sess_cmds() before the session waits for outstanding
+        * I/O to complete, to avoid a race between session shutdown execution
+        * and incoming ATIOs or TMRs picking up a stale se_node_act reference.
+        */
+       tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+       struct se_session *se_sess = container_of(kref,
+                       struct se_session, sess_kref);
+
+       qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+       struct qla_hw_data *ha = sess->vha->hw;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
 {
-       target_put_session(sess->se_sess);
+       tcm_qla2xxx_put_session(sess->se_sess);
 }
 
 static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
@@ -859,14 +833,10 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
        struct config_group *group,
        const char *name)
 {
-       struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
-       struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
-                               struct tcm_qla2xxx_lport, lport_wwn);
        struct se_node_acl *se_nacl, *se_nacl_new;
        struct tcm_qla2xxx_nacl *nacl;
        u64 wwnn;
        u32 qla2xxx_nexus_depth;
-       int rc;
 
        if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
                return ERR_PTR(-EINVAL);
@@ -893,16 +863,6 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
        nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
        nacl->nport_wwnn = wwnn;
        tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
-       /*
-        * Setup a se_nacl handle based on an a matching struct fc_rport setup
-        * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
-        */
-       rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport,
-                                       nacl, wwnn);
-       if (rc < 0) {
-               tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
-               return ERR_PTR(rc);
-       }
 
        return se_nacl;
 }
@@ -1390,6 +1350,25 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
            nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
 }
 
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+               struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+       struct se_session *se_sess = sess->se_sess;
+       unsigned char be_sid[3];
+
+       be_sid[0] = sess->s_id.b.domain;
+       be_sid[1] = sess->s_id.b.area;
+       be_sid[2] = sess->s_id.b.al_pa;
+
+       tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+                               sess, be_sid);
+       tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+                               sess, sess->loop_id);
+}
+
 static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 {
        struct qla_tgt *tgt = sess->tgt;
@@ -1398,8 +1377,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
        struct se_node_acl *se_nacl;
        struct tcm_qla2xxx_lport *lport;
        struct tcm_qla2xxx_nacl *nacl;
-       unsigned char be_sid[3];
-       unsigned long flags;
 
        BUG_ON(in_interrupt());
 
@@ -1419,21 +1396,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
                return;
        }
        target_wait_for_sess_cmds(se_sess, 0);
-       /*
-        * And now clear the se_nacl and session pointers from our HW lport
-        * mappings for fabric S_ID and LOOP_ID.
-        */
-       memset(&be_sid, 0, 3);
-       be_sid[0] = sess->s_id.b.domain;
-       be_sid[1] = sess->s_id.b.area;
-       be_sid[2] = sess->s_id.b.al_pa;
-
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
-                       sess, be_sid);
-       tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
-                       sess, sess->loop_id);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        transport_deregister_session_configfs(sess->se_sess);
        transport_deregister_session(sess->se_sess);
@@ -1731,6 +1693,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .new_cmd_map                    = NULL,
        .check_stop_free                = tcm_qla2xxx_check_stop_free,
        .release_cmd                    = tcm_qla2xxx_release_cmd,
+       .put_session                    = tcm_qla2xxx_put_session,
        .shutdown_session               = tcm_qla2xxx_shutdown_session,
        .close_session                  = tcm_qla2xxx_close_session,
        .sess_get_index                 = tcm_qla2xxx_sess_get_index,
@@ -1779,6 +1742,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
        .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
        .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
        .release_cmd                    = tcm_qla2xxx_release_cmd,
+       .put_session                    = tcm_qla2xxx_put_session,
        .shutdown_session               = tcm_qla2xxx_shutdown_session,
        .close_session                  = tcm_qla2xxx_close_session,
        .sess_get_index                 = tcm_qla2xxx_sess_get_index,
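
The tcm_qla2xxx rework above drops the rport-scanning setup path and routes session release through a fabric .put_session callback so that the final kref_put() runs under qla_hw_data->hardware_lock, closing the race with incoming ATIOs and TMRs. The core shape of a lock-protected kref release, with hypothetical my_session types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/spinlock.h>

struct my_session {
        struct kref kref;
        spinlock_t *hw_lock;    /* lock the release callback relies on */
};

static void my_session_release(struct kref *kref)
{
        struct my_session *sess = container_of(kref, struct my_session, kref);

        /* Final teardown; the caller guarantees hw_lock is held here. */
        (void)sess;
}

static void my_session_put(struct my_session *sess)
{
        unsigned long flags;

        spin_lock_irqsave(sess->hw_lock, flags);
        kref_put(&sess->kref, my_session_release);
        spin_unlock_irqrestore(sess->hw_lock, flags);
}
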
index 61c82a345f8275ea0ece24bcf0bcc447becf97b3..bbbc9c918d4cc515f15d3b7edce9fa2359f3594a 100644 (file)
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
-/* sd and scsi_pm need to coordinate flushing async actions */
+/* sd, scsi core and power management need to coordinate flushing async actions */
 LIST_HEAD(scsi_sd_probe_domain);
 EXPORT_SYMBOL(scsi_sd_probe_domain);
-#endif
 
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
index ae781487461829ae190f41f52d6f3e267e1fa5bb..0727345388766d22cfbbc9441c92e291a5f8c841 100644 (file)
@@ -22,11 +22,6 @@ static int __init wait_scan_init(void)
         * and might not yet have reached the scsi async scanning
         */
        wait_for_device_probe();
-       /*
-        * and then we wait for the actual asynchronous scsi scan
-        * to finish.
-        */
-       scsi_complete_async_scans();
        return 0;
 }
 
index 6f0a4c612b3bf0f60a952d080033f98f1b2b4cff..6f72b80121a02afdc9dda6fc82530af4e16017d9 100644 (file)
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
 {
        if (sdp->host->max_cmd_len < 16)
                return 0;
+       if (sdp->try_rc_10_first)
+               return 0;
        if (sdp->scsi_level > SCSI_SPC_2)
                return 1;
        if (scsi_device_protection(sdp))
index 46ef5fe51db5476b797bffe719fed5bbfccca395..0c73dd4f43a0d0a7e727e1f92a0929095becd392 100644 (file)
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
        if (!cs) {
-               cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+               cs = kzalloc(sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                cs = spi->controller_state;
                list_del(&cs->node);
 
+               kfree(cs);
        }
 
        if (spi->chip_select < spi->master->num_chipselect) {
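
The omap2-mcspi hunks above switch the per-chip-select state from devm_kzalloc() back to kzalloc() with an explicit kfree() in ->cleanup(): devm memory lives until the device unbinds, which is the wrong lifetime for an object the SPI core tears down per spi_device. A sketch of the manual pairing (my_* names are illustrative):

#include <linux/slab.h>

struct my_cs_state {
        int chip_select;
};

/* Allocated in ->setup(), so it must be freed in ->cleanup(), not by devm. */
static struct my_cs_state *my_cs_alloc(int cs)
{
        struct my_cs_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state)
                return NULL;
        state->chip_select = cs;
        return state;
}

static void my_cs_free(struct my_cs_state *state)
{
        kfree(state);           /* pairs with the kzalloc() above */
}
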
index 1c3d6386ea36a918d71a1dbed9c3c07cbafaa18a..aeac1caba3f9918b2f574afc4189bd05743f403c 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/usb.h>
 #include <linux/errno.h>
+#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
 }
 EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
 
+#if IS_ENABLED(CONFIG_USB)
+
 static int comedi_old_usb_auto_config(struct usb_interface *intf,
                                      struct comedi_driver *driver)
 {
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
        comedi_driver_unregister(comedi_driver);
 }
 EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
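
Wrapping the comedi USB glue in #if IS_ENABLED(CONFIG_USB) keeps it out of builds where USB is disabled while still covering both =y and =m. A minimal illustration (my_usb_glue_init is hypothetical):

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_USB)
/* Compiled only when CONFIG_USB is built-in or modular. */
static int my_usb_glue_init(void)
{
        return 0;
}
#endif
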
index 292af0f7f4511668b03e7cc2662f85093549612f..51665132c61b8057e6158d63ee5a595504308225 100644 (file)
@@ -104,7 +104,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
 
 void netlink_exit(struct sock *sock)
 {
-       sock_release(sock->sk_socket);
+       netlink_kernel_release(sock);
 }
 
 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
index 0338c7cd0a8b4a7d5f1ead8a4c766c981c59cf07..f03fbd3bb4547618ced53e342ee9d546052fed9f 100644 (file)
@@ -29,8 +29,6 @@ Then fill in the following:
        * info->driver_module:
                Set to THIS_MODULE. Used to ensure correct ownership
                of various resources allocate by the core.
-       * info->num_interrupt_lines:
-               Number of event triggering hardware lines the device has.
        * info->event_attrs:
                Attributes used to enable / disable hardware events.
        * info->attrs:
index 2490dd25093b43cdc889de09d0bf264e81369ed8..8f1b3af02f299b869f7aeffa629ccdfc3cfb1c19 100644 (file)
@@ -13,6 +13,7 @@ config AD7291
 config AD7298
        tristate "Analog Devices AD7298 ADC driver"
        depends on SPI
+       select IIO_KFIFO_BUF if IIO_BUFFER
        help
          Say yes here to build support for Analog Devices AD7298
          8 Channel ADC with temperature sensor.
index 10ab6dc823b911453275dbce860b68ee4ef957c0..a13afff2dfe6d219264096bcfc1f51d8f8b7e009 100644 (file)
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
                .indexed = 1,                                   \
                .channel = num,                                 \
                .address = num,                                 \
-               .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,    \
+               .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |   \
+                               IIO_CHAN_INFO_SCALE_SHARED_BIT, \
                .scan_index = num,                              \
                .scan_type = IIO_ST('s', 16, 16, 0),            \
        }
index 3295ea63f3eb5d53bcafabc39a372846ba0610b3..97ef67036e3f2befa84f148b45b471df36d22650 100644 (file)
@@ -129,6 +129,7 @@ static void send_space_homebrew(long length);
 
 static struct lirc_serial hardware[] = {
        [LIRC_HOMEBREW] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
                .signal_pin        = UART_MSR_DCD,
                .signal_pin_change = UART_MSR_DDCD,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IRDEO] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = UART_MCR_OUT2,
@@ -156,6 +158,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IRDEO_REMOTE] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_ANIMAX] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
                .signal_pin        = UART_MSR_DCD,
                .signal_pin_change = UART_MSR_DDCD,
                .on  = 0,
@@ -177,6 +181,7 @@ static struct lirc_serial hardware[] = {
        },
 
        [LIRC_IGOR] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
                .signal_pin        = UART_MSR_DSR,
                .signal_pin_change = UART_MSR_DDSR,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@ static struct lirc_serial hardware[] = {
         * See also http://www.nslu2-linux.org for this device
         */
        [LIRC_NSLU2] = {
+               .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
                .signal_pin        = UART_MSR_CTS,
                .signal_pin_change = UART_MSR_DCTS,
                .on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
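
Each lirc_serial hardware[] entry above now initialises its own spinlock statically with __SPIN_LOCK_UNLOCKED(), which also gives lockdep a distinct name per lock. The same construct in isolation, with a hypothetical my_ports array:

#include <linux/spinlock.h>

struct my_port {
        spinlock_t lock;
        int signal_pin;
};

/* Each element names its own lock so lockdep can tell them apart. */
static struct my_port my_ports[2] = {
        [0] = {
                .lock = __SPIN_LOCK_UNLOCKED(my_ports[0].lock),
                .signal_pin = 1,
        },
        [1] = {
                .lock = __SPIN_LOCK_UNLOCKED(my_ports[1].lock),
                .signal_pin = 2,
        },
};
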
index 11acd4c35ed2c0d043cb8b45a12e5a36ca87460e..8c6ed3b0c6f6c9e63c15d0c42ca1985878a2c270 100644 (file)
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
         */
        ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
        if (ret) {
-               dev_err(dev->dev, "could not map (paddr)!\n");
+               dev_err(dev->dev,
+                       "could not map (paddr)!  Skipping framebuffer alloc\n");
                ret = -ENOMEM;
                goto fail;
        }
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
 
        fbi = helper->fbdev;
 
-       unregister_framebuffer(fbi);
-       framebuffer_release(fbi);
+       /* only cleanup framebuffer if it is present */
+       if (fbi) {
+               unregister_framebuffer(fbi);
+               framebuffer_release(fbi);
+       }
 
        drm_fb_helper_fini(helper);
 
index 4e7ef0e6b79c0f13bbb0c05d360a368ea9cb3cd9..d46764b5aaba07513dbf399be27a032b6eb1b702 100644 (file)
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
        return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-       .put_page = zcache_frontswap_put_page,
-       .get_page = zcache_frontswap_get_page,
+       .store = zcache_frontswap_store,
+       .load = zcache_frontswap_load,
        .invalidate_page = zcache_frontswap_flush_page,
        .invalidate_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
index 9bd18e2d05130dcbab81c55cd4853c68521870bf..69f616c6964ecd752c23f2f5918d03abee2cd7c9 100644 (file)
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        /* - */
        {USB_DEVICE(0x20F4, 0x646B)},
        {USB_DEVICE(0x083A, 0xC512)},
+       {USB_DEVICE(0x25D4, 0x4CA1)},
+       {USB_DEVICE(0x25D4, 0x4CAB)},
 
 /* RTL8191SU */
        /* Realtek */
index 2734dacacbaf3d19e29a4399f4fb4e50dc6d86d4..784c796b9848a12b6167150d3a94d20a086e68b8 100644 (file)
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
  * Swizzling increases objects per swaptype, increasing tmem concurrency
  * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
  * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
  */
 #define SWIZ_BITS              8
 #define SWIZ_MASK              ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
        return oid;
 }
 
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 
 /* returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
 }
 
 static struct frontswap_ops zcache_frontswap_ops = {
-       .put_page = zcache_frontswap_put_page,
-       .get_page = zcache_frontswap_get_page,
+       .store = zcache_frontswap_store,
+       .load = zcache_frontswap_load,
        .invalidate_page = zcache_frontswap_flush_page,
        .invalidate_area = zcache_frontswap_flush_area,
        .init = zcache_frontswap_init
index 37c609898f84b53ffdd5b959bd9b3fa663739247..7e6136e2ce81dc256babc191fab05953aabf6bfc 100644 (file)
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
 {
        struct sbp_tport *tport = agent->tport;
        struct sbp_tpg *tpg = tport->tpg;
-       int login_id;
+       int id;
        struct sbp_login_descriptor *login;
 
-       login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+       id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
 
-       login = sbp_login_find_by_id(tpg, login_id);
+       login = sbp_login_find_by_id(tpg, id);
        if (!login) {
-               pr_warn("cannot find login: %d\n", login_id);
+               pr_warn("cannot find login: %d\n", id);
 
                req->status.status = cpu_to_be32(
                        STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
index e624b836469cdc48af94a51be106df42a85ab63f..91799973081a3d907cd260792df3f573d1dbec82 100644 (file)
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 out:
        transport_kunmap_data_sg(cmd);
-       target_complete_cmd(cmd, GOOD);
-       return 0;
+       if (!rc)
+               target_complete_cmd(cmd, GOOD);
+       return rc;
 }
 
 static inline int core_alua_state_nonoptimized(
index 9888693a18fe0c7a024ef150483097a12e19fcc4..664f6e775d0e45e1f4baac3f1c68f724f8185fc4 100644 (file)
@@ -1095,7 +1095,7 @@ int target_emulate_write_same(struct se_cmd *cmd)
        if (num_blocks != 0)
                range = num_blocks;
        else
-               range = (dev->transport->get_blocks(dev) - lba);
+               range = (dev->transport->get_blocks(dev) - lba) + 1;
 
        pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
                 (unsigned long long)lba, (unsigned long long)range);
index 686dba189f8eba7fac5bd9ab1d13e6833ec1e4b4..9f99d0404908fd04fe4a68e30c00b3e015f48879 100644 (file)
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
                ret = PTR_ERR(dev_p);
                goto fail;
        }
-
-       /* O_DIRECT too? */
-       flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
        /*
-        * If fd_buffered_io=1 has not been set explicitly (the default),
-        * use O_SYNC to force FILEIO writes to disk.
+        * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+        * of pure timestamp updates.
         */
-       if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
-               flags |= O_SYNC;
+       flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
 
        file = filp_open(dev_p, flags, 0600);
        if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
        }
 }
 
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct fd_dev *fd_dev = dev->dev_ptr;
-       loff_t start = cmd->t_task_lba *
-               dev->se_sub_dev->se_dev_attrib.block_size;
-       loff_t end = start + cmd->data_length;
-       int ret;
-
-       pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-               cmd->t_task_lba, cmd->data_length);
-
-       ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
-       if (ret != 0)
-               pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
 static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_nents, enum dma_data_direction data_direction)
 {
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                ret = fd_do_readv(cmd, sgl, sgl_nents);
        } else {
                ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+               /*
+                * Perform an implicit vfs_fsync_range() for fd_do_writev() ops
+                * for SCSI WRITEs with Forced Unit Access (FUA) set.
+                * Allow this to happen independent of WCE=0 setting.
+                */
                if (ret > 0 &&
-                   dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA)) {
-                       /*
-                        * We might need to be a bit smarter here
-                        * and return some sense data to let the initiator
-                        * know the FUA WRITE cache sync failed..?
-                        */
-                       fd_emulate_write_fua(cmd);
-               }
+                       struct fd_dev *fd_dev = dev->dev_ptr;
+                       loff_t start = cmd->t_task_lba *
+                               dev->se_sub_dev->se_dev_attrib.block_size;
+                       loff_t end = start + cmd->data_length;
 
+                       vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+               }
        }
 
        if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
 static match_table_t tokens = {
        {Opt_fd_dev_name, "fd_dev_name=%s"},
        {Opt_fd_dev_size, "fd_dev_size=%s"},
-       {Opt_fd_buffered_io, "fd_buffered_io=%d"},
        {Opt_err, NULL}
 };
 
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
        struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
-       int ret = 0, arg, token;
+       int ret = 0, token;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
                                        " bytes\n", fd_dev->fd_dev_size);
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
-               case Opt_fd_buffered_io:
-                       match_int(args, &arg);
-                       if (arg != 1) {
-                               pr_err("bogus fd_buffered_io=%d value\n", arg);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-
-                       pr_debug("FILEIO: Using buffered I/O"
-                               " operations for struct fd_dev\n");
-
-                       fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
-                       break;
                default:
                        break;
                }
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
        ssize_t bl = 0;
 
        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
-       bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
-               fd_dev->fd_dev_name, fd_dev->fd_dev_size,
-               (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
-               "Buffered" : "Synchronous");
+       bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
+               fd_dev->fd_dev_name, fd_dev->fd_dev_size);
        return bl;
 }
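
With FILEIO now opened O_DSYNC, the remaining explicit sync above is the FUA case: vfs_fsync_range() flushes only the byte range the write touched, with datasync=1 so pure metadata is skipped. A hedged sketch of that range computation, with illustrative my_* names:

#include <linux/fs.h>
#include <linux/types.h>

/* Flush just the blocks a FUA write covered, as a datasync. */
static int my_fua_flush(struct file *filp, sector_t lba, u32 bytes,
                        u32 block_size)
{
        loff_t start = (loff_t)lba * block_size;
        loff_t end = start + bytes;

        return vfs_fsync_range(filp, start, end, 1);
}
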
 
index fbd59ef7d8be4e7fec85229634cfb8244ce84b40..70ce7fd7111dd81da3cd1b4d89f5eff822889c8a 100644 (file)
@@ -14,7 +14,6 @@
 
 #define FBDF_HAS_PATH          0x01
 #define FBDF_HAS_SIZE          0x02
-#define FDBD_USE_BUFFERED_IO   0x04
 
 struct fd_dev {
        u32             fbd_flags;
index 85564998500a5bc1330fefbd0860a5425fda5939..a1bcd927a9e60ed6a80903ff737c661c075b675b 100644 (file)
@@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file(
        if (IS_ERR(file) || !file || !file->f_dentry) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
-               return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+               return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
        }
 
        iov[0].iov_base = &buf[0];
@@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
                cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = EINVAL;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3828,7 +3828,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
         */
        if (!cmd->se_sess) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (cmd->data_length < 24) {
index b05fdc0c05d33df0560d03222f332fa40b249864..634d0f31a28cc59f621ea6122a13777c320339d5 100644 (file)
@@ -315,7 +315,7 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
 
-static void target_release_session(struct kref *kref)
+void target_release_session(struct kref *kref)
 {
        struct se_session *se_sess = container_of(kref,
                        struct se_session, sess_kref);
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session);
 
 void target_put_session(struct se_session *se_sess)
 {
+       struct se_portal_group *tpg = se_sess->se_tpg;
+
+       if (tpg->se_tpg_tfo->put_session != NULL) {
+               tpg->se_tpg_tfo->put_session(se_sess);
+               return;
+       }
        kref_put(&se_sess->sess_kref, target_release_session);
 }
 EXPORT_SYMBOL(target_put_session);
index f03fb9730f5bb89a0cd003817544693e3600e383..5b65f33939a84ab6eef4a12cc0fe84b61d64e96c 100644 (file)
@@ -230,6 +230,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 {
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
 
+       if (cmd->aborted)
+               return ~0;
        return fc_seq_exch(cmd->seq)->rxid;
 }
 
index cb99da920068986d2c2153f6af507781561074d5..87901fa74dd7e1acb136cbdf0607aa4b8eeeb652 100644 (file)
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
        struct ft_tport *tport;
        int i;
 
-       tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+       tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+                                         lockdep_is_held(&ft_lport_lock));
        if (tport && tport->tpg)
                return tport;
 
index 514a691abea0e2bf7ed517c7651f61474e82b293..3ab2bd540b547814d80d1e090bf171867c9d7dde 100644 (file)
@@ -23,6 +23,7 @@ config SPEAR_THERMAL
        bool "SPEAr thermal sensor driver"
        depends on THERMAL
        depends on PLAT_SPEAR
+       depends on OF
        help
          Enable this to plug the SPEAr thermal sensor driver into the Linux
          thermal framework
index c2e32df3b164e852137a2c720578899ac8f46b22..5f8ee39f200027c729879e31f4039ae8cd54fd38 100644 (file)
@@ -20,9 +20,9 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/platform_data/spear_thermal.h>
 #include <linux/thermal.h>
 
 #define MD_FACTOR      1000
@@ -103,21 +103,20 @@ static int spear_thermal_probe(struct platform_device *pdev)
 {
        struct thermal_zone_device *spear_thermal = NULL;
        struct spear_thermal_dev *stdev;
-       struct spear_thermal_pdata *pdata;
-       int ret = 0;
+       struct device_node *np = pdev->dev.of_node;
        struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       int ret = 0, val;
+
+       if (!np || !of_property_read_u32(np, "st,thermal-flags", &val)) {
+               dev_err(&pdev->dev, "Failed: DT Pdata not passed\n");
+               return -EINVAL;
+       }
 
        if (!stres) {
                dev_err(&pdev->dev, "memory resource missing\n");
                return -ENODEV;
        }
 
-       pdata = dev_get_platdata(&pdev->dev);
-       if (!pdata) {
-               dev_err(&pdev->dev, "platform data is NULL\n");
-               return -EINVAL;
-       }
-
        stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
        if (!stdev) {
                dev_err(&pdev->dev, "kzalloc fail\n");
@@ -144,10 +143,10 @@ static int spear_thermal_probe(struct platform_device *pdev)
                goto put_clk;
        }
 
-       stdev->flags = pdata->thermal_flags;
+       stdev->flags = val;
        writel_relaxed(stdev->flags, stdev->thermal_base);
 
-       spear_thermal = thermal_zone_device_register("spear_thermal", 0,
+       spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
                                stdev, &ops, 0, 0, 0, 0);
        if (IS_ERR(spear_thermal)) {
                dev_err(&pdev->dev, "thermal zone device is NULL\n");
@@ -189,6 +188,12 @@ static int spear_thermal_exit(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id spear_thermal_id_table[] = {
+       { .compatible = "st,thermal-spear1340" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, spear_thermal_id_table);
+
 static struct platform_driver spear_thermal_driver = {
        .probe = spear_thermal_probe,
        .remove = spear_thermal_exit,
@@ -196,6 +201,7 @@ static struct platform_driver spear_thermal_driver = {
                .name = "spear_thermal",
                .owner = THIS_MODULE,
                .pm = &spear_thermal_pm_ops,
+               .of_match_table = of_match_ptr(spear_thermal_id_table),
        },
 };
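
The spear_thermal conversion above replaces platform data with a device-tree property and a compatible match table. As a reminder of the API convention the hunk relies on, of_property_read_u32() returns 0 on success and a negative error when the property is missing or malformed. A minimal probe sketch; the my_* names are hypothetical, while the property and compatible strings are the ones from the diff:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        u32 flags;

        /* 0 means the u32 was read successfully; non-zero is an error. */
        if (!np || of_property_read_u32(np, "st,thermal-flags", &flags))
                return -EINVAL;

        dev_info(&pdev->dev, "thermal flags: %u\n", flags);
        return 0;
}

static const struct of_device_id my_of_match[] = {
        { .compatible = "st,thermal-spear1340" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_of_match);
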
 
index 022bacb71a7ede51571a6976830859d3f674bb71..2d7a9fe8f365b64670dca63b98f925e7b0a15e73 100644 (file)
@@ -195,6 +195,28 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
        }
 }
 
+static ssize_t
+trip_point_temp_store(struct device *dev, struct device_attribute *attr,
+                    const char *buf, size_t count)
+{
+       struct thermal_zone_device *tz = to_thermal_zone(dev);
+       int trip, ret;
+       unsigned long temperature;
+
+       if (!tz->ops->set_trip_temp)
+               return -EPERM;
+
+       if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
+               return -EINVAL;
+
+       if (kstrtoul(buf, 10, &temperature))
+               return -EINVAL;
+
+       ret = tz->ops->set_trip_temp(tz, trip, temperature);
+
+       return ret ? ret : count;
+}
+
 static ssize_t
 trip_point_temp_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
@@ -217,6 +239,52 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%ld\n", temperature);
 }
 
+static ssize_t
+trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct thermal_zone_device *tz = to_thermal_zone(dev);
+       int trip, ret;
+       unsigned long temperature;
+
+       if (!tz->ops->set_trip_hyst)
+               return -EPERM;
+
+       if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
+               return -EINVAL;
+
+       if (kstrtoul(buf, 10, &temperature))
+               return -EINVAL;
+
+       /*
+        * We are not doing any check on the 'temperature' value
+        * here. The driver implementing 'set_trip_hyst' has to
+        * take care of this.
+        */
+       ret = tz->ops->set_trip_hyst(tz, trip, temperature);
+
+       return ret ? ret : count;
+}
+
+static ssize_t
+trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct thermal_zone_device *tz = to_thermal_zone(dev);
+       int trip, ret;
+       unsigned long temperature;
+
+       if (!tz->ops->get_trip_hyst)
+               return -EPERM;
+
+       if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
+               return -EINVAL;
+
+       ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
+
+       return ret ? ret : sprintf(buf, "%ld\n", temperature);
+}
+
 static ssize_t
 passive_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
@@ -283,33 +351,6 @@ static DEVICE_ATTR(temp, 0444, temp_show, NULL);
 static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
 static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
 
-static struct device_attribute trip_point_attrs[] = {
-       __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_0_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_1_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_1_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_2_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_2_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_3_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_3_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_4_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_4_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_5_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_5_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_6_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_6_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_7_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_7_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_8_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_8_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_9_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_9_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_10_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_10_temp, 0444, trip_point_temp_show, NULL),
-       __ATTR(trip_point_11_type, 0444, trip_point_type_show, NULL),
-       __ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
-};
-
 /* sys I/F for cooling device */
 #define to_cooling_device(_dev)        \
        container_of(_dev, struct thermal_cooling_device, device)
@@ -1088,10 +1129,114 @@ leave:
 }
 EXPORT_SYMBOL(thermal_zone_device_update);
 
+/**
+ * create_trip_attrs - create attributes for trip points
+ * @tz:                the thermal zone device
+ * @mask:      Writeable trip point bitmap.
+ */
+static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
+{
+       int indx;
+       int size = sizeof(struct thermal_attr) * tz->trips;
+
+       tz->trip_type_attrs = kzalloc(size, GFP_KERNEL);
+       if (!tz->trip_type_attrs)
+               return -ENOMEM;
+
+       tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL);
+       if (!tz->trip_temp_attrs) {
+               kfree(tz->trip_type_attrs);
+               return -ENOMEM;
+       }
+
+       if (tz->ops->get_trip_hyst) {
+               tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL);
+               if (!tz->trip_hyst_attrs) {
+                       kfree(tz->trip_type_attrs);
+                       kfree(tz->trip_temp_attrs);
+                       return -ENOMEM;
+               }
+       }
+
+
+       for (indx = 0; indx < tz->trips; indx++) {
+               /* create trip type attribute */
+               snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
+                        "trip_point_%d_type", indx);
+
+               sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
+               tz->trip_type_attrs[indx].attr.attr.name =
+                                               tz->trip_type_attrs[indx].name;
+               tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
+               tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
+
+               device_create_file(&tz->device,
+                                  &tz->trip_type_attrs[indx].attr);
+
+               /* create trip temp attribute */
+               snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
+                        "trip_point_%d_temp", indx);
+
+               sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
+               tz->trip_temp_attrs[indx].attr.attr.name =
+                                               tz->trip_temp_attrs[indx].name;
+               tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
+               tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
+               if (mask & (1 << indx)) {
+                       tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
+                       tz->trip_temp_attrs[indx].attr.store =
+                                                       trip_point_temp_store;
+               }
+
+               device_create_file(&tz->device,
+                                  &tz->trip_temp_attrs[indx].attr);
+
+               /* create Optional trip hyst attribute */
+               if (!tz->ops->get_trip_hyst)
+                       continue;
+               snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
+                        "trip_point_%d_hyst", indx);
+
+               sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
+               tz->trip_hyst_attrs[indx].attr.attr.name =
+                                       tz->trip_hyst_attrs[indx].name;
+               tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
+               tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
+               if (tz->ops->set_trip_hyst) {
+                       tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
+                       tz->trip_hyst_attrs[indx].attr.store =
+                                       trip_point_hyst_store;
+               }
+
+               device_create_file(&tz->device,
+                                  &tz->trip_hyst_attrs[indx].attr);
+       }
+       return 0;
+}
+
+static void remove_trip_attrs(struct thermal_zone_device *tz)
+{
+       int indx;
+
+       for (indx = 0; indx < tz->trips; indx++) {
+               device_remove_file(&tz->device,
+                                  &tz->trip_type_attrs[indx].attr);
+               device_remove_file(&tz->device,
+                                  &tz->trip_temp_attrs[indx].attr);
+               if (tz->ops->get_trip_hyst)
+                       device_remove_file(&tz->device,
+                                 &tz->trip_hyst_attrs[indx].attr);
+       }
+       kfree(tz->trip_type_attrs);
+       kfree(tz->trip_temp_attrs);
+       kfree(tz->trip_hyst_attrs);
+}
+
 /**
  * thermal_zone_device_register - register a new thermal zone device
  * @type:      the thermal zone device type
  * @trips:     the number of trip points the thermal zone support
+ * @mask:      a bit string indicating the writability of trip points
  * @devdata:   private device data
  * @ops:       standard thermal zone device callbacks
  * @tc1:       thermal coefficient 1 for passive calculations
@@ -1107,7 +1252,7 @@ EXPORT_SYMBOL(thermal_zone_device_update);
  * section 11.1.5.1 of the ACPI specification 3.0.
  */
 struct thermal_zone_device *thermal_zone_device_register(char *type,
-       int trips, void *devdata,
+       int trips, int mask, void *devdata,
        const struct thermal_zone_device_ops *ops,
        int tc1, int tc2, int passive_delay, int polling_delay)
 {
@@ -1121,7 +1266,7 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
        if (strlen(type) >= THERMAL_NAME_LENGTH)
                return ERR_PTR(-EINVAL);
 
-       if (trips > THERMAL_MAX_TRIPS || trips < 0)
+       if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
                return ERR_PTR(-EINVAL);
 
        if (!ops || !ops->get_temp)
@@ -1175,15 +1320,11 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
                        goto unregister;
        }
 
+       result = create_trip_attrs(tz, mask);
+       if (result)
+               goto unregister;
+
        for (count = 0; count < trips; count++) {
-               result = device_create_file(&tz->device,
-                                           &trip_point_attrs[count * 2]);
-               if (result)
-                       break;
-               result = device_create_file(&tz->device,
-                                           &trip_point_attrs[count * 2 + 1]);
-               if (result)
-                       goto unregister;
                tz->ops->get_trip_type(tz, count, &trip_type);
                if (trip_type == THERMAL_TRIP_PASSIVE)
                        passive = 1;
@@ -1232,7 +1373,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 {
        struct thermal_cooling_device *cdev;
        struct thermal_zone_device *pos = NULL;
-       int count;
 
        if (!tz)
                return;
@@ -1259,13 +1399,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
        device_remove_file(&tz->device, &dev_attr_temp);
        if (tz->ops->get_mode)
                device_remove_file(&tz->device, &dev_attr_mode);
+       remove_trip_attrs(tz);
 
-       for (count = 0; count < tz->trips; count++) {
-               device_remove_file(&tz->device,
-                                  &trip_point_attrs[count * 2]);
-               device_remove_file(&tz->device,
-                                  &trip_point_attrs[count * 2 + 1]);
-       }
        thermal_remove_hwmon_sysfs(tz);
        release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
        idr_destroy(&tz->idr);
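
A side note on the mask argument introduced above: it carries one writable/read-only bit per trip point, and the "mask >> trips" test rejects any bit set past the last trip. Below is a minimal standalone sketch of that validation and of the per-trip mode it implies; the names and the MAX_TRIPS limit are illustrative, not the kernel's.

#include <stdio.h>
#include <errno.h>

#define MAX_TRIPS 12

/* Reject a mask that sets bits for trip points that do not exist. */
static int check_trip_mask(int trips, int mask)
{
        if (trips < 0 || trips > MAX_TRIPS || mask >> trips)
                return -EINVAL;
        return 0;
}

int main(void)
{
        int trips = 3, mask = 0x5, i;   /* trips 0 and 2 writable */

        if (check_trip_mask(trips, mask))
                return 1;
        for (i = 0; i < trips; i++)
                printf("trip %d: %s\n", i,
                       (mask & (1 << i)) ? "rw" : "read-only");
        return 0;
}
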
index ced26c8ccd573eb8e6757a30681901b7a0ac88eb..0d2ea0c224c35c6012e0d6c1f7fe04ff337a8931 100644 (file)
@@ -401,7 +401,7 @@ out:
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
 {
        u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
        hvc_opal_privs[index] = &hvc_opal_boot_priv;
index d3d91dae065cfdceb3c6b4918c27ad6c252caf33..944eaeb8e0cff62b89ea735b0d98e167ddd806d0 100644 (file)
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void)
        /* already configured */
        if (info->intf != NULL)
                return 0;
-
+       /*
+        * If the toolstack (or the hypervisor) hasn't set these values, the
+        * default value is 0. Even though mfn = 0 and evtchn = 0 are
+        * theoretically correct values, in practice they never are and they
+        * mean that a legacy toolstack hasn't initialized the pv console correctly.
+        */
        r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
-       if (r < 0) {
-               kfree(info);
-               return -ENODEV;
-       }
+       if (r < 0 || v == 0)
+               goto err;
        info->evtchn = v;
-       hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
-       if (r < 0) {
-               kfree(info);
-               return -ENODEV;
-       }
+       v = 0;
+       r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+       if (r < 0 || v == 0)
+               goto err;
        mfn = v;
        info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
-       if (info->intf == NULL) {
-               kfree(info);
-               return -ENODEV;
-       }
+       if (info->intf == NULL)
+               goto err;
        info->vtermno = HVC_COOKIE;
 
        spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void)
        spin_unlock(&xencons_lock);
 
        return 0;
+err:
+       kfree(info);
+       return -ENODEV;
 }
 
 static int xen_pv_console_init(void)
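
The reworked function above routes every failure through a single err label so info is freed exactly once. Here is a standalone miniature of the same single-exit pattern, with invented resource names.

#include <stdio.h>
#include <stdlib.h>

/*
 * Build an object in several steps; any failing step jumps to one error
 * label so the partially built object is released exactly once.
 */
static char *make_banner(const char *name)
{
        char *buf = malloc(64);

        if (!buf)
                return NULL;
        if (!name || !*name)            /* treat an unset value as an error */
                goto err;
        snprintf(buf, 64, "console: %s", name);
        return buf;
err:
        free(buf);
        return NULL;
}

int main(void)
{
        char *b = make_banner("hvc0");

        if (b) {
                puts(b);
                free(b);
        }
        return 0;
}
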
index 47d061b9ad4d24d24ac980e783e9539cebc27f3d..6e1958a325bd8b5512675dd52d399b32bf04803e 100644 (file)
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
 
 /**
  *     serial8250_register_8250_port - register a serial port
- *     @port: serial port template
+ *     @up: serial port template
  *
  *     Configure the serial port specified by the request. If the
  *     port exists and is in use, it is hung up and unregistered
index 4ad721fb84052a61c3452d12532bcc9d21c9a84a..c17923ec6e9505cee3f15c80670f63d9e0644916 100644 (file)
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
 struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;
+       /* Two optional pin states - default & sleep */
+       struct pinctrl          *pinctrl;
+       struct pinctrl_state    *pins_default;
+       struct pinctrl_state    *pins_sleep;
        const struct vendor_data *vendor;
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
        unsigned int cr;
        int retval;
 
+       /* Optionally enable pins to be muxed in and configured */
+       if (!IS_ERR(uap->pins_default)) {
+               retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+               if (retval)
+                       dev_err(port->dev,
+                               "could not set default pins\n");
+       }
+
        retval = clk_prepare(uap->clk);
        if (retval)
                goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
 {
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
        unsigned int cr;
+       int retval;
 
        /*
         * disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
         */
        clk_disable(uap->clk);
        clk_unprepare(uap->clk);
+       /* Optionally let pins go into sleep states */
+       if (!IS_ERR(uap->pins_sleep)) {
+               retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+               if (retval)
+                       dev_err(port->dev,
+                               "could not set pins to sleep state\n");
+       }
+
 
        if (uap->port.dev->platform_data) {
                struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
        if (!uap)
                return -ENODEV;
 
+       /* Allow pins to be muxed in and configured */
+       if (!IS_ERR(uap->pins_default)) {
+               ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+               if (ret)
+                       dev_err(uap->port.dev,
+                               "could not set default pins\n");
+       }
+
        ret = clk_prepare(uap->clk);
        if (ret)
                return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct uart_amba_port *uap;
        struct vendor_data *vendor = id->data;
-       struct pinctrl *pinctrl;
        void __iomem *base;
        int i, ret;
 
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
                goto free;
        }
 
-       pinctrl = devm_pinctrl_get_select_default(&dev->dev);
-       if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(pinctrl);
+       uap->pinctrl = devm_pinctrl_get(&dev->dev);
+       if (IS_ERR(uap->pinctrl)) {
+               ret = PTR_ERR(uap->pinctrl);
                goto unmap;
        }
+       uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+                                                PINCTRL_STATE_DEFAULT);
+       if (IS_ERR(uap->pins_default))
+               dev_err(&dev->dev, "could not get default pinstate\n");
+
+       uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+                                              PINCTRL_STATE_SLEEP);
+       if (IS_ERR(uap->pins_sleep))
+               dev_dbg(&dev->dev, "could not get sleep pinstate\n");
 
        uap->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(uap->clk)) {
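
The probe now caches the pinctrl handle and two named states instead of selecting the default state unconditionally, and later callers treat a missing state as optional via IS_ERR(). A condensed sketch of that pattern as a driver might write it; the struct and function names are hypothetical and error handling is trimmed.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

struct my_port {
        struct pinctrl          *pinctrl;
        struct pinctrl_state    *pins_default;
        struct pinctrl_state    *pins_sleep;
};

/* Cache the handle and the two optional states once, at probe time. */
static int my_port_get_pinctrl(struct device *dev, struct my_port *p)
{
        p->pinctrl = devm_pinctrl_get(dev);
        if (IS_ERR(p->pinctrl))
                return PTR_ERR(p->pinctrl);

        /* Either state may be absent; IS_ERR() is re-checked at use time. */
        p->pins_default = pinctrl_lookup_state(p->pinctrl,
                                               PINCTRL_STATE_DEFAULT);
        p->pins_sleep = pinctrl_lookup_state(p->pinctrl,
                                             PINCTRL_STATE_SLEEP);
        return 0;
}

/* Select one of the cached states, e.g. from startup or shutdown. */
static void my_port_select(struct my_port *p, bool sleep)
{
        struct pinctrl_state *s = sleep ? p->pins_sleep : p->pins_default;

        if (!IS_ERR(s))
                pinctrl_select_state(p->pinctrl, s);
}
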
index 34bd345da7751d9ed0ada9af43d98940f478c301..6ae2a58d62f2f82d152c60c5bc570a5dc9742846 100644 (file)
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
        spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 /*
  *     Wait for transmitter & holding register to empty
  */
index 4604153b7954b70288399bb675c373f1e16d2bf3..1bd9163bc1181eb0aaeae952acfaf910911b3720 100644 (file)
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
        return 0;
 }
 
+static void sci_cleanup_single(struct sci_port *port)
+{
+       sci_free_gpios(port);
+
+       clk_put(port->iclk);
+       clk_put(port->fclk);
+
+       pm_runtime_disable(port->port.dev);
+}
+
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
 static void serial_console_putchar(struct uart_port *port, int ch)
 {
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
        cpufreq_unregister_notifier(&port->freq_transition,
                                    CPUFREQ_TRANSITION_NOTIFIER);
 
-       sci_free_gpios(port);
-
        uart_remove_one_port(&sci_uart_driver, &port->port);
 
-       clk_put(port->iclk);
-       clk_put(port->fclk);
+       sci_cleanup_single(port);
 
-       pm_runtime_disable(&dev->dev);
        return 0;
 }
 
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
                           index+1, SCI_NPORTS);
                dev_notice(&dev->dev, "Consider bumping "
                           "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
-               return 0;
+               return -EINVAL;
        }
 
        ret = sci_init_single(dev, sciport, index, p);
        if (ret)
                return ret;
 
-       return uart_add_one_port(&sci_uart_driver, &sciport->port);
+       ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+       if (ret) {
+               sci_cleanup_single(sciport);
+               return ret;
+       }
+
+       return 0;
 }
 
 static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
 
        ret = sci_probe_single(dev, dev->id, p, sp);
        if (ret)
-               goto err_unreg;
+               return ret;
 
        sp->freq_transition.notifier_call = sci_notifier;
 
        ret = cpufreq_register_notifier(&sp->freq_transition,
                                        CPUFREQ_TRANSITION_NOTIFIER);
-       if (unlikely(ret < 0))
-               goto err_unreg;
+       if (unlikely(ret < 0)) {
+               sci_cleanup_single(sp);
+               return ret;
+       }
 
 #ifdef CONFIG_SH_STANDARD_BIOS
        sh_bios_gdb_detach();
 #endif
 
        return 0;
-
-err_unreg:
-       sci_remove(dev);
-       return ret;
 }
 
 static int sci_suspend(struct device *dev)
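
Factoring the teardown into sci_cleanup_single() lets the failed-probe path and the remove path release the same resources in the same order, instead of calling back into sci_remove() from probe. A standalone miniature of that shape, with invented names:

#include <stdlib.h>

struct port {
        char *rxbuf;
        char *txbuf;
};

/* One teardown helper shared by the failed-probe and remove paths. */
static void port_cleanup(struct port *p)
{
        free(p->rxbuf);
        free(p->txbuf);
        p->rxbuf = p->txbuf = NULL;
}

static int port_probe(struct port *p)
{
        p->rxbuf = malloc(256);
        p->txbuf = malloc(256);
        if (!p->rxbuf || !p->txbuf) {
                port_cleanup(p);        /* same helper as port_remove() */
                return -1;
        }
        return 0;
}

static void port_remove(struct port *p)
{
        port_cleanup(p);
}

int main(void)
{
        struct port p = { 0 };

        if (port_probe(&p))
                return 1;
        port_remove(&p);
        return 0;
}
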
index c691eea515375ac1e38f2afaec9ebc8509a2c256..f5ed3d75fa5a2ce615f32b10b7f0461f839e5757 100644 (file)
@@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK)    += image/
 obj-$(CONFIG_USB_SERIAL)       += serial/
 
 obj-$(CONFIG_USB)              += misc/
-obj-$(CONFIG_USB)              += phy/
+obj-$(CONFIG_USB_COMMON)       += phy/
 obj-$(CONFIG_EARLY_PRINTK_DBGP)        += early/
 
 obj-$(CONFIG_USB_ATM)          += atm/
index f2a120eea9d4872657879dcc00689ff28c41dfed..36a2a0b7b82cc61f2e8427463927149cdd86bf2e 100644 (file)
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
 
        usb_autopm_put_interface(acm->control);
 
+       /*
+        * Unthrottle device in case the TTY was closed while throttled.
+        */
+       spin_lock_irq(&acm->read_lock);
+       acm->throttled = 0;
+       acm->throttle_req = 0;
+       spin_unlock_irq(&acm->read_lock);
+
        if (acm_submit_read_urbs(acm, GFP_KERNEL))
                goto error_submit_read_urbs;
 
index ea8b304f0e853af63651e0d70c5f6d1b69938fe9..ee469274a3fe0d03aed9259f5f4c380330742aa3 100644 (file)
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
                .bInterfaceSubClass = 1,
                .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
        },
+       {
+                /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+               .match_flags        = USB_DEVICE_ID_MATCH_VENDOR |
+                                     USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor           = HUAWEI_VENDOR_ID,
+               .bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
+               .bInterfaceSubClass = 1,
+               .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
+       },
        { }
 };
 
@@ -491,6 +500,8 @@ retry:
                        goto retry;
                }
                if (!desc->reslength) { /* zero length read */
+                       dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+                       clear_bit(WDM_READ, &desc->flags);
                        spin_unlock_irq(&desc->iuspin);
                        goto retry;
                }
index 57ed9e400c06d938a91fd9713b95a9e97030b3a3..622b4a48e732e7a83b6c760d6563994394b0caa9 100644 (file)
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
 
        pci_save_state(pci_dev);
 
-       /*
-        * Some systems crash if an EHCI controller is in D3 during
-        * a sleep transition.  We have to leave such controllers in D0.
-        */
-       if (hcd->broken_pci_sleep) {
-               dev_dbg(dev, "Staying in PCI D0\n");
-               return retval;
-       }
-
        /* If the root hub is dead rather than suspended, disallow remote
         * wakeup.  usb_hc_died() should ensure that both hosts are marked as
         * dying, so we only need to check the primary roothub.
index 04fb834c3fa1395c4d4df4a90d485929931ea0e3..8fb484984c86c6f6532d4ac8696cd5f6f4a7e5ff 100644 (file)
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
 
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * A warm port reset is required to recover.
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
 {
        return hub_is_superspeed(hub->hdev) &&
-               (portstatus & USB_PORT_STAT_LINK_STATE) ==
-               USB_SS_PORT_LS_SS_INACTIVE;
+               (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_SS_INACTIVE) ||
+                ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_COMP_MOD));
 }
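
The helper now flags a port for warm reset in either of two link states rather than just SS.Inactive. A tiny standalone version of that kind of test over a packed status word; the field mask and encodings below are placeholders, not the USB-defined values.

#include <stdbool.h>
#include <stdio.h>

#define LINK_STATE_MASK 0x01e0          /* placeholder field mask */
#define LS_SS_INACTIVE  (6 << 5)        /* placeholder encodings */
#define LS_COMP_MODE    (10 << 5)

/* A warm reset is needed when the link is Inactive or in Compliance Mode. */
static bool warm_reset_required(unsigned int portstatus)
{
        unsigned int ls = portstatus & LINK_STATE_MASK;

        return ls == LS_SS_INACTIVE || ls == LS_COMP_MODE;
}

int main(void)
{
        printf("%d\n", warm_reset_required(LS_COMP_MODE | 0x1));   /* 1 */
        return 0;
}
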
 
 static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                         *
                         * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
                         */
-                       if (hub_port_inactive(hub, portstatus)) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                int ret;
 
                                if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -3379,7 +3383,7 @@ int usb_disable_lpm(struct usb_device *udev)
                return 0;
 
        udev->lpm_disable_count++;
-       if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0))
+       if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
                return 0;
 
        /* If LPM is enabled, attempt to disable it. */
@@ -4408,9 +4412,7 @@ static void hub_events(void)
                        /* Warm reset a USB3 protocol port if it's in
                         * SS.Inactive state.
                         */
-                       if (hub_is_superspeed(hub->hdev) &&
-                               (portstatus & USB_PORT_STAT_LINK_STATE)
-                                       == USB_SS_PORT_LS_SS_INACTIVE) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
                                hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
index b548cf1dbc625c66181c1a6928c160fc597bc2e6..bdd1c6749d88a9206208a27f7cbd7d966ebeb386 100644 (file)
@@ -1838,7 +1838,6 @@ free_interfaces:
                intfc = cp->intf_cache[i];
                intf->altsetting = intfc->altsetting;
                intf->num_altsetting = intfc->num_altsetting;
-               intf->intf_assoc = find_iad(dev, cp, i);
                kref_get(&intfc->ref);
 
                alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
                if (!alt)
                        alt = &intf->altsetting[0];
 
+               intf->intf_assoc =
+                       find_iad(dev, cp, alt->desc.bInterfaceNumber);
                intf->cur_altsetting = alt;
                usb_enable_interface(dev, intf, true);
                intf->dev.parent = &dev->dev;
index 3df1a1973b0559d7bb53fd4ce7eb148d25dff1e7..ec70df7aba17e45635306b0a924625f7c1c59aec 100644 (file)
@@ -1091,7 +1091,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                if (r == req) {
                        /* wait until it is processed */
                        dwc3_stop_active_transfer(dwc, dep->number);
-                       goto out0;
+                       goto out1;
                }
                dev_err(dwc->dev, "request %p was not queued to %s\n",
                                request, ep->name);
@@ -1099,6 +1099,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                goto out0;
        }
 
+out1:
        /* giveback the request */
        dwc3_gadget_giveback(dep, req, -ECONNRESET);
 
index e23bf7984aaf673e966ca358ea85c047f58481a4..9a9bced813ed9194bd8bd8f18b23a3fce1887f3c 100644 (file)
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 
        spin_lock_irqsave(&ep->udc->lock, flags);
 
-       if (ep->ep.desc) {
-               spin_unlock_irqrestore(&ep->udc->lock, flags);
-               DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
-               return -EBUSY;
-       }
-
        ep->ep.desc = desc;
        ep->ep.maxpacket = maxpacket;
 
index 51881f3bd07aa34cb93e937033d6458071fbaddc..b09452d6f33a8d0cbb382308cc7cb09fa2958eea 100644 (file)
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
        ep = container_of(_ep, struct qe_ep, ep);
 
        /* catch various bogus parameters */
-       if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] ||
+       if (!_ep || !desc || _ep->name == ep_name[0] ||
                        (desc->bDescriptorType != USB_DT_ENDPOINT))
                return -EINVAL;
 
index 4c07ca9cebf355741736d8a60306bd58a47d9e5b..7026919fc9014a1be634a3003248da199b7ecb5d 100644 (file)
@@ -153,10 +153,10 @@ struct usb_ep_para{
 #define USB_BUSMODE_DTB                0x02
 
 /* Endpoint basic handle */
-#define ep_index(EP)           ((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP)           ((EP)->ep.desc->bEndpointAddress & 0xF)
 #define ep_maxpacket(EP)       ((EP)->ep.maxpacket)
 #define ep_is_in(EP)   ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-                       USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+                       USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
                        & USB_DIR_IN) == USB_DIR_IN)
 
 /* ep0 transfer state */
index 28316858208bfeed10ab933b770295a9e6619b50..bc6f9bb9994a61a08d7579f59217aa764e0ec09b 100644 (file)
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
        ep = container_of(_ep, struct fsl_ep, ep);
 
        /* catch various bogus parameters */
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || (desc->bDescriptorType != USB_DT_ENDPOINT))
                return -EINVAL;
 
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
        /* for ep0: the desc defined here;
         * for other eps, gadget layer called ep_enable with defined desc
         */
-       udc_controller->eps[0].desc = &fsl_ep0_desc;
+       udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
        udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
 
        /* setup the udc->eps[] for non-control endpoints and link
index 5cd7b7e7ddb4e0488108730434ec224c4bd40e9e..f61a967f70828dd21c7c7e401388610e6b62ce74 100644 (file)
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
 /*
  * ### internal used help routines.
  */
-#define ep_index(EP)           ((EP)->desc->bEndpointAddress&0xF)
+#define ep_index(EP)           ((EP)->ep.desc->bEndpointAddress&0xF)
 #define ep_maxpacket(EP)       ((EP)->ep.maxpacket)
 #define ep_is_in(EP)   ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
-                       USB_DIR_IN ):((EP)->desc->bEndpointAddress \
+                       USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
                        & USB_DIR_IN)==USB_DIR_IN)
 #define get_ep_by_pipe(udc, pipe)      ((pipe == 1)? &udc->eps[0]: \
                                        &udc->eps[pipe])
index b241e6c6a7f2d545197ad5145bbecf07090dae2b..3d28fb976c7821898ea44fc4c8ac4656011d546a 100644 (file)
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
        unsigned long   flags;
 
        ep = container_of(_ep, struct goku_ep, ep);
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
        dev = ep->dev;
index 262acfd53e32b04f9d98f2c36d6adc84b64ef06a..2ab0388d93ebc06a4e11ac29988bdf5cca32b39d 100644 (file)
@@ -61,6 +61,7 @@
 #include <mach/irqs.h>
 #include <mach/board.h>
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #endif
 
index dbcd1329495ef015ed097ef8fec074ac71371011..117a4bba1b8c26a32b22e09400f6f346e531732d 100644 (file)
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
        ep = container_of(_ep, struct mv_ep, ep);
        udc = ep->udc;
 
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
 
index 7ba32469c5bdc789ac55abbe883b288bc1607c19..a460e8c204f42c9bacbac82c55c83bf9896e6672 100644 (file)
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
        u16             maxp;
 
        /* catch various bogus parameters */
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT
                        || ep->bEndpointAddress != desc->bEndpointAddress
                        || ep->maxpacket < usb_endpoint_maxp(desc)) {
index d7c8cb3bf759f66e02daecda062da3f0c4e0ac90..f7ff9e8e746a796f944713414ecb8acb43248f2a 100644 (file)
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
        struct pxa25x_udc       *dev;
 
        ep = container_of (_ep, struct pxa25x_ep, ep);
-       if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name
+       if (!_ep || !desc || _ep->name == ep0name
                        || desc->bDescriptorType != USB_DT_ENDPOINT
                        || ep->bEndpointAddress != desc->bEndpointAddress
                        || ep->fifo_size < usb_endpoint_maxp (desc)) {
index 36c6836eeb0f93818be4eb01367863db2e9e6ed8..236b271871a0bde44e353ebe5650c705bf48f163 100644 (file)
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
        u32 ecr = 0;
 
        hsep = our_ep(_ep);
-       if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name
+       if (!_ep || !desc || _ep->name == ep0name
                || desc->bDescriptorType != USB_DT_ENDPOINT
                || hsep->bEndpointAddress != desc->bEndpointAddress
                || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
index 3de71d37d75e209787f2dfaa0e783f8952808527..f2e51f50e528d8b532d7fcac3558c58e8554f382 100644 (file)
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
 
        ep = to_s3c2410_ep(_ep);
 
-       if (!_ep || !desc || ep->ep.desc
+       if (!_ep || !desc
                        || _ep->name == ep0name
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
index b100f5f9f4b63a3b9ad75f2cb39341109a14cf28..800be38c78b47f5d7990243bc71bd8fb14c2bdc6 100644 (file)
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
        hw = ehci->async->hw;
        hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
        hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
        hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7));    /* I = 1 */
+#endif
        hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
        hw->hw_qtd_next = EHCI_LIST_END(ehci);
        ehci->async->qh_state = QH_STATE_LINKED;
index a44294d13494a8c985a887db01e07a6cbe984277..c30435499a029de5ea40c24bfe6148feeadbebf5 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/gpio.h>
+#include <linux/clk.h>
 
 /* EHCI Register Set */
 #define EHCI_INSNREG04                                 (0xA0)
 #define        EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT             8
 #define        EHCI_INSNREG05_ULPI_WRDATA_SHIFT                0
 
+/* Errata i693 */
+static struct clk      *utmi_p1_fck;
+static struct clk      *utmi_p2_fck;
+static struct clk      *xclk60mhsp1_ck;
+static struct clk      *xclk60mhsp2_ck;
+static struct clk      *usbhost_p1_fck;
+static struct clk      *usbhost_p2_fck;
+static struct clk      *init_60m_fclk;
+
 /*-------------------------------------------------------------------------*/
 
 static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
        return __raw_readl(base + reg);
 }
 
+/* Erratum i693 workaround sequence */
+static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
+{
+       int ret = 0;
+
+       /* Switch to the internal 60 MHz clock */
+       ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
+       if (ret != 0)
+               ehci_err(ehci, "init_60m_fclk set parent"
+                       " failed error:%d\n", ret);
+
+       ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
+       if (ret != 0)
+               ehci_err(ehci, "init_60m_fclk set parent"
+                       " failed error:%d\n", ret);
+
+       clk_enable(usbhost_p1_fck);
+       clk_enable(usbhost_p2_fck);
+
+       /* Wait 1ms and switch back to the external clock */
+       mdelay(1);
+       ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
+       if (ret != 0)
+               ehci_err(ehci, "xclk60mhsp1_ck set parent"
+                       " failed error:%d\n", ret);
+
+       ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
+               ehci_err(ehci, "xclk60mhsp2_ck set parent"
+                       " failed error:%d\n", ret);
+                       "failed error:%d\n", ret);
+
+       clk_disable(usbhost_p1_fck);
+       clk_disable(usbhost_p2_fck);
+}
+
 static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
 {
        struct usb_hcd  *hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
        }
 }
 
+static int omap_ehci_hub_control(
+       struct usb_hcd  *hcd,
+       u16             typeReq,
+       u16             wValue,
+       u16             wIndex,
+       char            *buf,
+       u16             wLength
+)
+{
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+       u32 __iomem *status_reg = &ehci->regs->port_status[
+                               (wIndex & 0xff) - 1];
+       u32             temp;
+       unsigned long   flags;
+       int             retval = 0;
+
+       spin_lock_irqsave(&ehci->lock, flags);
+
+       if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+               temp = ehci_readl(ehci, status_reg);
+               if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+                       retval = -EPIPE;
+                       goto done;
+               }
+
+               temp &= ~PORT_WKCONN_E;
+               temp |= PORT_WKDISC_E | PORT_WKOC_E;
+               ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+               omap_ehci_erratum_i693(ehci);
+
+               set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+               goto done;
+       }
+
+       spin_unlock_irqrestore(&ehci->lock, flags);
+
+       /* Handle the hub control events here */
+       return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+       spin_unlock_irqrestore(&ehci->lock, flags);
+       return retval;
+}
+
 static void disable_put_regulator(
                struct ehci_hcd_omap_platform_data *pdata)
 {
@@ -192,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                }
        }
 
+       /* Hold PHYs in reset while initializing EHCI controller */
        if (pdata->phy_reset) {
                if (gpio_is_valid(pdata->reset_gpio_port[0]))
-                       gpio_request_one(pdata->reset_gpio_port[0],
-                                        GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
 
                if (gpio_is_valid(pdata->reset_gpio_port[1]))
-                       gpio_request_one(pdata->reset_gpio_port[1],
-                                        GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+                       gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
 
                /* Hold the PHY in RESET for enough time till DIR is high */
                udelay(10);
@@ -241,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
 
        ehci_reset(omap_ehci);
+       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       if (ret) {
+               dev_err(dev, "failed to add hcd with err %d\n", ret);
+               goto err_add_hcd;
+       }
 
        if (pdata->phy_reset) {
                /* Hold the PHY in RESET for enough time till
@@ -255,17 +348,79 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                        gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
        }
 
-       ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
-       if (ret) {
-               dev_err(dev, "failed to add hcd with err %d\n", ret);
+       /* root ports should always stay powered */
+       ehci_port_power(omap_ehci, 1);
+
+       /* get clocks */
+       utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+       if (IS_ERR(utmi_p1_fck)) {
+               ret = PTR_ERR(utmi_p1_fck);
+               dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
                goto err_add_hcd;
        }
 
-       /* root ports should always stay powered */
-       ehci_port_power(omap_ehci, 1);
+       xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+       if (IS_ERR(xclk60mhsp1_ck)) {
+               ret = PTR_ERR(xclk60mhsp1_ck);
+               dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+               goto err_utmi_p1_fck;
+       }
+
+       utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+       if (IS_ERR(utmi_p2_fck)) {
+               ret = PTR_ERR(utmi_p2_fck);
+               dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+               goto err_xclk60mhsp1_ck;
+       }
+
+       xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+       if (IS_ERR(xclk60mhsp2_ck)) {
+               ret = PTR_ERR(xclk60mhsp2_ck);
+               dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+               goto err_utmi_p2_fck;
+       }
+
+       usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+       if (IS_ERR(usbhost_p1_fck)) {
+               ret = PTR_ERR(usbhost_p1_fck);
+               dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+               goto err_xclk60mhsp2_ck;
+       }
+
+       usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+       if (IS_ERR(usbhost_p2_fck)) {
+               ret = PTR_ERR(usbhost_p2_fck);
+               dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+               goto err_usbhost_p1_fck;
+       }
+
+       init_60m_fclk = clk_get(dev, "init_60m_fclk");
+       if (IS_ERR(init_60m_fclk)) {
+               ret = PTR_ERR(init_60m_fclk);
+               dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+               goto err_usbhost_p2_fck;
+       }
 
        return 0;
 
+err_usbhost_p2_fck:
+       clk_put(usbhost_p2_fck);
+
+err_usbhost_p1_fck:
+       clk_put(usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+       clk_put(xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+       clk_put(utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+       clk_put(xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+       clk_put(utmi_p1_fck);
+
 err_add_hcd:
        disable_put_regulator(pdata);
        pm_runtime_put_sync(dev);
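
Each clk_get() failure above unwinds only the clocks obtained before it by falling through progressively earlier labels. A compressed sketch of that ladder with two clocks instead of seven; the clock names are reused from the patch purely as examples.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *clk_a, *clk_b;

static int get_port_clocks(struct device *dev)
{
        int ret;

        clk_a = clk_get(dev, "utmi_p1_gfclk");
        if (IS_ERR(clk_a))
                return PTR_ERR(clk_a);

        clk_b = clk_get(dev, "xclk60mhsp1_ck");
        if (IS_ERR(clk_b)) {
                ret = PTR_ERR(clk_b);
                goto err_clk_a;         /* release only what we already hold */
        }
        return 0;

err_clk_a:
        clk_put(clk_a);
        return ret;
}
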
@@ -294,6 +449,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
        disable_put_regulator(dev->platform_data);
        iounmap(hcd->regs);
        usb_put_hcd(hcd);
+
+       clk_put(utmi_p1_fck);
+       clk_put(utmi_p2_fck);
+       clk_put(xclk60mhsp1_ck);
+       clk_put(xclk60mhsp2_ck);
+       clk_put(usbhost_p1_fck);
+       clk_put(usbhost_p2_fck);
+       clk_put(init_60m_fclk);
+
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
 
@@ -364,7 +528,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
         * root hub support
         */
        .hub_status_data        = ehci_hub_status_data,
-       .hub_control            = ehci_hub_control,
+       .hub_control            = omap_ehci_hub_control,
        .bus_suspend            = ehci_bus_suspend,
        .bus_resume             = ehci_bus_resume,
 
index bc94d7bf072d822fb32da0867b6759dec223f69a..123481793a47afcdfc22cf476a8a8ed14301c6e1 100644 (file)
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                        hcd->has_tt = 1;
                        tdi_reset(ehci);
                }
-               if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
-                       /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
-                       if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
-                               ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
-                               hcd->broken_pci_sleep = 1;
-                               device_set_wakeup_capable(&pdev->dev, false);
-                       }
-               }
                break;
        case PCI_VENDOR_ID_TDI:
                if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
index ca819cdd0c5ecb895929ae2fbf7a6a7f2b3664bc..e7cb3925abf8e0276dc4c119e8e03116d0ea48bd 100644 (file)
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
                goto fail_create_hcd;
        }
 
-       if (pdev->dev.platform_data != NULL)
-               pdata = pdev->dev.platform_data;
+       pdata = pdev->dev.platform_data;
 
        /* initialize hcd */
        hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
index 9c2cc4633894b152047ed587b2cf9494d5fdc684..e9713d589e30d20c9a7c4d10b253e431592692b0 100644 (file)
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
  *
  * Properly shutdown the hcd, call driver's shutdown routine.
  */
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
+static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
 {
        struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
-
-       return 0;
 }
 
 
index 836772dfabd3e0deb077f0441981feb29c462d4b..2f3619eefefa9bca87189e5da44dd6bbb0cd8191 100644 (file)
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
 }
 
 /* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
+static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
 {
        struct ohci_hcd         *ohci = hcd_to_ohci(hcd);
        int                     port;
index 2732ef660c5c08b85baeb38c23d5ec597bf15673..7b01094d7993957dba556c4231b781a524ad72e9 100644 (file)
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
        }
 }
 
+/* Update the link status reported for a SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+       u32 pls = status_reg & PORT_PLS_MASK;
+
+       /* The resume state is an xHCI-internal state.
+        * Do not report it to usb core.
+        */
+       if (pls == XDEV_RESUME)
+               return;
+
+       /* When the CAS bit is set, a warm reset
+        * should be performed on the port.
+        */
+       if (status_reg & PORT_CAS) {
+               /* The CAS bit can be set while the port is
+                * in any link state.
+                * Only root hubs have the CAS bit, so we
+                * pretend to be in compliance mode
+                * unless we're already in compliance
+                * or the inactive state.
+                */
+               if (pls != USB_SS_PORT_LS_COMP_MOD &&
+                   pls != USB_SS_PORT_LS_SS_INACTIVE) {
+                       pls = USB_SS_PORT_LS_COMP_MOD;
+               }
+               /* Also return the connection bit -
+                * the hub state machine resets the port
+                * when this bit is set.
+                */
+               pls |= USB_PORT_STAT_CONNECTION;
+       }
+       /* update status field */
+       *status |= pls;
+}
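
The function above folds the raw PORTSC link-state field into the value reported to the hub code, substituting Compliance Mode plus a connection bit when the cold-attach flag is set. A standalone sketch of the same translation; the bit positions and encodings are placeholders.

#include <stdio.h>

#define PLS_MASK         0x0f           /* placeholder link-state field */
#define PLS_RESUME       0x0a
#define PLS_INACTIVE     0x06
#define PLS_COMP_MODE    0x0b
#define FLAG_COLD_ATTACH (1 << 8)
#define STAT_CONNECTION  (1 << 9)

/* Translate a raw register value into the state reported upward. */
static void report_link_state(unsigned int *status, unsigned int reg)
{
        unsigned int pls = reg & PLS_MASK;

        if (pls == PLS_RESUME)          /* internal state: do not report */
                return;
        if (reg & FLAG_COLD_ATTACH) {
                if (pls != PLS_COMP_MODE && pls != PLS_INACTIVE)
                        pls = PLS_COMP_MODE;
                pls |= STAT_CONNECTION; /* makes the hub code reset the port */
        }
        *status |= pls;
}

int main(void)
{
        unsigned int status = 0;

        report_link_state(&status, FLAG_COLD_ATTACH | 0x03);
        printf("0x%x\n", status);       /* 0x20b: compliance + connection */
        return 0;
}
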
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        else
                                status |= USB_PORT_STAT_POWER;
                }
-               /* Port Link State */
+               /* Update the port link state for SuperSpeed ports */
                if (hcd->speed == HCD_USB3) {
-                       /* resume state is a xHCI internal state.
-                        * Do not report it to usb core.
-                        */
-                       if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
-                               status |= (temp & PORT_PLS_MASK);
+                       xhci_hub_report_link_state(&status, temp);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
index ec4338eec8262c520d785e53e32f269b02dfabd8..77689bd64cace84001204bb21cc55085c18d94be 100644 (file)
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                int slot_id)
 {
-       struct list_head *tt;
        struct list_head *tt_list_head;
-       struct list_head *tt_next;
-       struct xhci_tt_bw_info *tt_info;
+       struct xhci_tt_bw_info *tt_info, *next;
+       bool slot_found = false;
 
        /* If the device never made it past the Set Address stage,
         * it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
        }
 
        tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
-       if (list_empty(tt_list_head))
-               return;
-
-       list_for_each(tt, tt_list_head) {
-               tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
-               if (tt_info->slot_id == slot_id)
+       list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+               /* Multi-TT hubs will have more than one entry */
+               if (tt_info->slot_id == slot_id) {
+                       slot_found = true;
+                       list_del(&tt_info->tt_list);
+                       kfree(tt_info);
+               } else if (slot_found) {
                        break;
+               }
        }
-       /* Cautionary measure in case the hub was disconnected before we
-        * stored the TT information.
-        */
-       if (tt_info->slot_id != slot_id)
-               return;
-
-       tt_next = tt->next;
-       tt_info = list_entry(tt, struct xhci_tt_bw_info,
-                       tt_list);
-       /* Multi-TT hubs will have more than one entry */
-       do {
-               list_del(tt);
-               kfree(tt_info);
-               tt = tt_next;
-               if (list_empty(tt_list_head))
-                       break;
-               tt_next = tt->next;
-               tt_info = list_entry(tt, struct xhci_tt_bw_info,
-                               tt_list);
-       } while (tt_info->slot_id == slot_id);
 }
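
list_for_each_entry_safe() is what makes it legal to unlink and free the current entry while walking, because the next pointer is fetched before the loop body runs. A minimal kernel-style sketch of the same idiom; the struct and field names are made up.

#include <linux/list.h>
#include <linux/slab.h>

struct tt_entry {
        int slot_id;
        struct list_head node;
};

/* Drop every entry belonging to @slot_id, freeing each one as we go. */
static void drop_slot_entries(struct list_head *head, int slot_id)
{
        struct tt_entry *e, *next;

        list_for_each_entry_safe(e, next, head, node) {
                if (e->slot_id == slot_id) {
                        list_del(&e->node);
                        kfree(e);
                }
        }
}
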
 
 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
        struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        struct dev_info *dev_info, *next;
-       struct list_head *tt_list_head;
-       struct list_head *tt;
-       struct list_head *endpoints;
-       struct list_head *ep, *q;
-       struct xhci_tt_bw_info *tt_info;
-       struct xhci_interval_bw_table *bwt;
-       struct xhci_virt_ep *virt_ep;
-
        unsigned long   flags;
        int size;
-       int i;
+       int i, j, num_ports;
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
 
-       bwt = &xhci->rh_bw->bw_table;
-       for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
-               endpoints = &bwt->interval_bw[i].endpoints;
-               list_for_each_safe(ep, q, endpoints) {
-                       virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
-                       list_del(&virt_ep->bw_endpoint_list);
-                       kfree(virt_ep);
+       num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+       for (i = 0; i < num_ports; i++) {
+               struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+               for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+                       struct list_head *ep = &bwt->interval_bw[j].endpoints;
+                       while (!list_empty(ep))
+                               list_del_init(ep->next);
                }
        }
 
-       tt_list_head = &xhci->rh_bw->tts;
-       list_for_each_safe(tt, q, tt_list_head) {
-               tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
-               list_del(tt);
-               kfree(tt_info);
+       for (i = 0; i < num_ports; i++) {
+               struct xhci_tt_bw_info *tt, *n;
+               list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+                       list_del(&tt->tt_list);
+                       kfree(tt);
+               }
        }
 
        xhci->num_usb2_ports = 0;
index 23b4aefd103609bcf91b8341a6758a9fc4c3b618..8275645889da4ce779c08c88ac4ea06473f222e0 100644 (file)
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
        num_trbs_free_temp = ep_ring->num_trbs_free;
        dequeue_temp = ep_ring->dequeue;
 
+       /* If we get two back-to-back stalls, and the first stalled transfer
+        * ends just before a link TRB, the dequeue pointer will be left on
+        * the link TRB by the code in the while loop.  So we have to update
+        * the dequeue pointer one segment further, or we'll jump off
+        * the segment into la-la-land.
+        */
+       if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+               ep_ring->deq_seg = ep_ring->deq_seg->next;
+               ep_ring->dequeue = ep_ring->deq_seg->trbs;
+       }
+
        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
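
The added check handles a dequeue pointer that was left parked on the final, link TRB of a segment by hopping to the first TRB of the next segment before the catch-up loop runs. A standalone toy of a segmented ring making that hop; the sizes and types are invented.

#include <stdio.h>

#define SEG_TRBS 4

struct segment {
        int trbs[SEG_TRBS];             /* last slot stands in for the link TRB */
        struct segment *next;
};

struct ring {
        struct segment *deq_seg;
        int deq_idx;
};

/* If the dequeue pointer sits on the link TRB, move to the next segment. */
static void ring_skip_link_trb(struct ring *r)
{
        if (r->deq_idx == SEG_TRBS - 1) {
                r->deq_seg = r->deq_seg->next;
                r->deq_idx = 0;
        }
}

int main(void)
{
        struct segment a, b;
        struct ring r = { &a, SEG_TRBS - 1 };

        a.next = &b;
        b.next = &a;
        ring_skip_link_trb(&r);         /* hop from a's link TRB to b[0] */
        printf("segment %c, index %d\n", r.deq_seg == &b ? 'b' : 'a',
               r.deq_idx);
        return 0;
}
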
index afdc73ee84a61461326eada42fe2c110f414f793..a979cd0dbe0f8f61970c694b2821643fd8f5a09b 100644 (file)
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_CSS;
        xhci_writel(xhci, command, &xhci->op_regs->command);
-       if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
-               xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+       if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+               xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command |= CMD_CRS;
                xhci_writel(xhci, command, &xhci->op_regs->command);
                if (handshake(xhci, &xhci->op_regs->status,
-                             STS_RESTORE, 0, 10*100)) {
-                       xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+                             STS_RESTORE, 0, 10 * 1000)) {
+                       xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
        default:
                dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
                                __func__);
-               return -EINVAL;
+               return USB3_LPM_DISABLED;
        }
 
        if (sel <= max_sel_pel && pel <= max_sel_pel)
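
The default case now returns USB3_LPM_DISABLED because the function's return type is u16 (see the hunk header above): a negative errno returned through an unsigned 16-bit type silently becomes a large, valid-looking timeout. A standalone demonstration of that truncation; the constant values are placeholders.

#include <stdio.h>
#include <stdint.h>

#define MY_EINVAL     22
#define LPM_DISABLED  0x0               /* placeholder "feature off" value */

static uint16_t get_timeout_bad(int state)
{
        if (state != 1 && state != 2)
                return -MY_EINVAL;      /* truncates to 0xffea: looks valid */
        return 0x7f;
}

static uint16_t get_timeout_good(int state)
{
        if (state != 1 && state != 2)
                return LPM_DISABLED;    /* in-band "disabled" value instead */
        return 0x7f;
}

int main(void)
{
        printf("bad:  0x%x\n", get_timeout_bad(3));     /* 0xffea */
        printf("good: 0x%x\n", get_timeout_good(3));    /* 0x0 */
        return 0;
}
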
index de3d6e3e57be4b12e840f5a168b439b151fe84fc..55c0785810c99fb5286e29d39cd4d7ff2b98addb 100644 (file)
@@ -341,7 +341,11 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - the xHC can set this bit to report a device attached
+ * during an Sx state. A warm port reset should be performed to clear this bit
+ * and move the port to the connected state.
+ */
+#define PORT_CAS       (1 << 24)
 /* wake on connect (enable) */
 #define PORT_WKCONN_E  (1 << 25)
 /* wake on disconnect (enable) */
index 768b4b55c816a6960733f73d7e0d7b86be54697f..9d63ba4d10d6fe1500cf1b73e3ccf5e3c111f952 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/dma-mapping.h>
 
 #include <mach/cputype.h>
+#include <mach/hardware.h>
 
 #include <asm/mach-types.h>
 
index 046c84433cadc92e6f8f7d8788520d73f91e4380..371baa0ee509020241b33b1f9af66a60749c5571 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 /* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR       (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_CTL_PADDR       0x01c40034
 #define USBPHY_DATAPOL         BIT(11) /* (dm355) switch D+/D- */
 #define USBPHY_PHYCLKGD                BIT(8)
 #define USBPHY_SESNDEN         BIT(7)  /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
 #define USBPHY_OTGPDWN         BIT(1)
 #define USBPHY_PHYPDWN         BIT(0)
 
-#define DM355_DEEPSLEEP_PADDR  (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DM355_DEEPSLEEP_PADDR  0x01c40048
 #define DRVVBUS_FORCE          BIT(2)
 #define DRVVBUS_OVERRIDE       BIT(1)
 
index f42c29b11f713ddbb95f7a6115e27e2921325dd8..95918dacc99ad8496d80409f18e6cf74aeffa065 100644 (file)
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
        }
 
        musb_ep->desc = NULL;
+       musb_ep->end_point.desc = NULL;
 
        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);
index ef8d744800ac29c58dc24e089a645211fcd665ff..e090c799d87bcf18c5d869b1762d2b7f5feea9fa 100644 (file)
@@ -375,11 +375,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
         */
        if (list_empty(&qh->hep->urb_list)) {
                struct list_head        *head;
+               struct dma_controller   *dma = musb->dma_controller;
 
-               if (is_in)
+               if (is_in) {
                        ep->rx_reinit = 1;
-               else
+                       if (ep->rx_channel) {
+                               dma->channel_release(ep->rx_channel);
+                               ep->rx_channel = NULL;
+                       }
+               } else {
                        ep->tx_reinit = 1;
+                       if (ep->tx_channel) {
+                               dma->channel_release(ep->tx_channel);
+                               ep->tx_channel = NULL;
+                       }
+               }
 
                /* Clobber old pointers to this qh */
                musb_ep_set_qh(ep, is_in, NULL);
index d2a9a8e691b9dd47ea6dbeca37c870e028d5dd1e..0eabb049b6a94efb7fbb14d16cd42b3c9c0d212d 100644 (file)
@@ -305,9 +305,8 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
 
                regulator_enable(twl->usb3v3);
                twl->asleep = 1;
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-                                                               0x10);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
                status = USB_EVENT_ID;
                otg->default_a = true;
                twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
                atomic_notifier_call_chain(&twl->phy.notifier, status,
                                                        otg->gadget);
        } else  {
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
-                                                               0x10);
-               twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
-                                                               0x1);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+               twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
        }
-       twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+       twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
 
        return IRQ_HANDLED;
 }
@@ -343,7 +340,7 @@ static int twl6030_enable_irq(struct usb_phy *x)
 {
        struct twl6030_usb *twl = phy_to_twl(x);
 
-       twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+       twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
        twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
        twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
 
index 3cfabcba7447d2ccaf55d77038b43357d8cbaeec..e7cf84f0751a2a6f1d8ea27614c59abf8a7303fa 100644 (file)
@@ -2,11 +2,11 @@
 # Physical Layer USB driver configuration
 #
 comment "USB Physical Layer drivers"
-       depends on USB
+       depends on USB || USB_GADGET
 
 config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
-       depends on USB
+       depends on USB || USB_GADGET
        depends on I2C
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
index 1b1926200ba791c1335ea9305369258898d955c1..1e71079ce33b7128c116f602d865428297804da7 100644 (file)
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
        { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+       { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
        { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
        { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
        { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +93,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+       { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -133,7 +135,13 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+       { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+       { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+       { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+       { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+       { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+       { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -145,7 +153,11 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+       { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+       { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+       { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
index 8c084ea34e264fe056d7430fc3af008f1e0c86b1..bc912e5a3bebddf3073fe17e015b6195be3708be 100644 (file)
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+       { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
index f3c7c78ede33a567d4b8cf7121aaf6fcb33f6567..5661c7e2d4151cbb8f2335c4572f83fc3deccff8 100644 (file)
 #define RTSYSTEMS_VID                  0x2100  /* Vendor ID */
 #define RTSYSTEMS_SERIAL_VX7_PID       0x9e52  /* Serial converter for VX-7 Radios using FT232RL */
 #define RTSYSTEMS_CT29B_PID            0x9e54  /* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID            0x9e57  /* USB-RTS01 Radio Cable */
 
 
 /*
index 105a6d898ca4a6595b30f88e09d889b4c5d6d844..9b026bf7afefe7002a4baf106e059d8d63d5b891 100644 (file)
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
 
 static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
 
-/* we want to look at all devices, as the vendor/product id can change
- * depending on the command line argument */
-static const struct usb_device_id generic_serial_ids[] = {
-       {.driver_info = 42},
-       {}
-};
-
 /* All of the device info needed for the Generic Serial Converter */
 struct usb_serial_driver usb_serial_generic_device = {
        .driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
                USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
 
        /* register our generic driver with ourselves */
-       retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids);
+       retval = usb_serial_register_drivers(serial_drivers,
+                       "usbserial_generic", generic_device_ids);
 #endif
        return retval;
 }
index d0ec1aa527199b455e152da7d692c6e126f89a83..a71fa0aa04066dd38758fe35c87cf007a3b55f4a 100644 (file)
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
                        MCT_U232_SET_REQUEST_TYPE,
                        0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
                        WDR_TIMEOUT);
-       if (rc < 0)
-               dev_err(&serial->dev->dev,
-                       "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+       kfree(buf);
+
        dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
 
-       kfree(buf);
-       return rc;
+       if (rc < 0) {
+               dev_err(&serial->dev->dev,
+                       "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+               return rc;
+       }
+       return 0;
 } /* mct_u232_set_modem_ctrl */
 
 static int mct_u232_get_modem_stat(struct usb_serial *serial,
index 81423f7361dbe8f770086c3a03f6aca09b4beb2c..d47eb06fe463b51463cb899659a5f2428d5bd24b 100644 (file)
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
        metro_priv->throttled = 0;
        spin_unlock_irqrestore(&metro_priv->lock, flags);
 
-       /*
-        * Force low_latency on so that our tty_push actually forces the data
-        * through, otherwise it is scheduled, and with high data rates (like
-        * with OHCI) data can get lost.
-        */
-       if (tty)
-               tty->low_latency = 1;
-
        /* Clear the urb pipe. */
        usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
 
index 29160f8b510146054726b68795467d8bf08a2020..57eca244842431fa100f742424c9dbc6310c4156 100644 (file)
 
 static int device_type;
 
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
index 1aae9028cd0bd9e58a75461bbbceb74b9079f0b0..417ab1b0aa30fe4088daf4017b5035b325bb0f3a 100644 (file)
@@ -47,6 +47,7 @@
 /* Function prototypes */
 static int  option_probe(struct usb_serial *serial,
                        const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
 static int option_send_setup(struct usb_serial_port *port);
 static void option_instat_callback(struct urb *urb);
 
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
 #define HUAWEI_PRODUCT_E14AC                   0x14AC
 #define HUAWEI_PRODUCT_K3806                   0x14AE
 #define HUAWEI_PRODUCT_K4605                   0x14C6
+#define HUAWEI_PRODUCT_K5005                   0x14C8
 #define HUAWEI_PRODUCT_K3770                   0x14C9
 #define HUAWEI_PRODUCT_K3771                   0x14CA
 #define HUAWEI_PRODUCT_K4510                   0x14CB
@@ -234,6 +236,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_G1             0xA001
 #define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
@@ -425,7 +428,7 @@ static void option_instat_callback(struct urb *urb);
 #define SAMSUNG_VENDOR_ID                       0x04e8
 #define SAMSUNG_PRODUCT_GT_B3730                0x6889
 
-/* YUGA products  www.yuga-info.com*/
+/* YUGA products  www.yuga-info.com gavin.kx@qq.com */
 #define YUGA_VENDOR_ID                         0x257A
 #define YUGA_PRODUCT_CEM600                    0x1601
 #define YUGA_PRODUCT_CEM610                    0x1602
@@ -442,6 +445,8 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CEU516                    0x160C
 #define YUGA_PRODUCT_CEU528                    0x160D
 #define YUGA_PRODUCT_CEU526                    0x160F
+#define YUGA_PRODUCT_CEU881                    0x161F
+#define YUGA_PRODUCT_CEU882                    0x162F
 
 #define YUGA_PRODUCT_CWM600                    0x2601
 #define YUGA_PRODUCT_CWM610                    0x2602
@@ -457,23 +462,26 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CWU518                    0x260B
 #define YUGA_PRODUCT_CWU516                    0x260C
 #define YUGA_PRODUCT_CWU528                    0x260D
+#define YUGA_PRODUCT_CWU581                    0x260E
 #define YUGA_PRODUCT_CWU526                    0x260F
-
-#define YUGA_PRODUCT_CLM600                    0x2601
-#define YUGA_PRODUCT_CLM610                    0x2602
-#define YUGA_PRODUCT_CLM500                    0x2603
-#define YUGA_PRODUCT_CLM510                    0x2604
-#define YUGA_PRODUCT_CLM800                    0x2605
-#define YUGA_PRODUCT_CLM900                    0x2606
-
-#define YUGA_PRODUCT_CLU718                    0x2607
-#define YUGA_PRODUCT_CLU716                    0x2608
-#define YUGA_PRODUCT_CLU728                    0x2609
-#define YUGA_PRODUCT_CLU726                    0x260A
-#define YUGA_PRODUCT_CLU518                    0x260B
-#define YUGA_PRODUCT_CLU516                    0x260C
-#define YUGA_PRODUCT_CLU528                    0x260D
-#define YUGA_PRODUCT_CLU526                    0x260F
+#define YUGA_PRODUCT_CWU582                    0x261F
+#define YUGA_PRODUCT_CWU583                    0x262F
+
+#define YUGA_PRODUCT_CLM600                    0x3601
+#define YUGA_PRODUCT_CLM610                    0x3602
+#define YUGA_PRODUCT_CLM500                    0x3603
+#define YUGA_PRODUCT_CLM510                    0x3604
+#define YUGA_PRODUCT_CLM800                    0x3605
+#define YUGA_PRODUCT_CLM900                    0x3606
+
+#define YUGA_PRODUCT_CLU718                    0x3607
+#define YUGA_PRODUCT_CLU716                    0x3608
+#define YUGA_PRODUCT_CLU728                    0x3609
+#define YUGA_PRODUCT_CLU726                    0x360A
+#define YUGA_PRODUCT_CLU518                    0x360B
+#define YUGA_PRODUCT_CLU516                    0x360C
+#define YUGA_PRODUCT_CLU528                    0x360D
+#define YUGA_PRODUCT_CLU526                    0x360F
 
 /* Viettel products */
 #define VIETTEL_VENDOR_ID                      0x2262
@@ -489,6 +497,19 @@ static void option_instat_callback(struct urb *urb);
 
 /* MediaTek products */
 #define MEDIATEK_VENDOR_ID                     0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM               0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM               0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM             0x7101
+#define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_FP_1COM               0x0003
+#define MEDIATEK_PRODUCT_FP_2COM               0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM             0x0033
+
+/* Cellient products */
+#define CELLIENT_VENDOR_ID                     0x2692
+#define CELLIENT_PRODUCT_MEN200                        0x9005
 
 /* some device interfaces need special handling for a number of reasons */
 enum option_blacklist_reason {
@@ -542,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
 
+static const struct option_blacklist_info net_intf2_blacklist = {
+       .reserved = BIT(2),
+};
+
 static const struct option_blacklist_info net_intf3_blacklist = {
        .reserved = BIT(3),
 };
@@ -666,6 +691,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -722,6 +752,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+       /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1080,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
          0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1209,6 +1243,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
        { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
        { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1216,6 +1255,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1245,7 +1296,7 @@ static struct usb_serial_driver option_1port_device = {
        .ioctl             = usb_wwan_ioctl,
        .attach            = usb_wwan_startup,
        .disconnect        = usb_wwan_disconnect,
-       .release           = usb_wwan_release,
+       .release           = option_release,
        .read_int_callback = option_instat_callback,
 #ifdef CONFIG_PM
        .suspend           = usb_wwan_suspend,
@@ -1259,35 +1310,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
 
 static bool debug;
 
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
-       /* Input endpoints and buffer for this port */
-       struct urb *in_urbs[N_IN_URB];
-       u8 *in_buffer[N_IN_URB];
-       /* Output endpoints and buffer for this port */
-       struct urb *out_urbs[N_OUT_URB];
-       u8 *out_buffer[N_OUT_URB];
-       unsigned long out_busy;         /* Bit vector of URBs in use */
-       int opened;
-       struct usb_anchor delayed;
-
-       /* Settings for the port */
-       int rts_state;  /* Handshaking pins (outputs) */
-       int dtr_state;
-       int cts_state;  /* Handshaking pins (inputs) */
-       int dsr_state;
-       int dcd_state;
-       int ri_state;
-
-       unsigned long tx_start_time[N_OUT_URB];
-};
-
 module_usb_serial_driver(serial_drivers, option_ids);
 
 static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1378,22 @@ static int option_probe(struct usb_serial *serial,
        return 0;
 }
 
+static void option_release(struct usb_serial *serial)
+{
+       struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+       usb_wwan_release(serial);
+
+       kfree(priv);
+}
+
 static void option_instat_callback(struct urb *urb)
 {
        int err;
        int status = urb->status;
        struct usb_serial_port *port =  urb->context;
-       struct option_port_private *portdata = usb_get_serial_port_data(port);
+       struct usb_wwan_port_private *portdata =
+                                       usb_get_serial_port_data(port);
 
        dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
 
@@ -1421,7 +1453,7 @@ static int option_send_setup(struct usb_serial_port *port)
        struct usb_serial *serial = port->serial;
        struct usb_wwan_intf_private *intfdata =
                (struct usb_wwan_intf_private *) serial->private;
-       struct option_port_private *portdata;
+       struct usb_wwan_port_private *portdata;
        int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
        int val = 0;
 
index 0d5fe59ebb9e980c9b2180599839f0e71ed0d09b..996015c5f1acd8e9768586bf0c81a68b886f4c07 100644 (file)
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
        {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
        {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x1199, 0x9010)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9012)},   /* Sierra Wireless Gobi 3000 QDL */
        {USB_DEVICE(0x1199, 0x9013)},   /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+       {USB_DEVICE(0x1199, 0x9014)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9015)},   /* Sierra Wireless Gobi 3000 Modem device */
+       {USB_DEVICE(0x1199, 0x9018)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9019)},   /* Sierra Wireless Gobi 3000 Modem device */
        {USB_DEVICE(0x12D1, 0x14F0)},   /* Sony Gobi 3000 QDL */
        {USB_DEVICE(0x12D1, 0x14F1)},   /* Sony Gobi 3000 Composite */
        { }                             /* Terminating entry */
index ba54a0a8235c08f71319098cd77ba62833f00301..d423d36acc043f9084951dfb0c9e4b3292f222f7 100644 (file)
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       /* AT&T Direct IP LTE modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
        { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
index 6a1b609a0d94cff247460de6716a71ecfadbe707..27483f91a4a38a41bfdd3f2a5c9583be33ac8027 100644 (file)
@@ -659,12 +659,14 @@ exit:
 static struct usb_serial_driver *search_serial_device(
                                        struct usb_interface *iface)
 {
-       const struct usb_device_id *id;
+       const struct usb_device_id *id = NULL;
        struct usb_serial_driver *drv;
+       struct usb_driver *driver = to_usb_driver(iface->dev.driver);
 
        /* Check if the usb id matches a known device */
        list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
-               id = get_iface_id(drv, iface);
+               if (drv->usb_driver == driver)
+                       id = get_iface_id(drv, iface);
                if (id)
                        return drv;
        }
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 
                if (retval) {
                        dbg("sub driver rejected device");
-                       kfree(serial);
+                       usb_serial_put(serial);
                        module_put(type->driver.owner);
                        return retval;
                }
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                 */
                if (num_bulk_in == 0 || num_bulk_out == 0) {
                        dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
-                       kfree(serial);
+                       usb_serial_put(serial);
                        module_put(type->driver.owner);
                        return -ENODEV;
                }
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                if (num_ports == 0) {
                        dev_err(&interface->dev,
                            "Generic device with no bulk out, not allowed.\n");
-                       kfree(serial);
+                       usb_serial_put(serial);
                        module_put(type->driver.owner);
                        return -EIO;
                }
index a324a5d21e99e5ec7d7cba790e804ce003c064c9..11418da9bc0927492b820383ca433f309897e2fe 100644 (file)
@@ -202,6 +202,12 @@ static int slave_configure(struct scsi_device *sdev)
                if (us->fflags & US_FL_NO_READ_CAPACITY_16)
                        sdev->no_read_capacity_16 = 1;
 
+               /*
+                * Many devices do not respond properly to READ_CAPACITY_16.
+                * Tell the SCSI layer to try READ_CAPACITY_10 first.
+                */
+               sdev->try_rc_10_first = 1;
+
                /* assume SPC3 or later devices support sense size > 18 */
                if (sdev->scsi_level > SCSI_SPC_2)
                        us->fflags |= US_FL_SANE_SENSE;
index 94dbd25caa303c44c4c972230fcb2752da619fc0..112156f68afb2117875087447d01222d74b33ba7 100644 (file)
@@ -191,7 +191,9 @@ static int vhost_worker(void *data)
        struct vhost_dev *dev = data;
        struct vhost_work *work = NULL;
        unsigned uninitialized_var(seq);
+       mm_segment_t oldfs = get_fs();
 
+       set_fs(USER_DS);
        use_mm(dev->mm);
 
        for (;;) {
@@ -229,6 +231,7 @@ static int vhost_worker(void *data)
 
        }
        unuse_mm(dev->mm);
+       set_fs(oldfs);
        return 0;
 }
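
An illustrative sketch (not part of the merged hunk above) of the idiom it adds: a kernel thread that borrows a userspace mm through use_mm() should also switch its address limit to USER_DS, so that uaccess done on the thread's behalf still goes through the access_ok() checks. Every name below except the kernel helpers (get_fs/set_fs, use_mm/unuse_mm) is hypothetical.

#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>

static int example_worker(void *data)
{
        struct mm_struct *mm = data;            /* mm handed over by the owning task */
        mm_segment_t oldfs = get_fs();          /* remember the kthread's KERNEL_DS default */

        set_fs(USER_DS);                        /* limit copy_{to,from}_user() to user addresses */
        use_mm(mm);

        /* ... service requests that dereference user pointers ... */

        unuse_mm(mm);
        set_fs(oldfs);                          /* restore before the thread exits */
        return 0;
}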
 
index fa2b03750316b175f36f72d6f7cf8cf8a69bc375..2979292650d6494a5fddef3f3835da5931f37ec0 100644 (file)
@@ -88,7 +88,7 @@ config LCD_PLATFORM
 
 config LCD_TOSA
        tristate "Sharp SL-6000 LCD Driver"
-       depends on SPI && MACH_TOSA
+       depends on I2C && SPI && MACH_TOSA
        help
           If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
          for its LCD.
index 6c9399341bcf4aefcac58ffacae39f6b83ac5247..9327cd1b3143a93962ef5595895a43f655439ee5 100644 (file)
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
 
 EXPORT_SYMBOL_GPL(ili9320_probe_spi);
 
-int __devexit ili9320_remove(struct ili9320 *ili)
+int ili9320_remove(struct ili9320 *ili)
 {
        ili9320_power(ili, FB_BLANK_POWERDOWN);
 
index 33ea874c87d236e34691a386b0fb8ea4cdd6c607..9bdd4b0c18c8e393994fe542babdbedc22bb6070 100644 (file)
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
 
 static int
 adv7393_write_proc(struct file *file, const char __user * buffer,
-                  unsigned long count, void *data)
+                  size_t count, void *data)
 {
        struct adv7393fb_device *fbdev = data;
-       char line[8];
        unsigned int val;
        int ret;
 
-       ret = copy_from_user(line, buffer, count);
+       ret = kstrtouint_from_user(buffer, count, 0, &val);
        if (ret)
                return -EFAULT;
 
-       val = simple_strtoul(line, NULL, 0);
        adv7393_write(fbdev->client, val >> 8, val & 0xff);
 
        return count;
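
A minimal sketch of the kstrtouint_from_user() idiom the hunk above switches to; the handler name and example_apply() are made up for illustration. Parsing straight from the user buffer removes the fixed-size stack copy (which the old code could overrun when count exceeded the 8-byte local buffer) and the unchecked simple_strtoul().

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static void example_apply(unsigned int val);    /* hypothetical consumer of the parsed value */

static ssize_t example_write(struct file *file, const char __user *buffer,
                             size_t count, loff_t *ppos)
{
        unsigned int val;
        int ret;

        /* Copy and parse in one step; base 0 accepts decimal, octal and 0x hex. */
        ret = kstrtouint_from_user(buffer, count, 0, &val);
        if (ret)
                return ret;

        example_apply(val);
        return count;
}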
index 377dde3d5bfc8954aaccfc643b9664408e0c9da1..c95b417d0d41ae037cbcb4a01ff01c575e1b0aab 100644 (file)
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
 
 static struct platform_driver broadsheetfb_driver = {
        .probe  = broadsheetfb_probe,
-       .remove = broadsheetfb_remove,
+       .remove = __devexit_p(broadsheetfb_remove),
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "broadsheetfb",
index c2d11fef114b0ae7bc64045fea9f1e8cf58af6bf..e2c96d01d8f5dc8cc1a954e8cbfba69fdfd422c0 100644 (file)
@@ -224,5 +224,19 @@ config FONT_10x18
          big letters. It fits between the sun 12x22 and the normal 8x16 font.
          If other fonts are too big or too small for you, say Y, otherwise say N.
 
+config FONT_AUTOSELECT
+       def_bool y
+       depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
+       depends on !FONT_8x8
+       depends on !FONT_6x11
+       depends on !FONT_7x14
+       depends on !FONT_PEARL_8x8
+       depends on !FONT_ACORN_8x8
+       depends on !FONT_MINI_4x6
+       depends on !FONT_SUN8x16
+       depends on !FONT_SUN12x22
+       depends on !FONT_10x18
+       select FONT_8x16
+
 endmenu
 
index ab0a8e527333b9f68edd884bfdc91067f1b214ad..85e4f44bfa61df1e5ab463b326d881bac37c544b 100644 (file)
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
 
 static struct platform_driver mbxfb_driver = {
        .probe = mbxfb_probe,
-       .remove = mbxfb_remove,
+       .remove = __devexit_p(mbxfb_remove),
        .suspend = mbxfb_suspend,
        .resume = mbxfb_resume,
        .driver = {
index 2ce9992f403b838efababfbc9a7ec7e65a13e533..901576eb5a8425995e57f680715c649e68dc0a54 100644 (file)
@@ -526,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
 {
        struct omap_dss_device *dssdev = to_dss_device(dev);
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
-       u8 errors;
+       u8 errors = 0;
        int r;
 
        mutex_lock(&td->lock);
index 72ded9cd2cb0fa0e0540eeb19721ae8a602d72ef..58bd9c27369df9d4a39d1352f9d9cad74ec51b37 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
 
 #include <video/omapdss.h>
 
@@ -194,14 +195,35 @@ static inline int dss_initialize_debugfs(void)
 static inline void dss_uninitialize_debugfs(void)
 {
 }
-static inline int dss_debugfs_create_file(const char *name,
-               void (*write)(struct seq_file *))
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
 {
        return 0;
 }
 #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
 
 /* PLATFORM DEVICE */
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+       DSSDBG("pm notif %lu\n", v);
+
+       switch (v) {
+       case PM_SUSPEND_PREPARE:
+               DSSDBG("suspending displays\n");
+               return dss_suspend_all_devices();
+
+       case PM_POST_SUSPEND:
+               DSSDBG("resuming displays\n");
+               return dss_resume_all_devices();
+
+       default:
+               return 0;
+       }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+       .notifier_call = omap_dss_pm_notif,
+};
+
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
        struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -225,6 +247,8 @@ static int __init omap_dss_probe(struct platform_device *pdev)
        else if (pdata->default_device)
                core.default_display_name = pdata->default_device->name;
 
+       register_pm_notifier(&omap_dss_pm_notif_block);
+
        return 0;
 
 err_debugfs:
@@ -234,6 +258,8 @@ err_debugfs:
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
+       unregister_pm_notifier(&omap_dss_pm_notif_block);
+
        dss_uninitialize_debugfs();
 
        dss_uninit_overlays(pdev);
@@ -248,25 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
        dss_disable_all_devices();
 }
 
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       DSSDBG("suspend %d\n", state.event);
-
-       return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
-       DSSDBG("resume\n");
-
-       return dss_resume_all_devices();
-}
-
 static struct platform_driver omap_dss_driver = {
        .remove         = omap_dss_remove,
        .shutdown       = omap_dss_shutdown,
-       .suspend        = omap_dss_suspend,
-       .resume         = omap_dss_resume,
        .driver         = {
                .name   = "omapdss",
                .owner  = THIS_MODULE,
index 4749ac356469ea8645b3e92c1d72512a102f8e36..397d4eee11bb7715d9d01e3b8a1330abcdc2f2e4 100644 (file)
@@ -384,7 +384,7 @@ void dispc_runtime_put(void)
        DSSDBG("dispc_runtime_put\n");
 
        r = pm_runtime_put_sync(&dispc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
index ec363d8390edd53873647fe96dff416f8d9d06f7..14ce8cc079e3d1840e982734b4e2644770ba9b72 100644 (file)
@@ -1075,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
        DSSDBG("dsi_runtime_put\n");
 
        r = pm_runtime_put_sync(&dsi->pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 /* source clock for DSI PLL. this could also be PCLKFREE */
@@ -3724,7 +3724,7 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
        /* CLKIN4DDR = 16 * TXBYTECLKHS */
        tlp_avail = thsbyte_clk * (blank - trans_lp);
 
-       ttxclkesc = tdsi_fclk / lp_clk_div;
+       ttxclkesc = tdsi_fclk * lp_clk_div;
 
        lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
                        26) / 16;
index 6ea1ff149f6f08cf3311dc3c73341fca59372063..d2b57197b292086cd95a32ef1b33136c0e216166 100644 (file)
@@ -731,7 +731,7 @@ static void dss_runtime_put(void)
        DSSDBG("dss_runtime_put\n");
 
        r = pm_runtime_put_sync(&dss.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
 }
 
 /* DEBUGFS */
index 8195c7166d200c8e575aba54f5e4f46c335df002..26a2430a70288d6cf468357d854fe3429b245f72 100644 (file)
@@ -138,7 +138,7 @@ static void hdmi_runtime_put(void)
        DSSDBG("hdmi_runtime_put\n");
 
        r = pm_runtime_put_sync(&hdmi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static int __init hdmi_init_display(struct omap_dss_device *dssdev)
index 3d8c206e90e5d93631a0dc493f9ab180fbfa181e..7985fa12b9b46c8c3f6da328f578a76b6267a66e 100644 (file)
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
        DSSDBG("rfbi_runtime_put\n");
 
        r = pm_runtime_put_sync(&rfbi.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 void rfbi_bus_lock(void)
index 2b8973931ff48e845cd9a1386b7b437b369b24e4..3907c8b6ecbca991e3cc9313c1a1473c247cb216 100644 (file)
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
        DSSDBG("venc_runtime_put\n");
 
        r = pm_runtime_put_sync(&venc.pdev->dev);
-       WARN_ON(r < 0);
+       WARN_ON(r < 0 && r != -ENOSYS);
 }
 
 static const struct venc_config *venc_timings_to_config(
index 5f9d8e69029ee2e8ab3be25b8c1bd348c8cf49fb..ea7b661e7229039c29770c50414f4846e6865192 100644 (file)
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
        result = (unsigned int)tmp / 1000;
 
        dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
-               pixclk, clk, result, clk / result);
+               pixclk, clk, result, result ? clk / result : clk);
 
        return result;
 }
@@ -1348,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
        writel(0, regs + VIDOSD_A(win, sfb->variant));
        writel(0, regs + VIDOSD_B(win, sfb->variant));
        writel(0, regs + VIDOSD_C(win, sfb->variant));
-       reg = readl(regs + SHADOWCON);
-       writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
+
+       if (sfb->variant.has_shadowcon) {
+               reg = readl(sfb->regs + SHADOWCON);
+               reg &= ~(SHADOWCON_WINx_PROTECT(win) |
+                       SHADOWCON_CHx_ENABLE(win) |
+                       SHADOWCON_CHx_LOCAL_ENABLE(win));
+               writel(reg, sfb->regs + SHADOWCON);
+       }
 }
 
 static int __devinit s3c_fb_probe(struct platform_device *pdev)
index cee7803a0a1c74fe09c1129f5edc927e58dfe86f..f3d3b9ce4751315afbfe15d6fc9c4b0b5a07ae3d 100644 (file)
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par  *par, struct savage_reg *r
        /* following part not present in X11 driver */
        cr67 = vga_in8(0x3d5, par) & 0xf;
        vga_out8(0x3d5, 0x50 | cr67, par);
-       udelay(10000);
+       mdelay(10);
        vga_out8(0x3d4, 0x67, par);
        /* end of part */
        vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
        vga_out8(0x3d4, 0x66, par);
        cr66 = vga_in8(0x3d5, par);
        vga_out8(0x3d5, cr66 | 0x02, par);
-       udelay(10000);
+       mdelay(10);
 
        vga_out8(0x3d4, 0x66, par);
        vga_out8(0x3d5, cr66 & ~0x02, par);     /* clear reset flag */
-       udelay(10000);
+       mdelay(10);
 
 
        /*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
        vga_out8(0x3d4, 0x3f, par);
        cr3f = vga_in8(0x3d5, par);
        vga_out8(0x3d5, cr3f | 0x08, par);
-       udelay(10000);
+       mdelay(10);
 
        vga_out8(0x3d4, 0x3f, par);
        vga_out8(0x3d5, cr3f & ~0x08, par);     /* clear reset flags */
-       udelay(10000);
+       mdelay(10);
 
        /* Savage ramdac speeds */
        par->numClocks = 4;
index bfbc15ca38ddd3a0ac57cf0860624558a35aee47..0908e604433303d605b9a7cd6ab4ffe36087f96f 100644 (file)
@@ -47,7 +47,7 @@ struct virtio_balloon
        struct task_struct *thread;
 
        /* Waiting for host to ack the pages we released. */
-       struct completion acked;
+       wait_queue_head_t acked;
 
        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
 
 static void balloon_ack(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (vb)
-               complete(&vb->acked);
+       wake_up(&vb->acked);
 }
 
 static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 {
        struct scatterlist sg;
+       unsigned int len;
 
        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
 
-       init_completion(&vb->acked);
-
        /* We should always be able to add one buffer to an empty queue. */
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(vq);
 
        /* When host has read buffer, this completes via balloon_ack */
-       wait_for_completion(&vb->acked);
+       wait_event(vb->acked, virtqueue_get_buf(vq, &len));
 }
 
 static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
  */
 static void stats_request(struct virtqueue *vq)
 {
-       struct virtio_balloon *vb;
-       unsigned int len;
+       struct virtio_balloon *vb = vq->vdev->priv;
 
-       vb = virtqueue_get_buf(vq, &len);
-       if (!vb)
-               return;
        vb->need_stats_update = 1;
        wake_up(&vb->config_change);
 }
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
 {
        struct virtqueue *vq;
        struct scatterlist sg;
+       unsigned int len;
 
        vb->need_stats_update = 0;
        update_balloon_stats(vb);
 
        vq = vb->stats_vq;
+       if (!virtqueue_get_buf(vq, &len))
+               return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats));
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
        INIT_LIST_HEAD(&vb->pages);
        vb->num_pages = 0;
        init_waitqueue_head(&vb->config_change);
+       init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;
        vb->need_stats_update = 0;
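
A minimal sketch, under assumed names, of the ack pattern this balloon diff adopts: the virtqueue callback only wakes a wait queue, and the waiter uses virtqueue_get_buf() itself as the wait condition, so the used buffer is always reaped in process context rather than from the callback. The acked field is assumed to have been set up with init_waitqueue_head() at probe time.

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/wait.h>

struct example_balloon {                        /* hypothetical device state */
        struct virtqueue *vq;
        wait_queue_head_t acked;
};

/* Virtqueue callback: may run in atomic context, so only signal the waiter. */
static void example_ack(struct virtqueue *vq)
{
        struct example_balloon *b = vq->vdev->priv;

        wake_up(&b->acked);
}

/* Submitter: post one buffer, kick the host, then sleep until it is returned. */
static void example_tell_host(struct example_balloon *b, struct scatterlist *sg)
{
        unsigned int len;

        if (virtqueue_add_buf(b->vq, sg, 1, 0, b, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(b->vq);

        /* virtqueue_get_buf() is the condition, so reaping happens here. */
        wait_event(b->acked, virtqueue_get_buf(b->vq, &len));
}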
 
index 2b763815aeecc90ebad7fdf36f17de949d365bfd..1eff743ec4970071f8451704fb51d4b06de70950 100644 (file)
@@ -146,7 +146,7 @@ struct cmn_registers {
 }  __attribute__((packed));
 
 static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
 static unsigned int is_icru;
 static DEFINE_SPINLOCK(rom_lock);
 static void *cru_rom_addr;
@@ -756,6 +756,8 @@ error:
 static void hpwdt_exit_nmi_decoding(void)
 {
        unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+       unregister_nmi_handler(NMI_SERR, "hpwdt");
+       unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
        if (cru_rom_addr)
                iounmap(cru_rom_addr);
 }
index bc47e9012f370ff43c0e50d33581fe751cc4742f..9c2c27c3b4240b5c2ecc71aa82ab5b40704240b6 100644 (file)
@@ -699,3 +699,4 @@ MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
index afcd13676542338a4d4f231e0193ad11626f5d1a..e4841c36798bd758d9a037f8d89de467047d88ae 100644 (file)
@@ -4,7 +4,7 @@
  * Watchdog driver for ARM SP805 watchdog module
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2 or later. This program is licensed "as is" without any
@@ -331,6 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
 
 module_amba_driver(sp805_wdt_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
 MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
 MODULE_LICENSE("GPL");
index 672d169bf1dacfa4ae0150da6109caeb266d0590..ef8edecfc526cedaa27bb22b23a2729289221f1b 100644 (file)
@@ -349,7 +349,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                        sizeof(struct watchdog_info)) ? -EFAULT : 0;
        case WDIOC_GETSTATUS:
                err = watchdog_get_status(wdd, &val);
-               if (err)
+               if (err == -ENODEV)
                        return err;
                return put_user(val, p);
        case WDIOC_GETBOOTSTATUS:
index 6908e4ce2a0d69aa67ca0e11251d834770bbd68c..7595581d032cc9d9c5a7f04ee12b861dc76051ee 100644 (file)
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                                              handle_edge_irq, "event");
 
                xen_irq_info_evtchn_init(irq, evtchn);
+       } else {
+               struct irq_info *info = info_for_irq(irq);
+               WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
 
 out:
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
 
                bind_evtchn_to_cpu(evtchn, cpu);
+       } else {
+               struct irq_info *info = info_for_irq(irq);
+               WARN_ON(info == NULL || info->type != IRQT_IPI);
        }
 
  out:
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
 
                bind_evtchn_to_cpu(evtchn, cpu);
+       } else {
+               struct irq_info *info = info_for_irq(irq);
+               WARN_ON(info == NULL || info->type != IRQT_VIRQ);
        }
 
 out:
index b84bf0b6cc34c4fd34bfd35015bd8057f0ddf6b9..18fff88254ebd9bd9e4c76b1df63ecf40dc4e139 100644 (file)
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
 
 #ifdef CONFIG_ACPI
                handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
-               if (!handle)
+               if (!handle && pci_dev->bus->bridge)
                        handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
 #ifdef CONFIG_PCI_IOV
                if (!handle && pci_dev->is_virtfn)
index dcb79521e6c8c256769c75295f90fb3444a8eaec..89f264c67420c2448f9fe029e8193fa438369ae8 100644 (file)
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
 }
 
 /* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
  * returns 0 if the page was successfully gotten from frontswap, -1 if
  * was not present (should never happen!)
  */
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                                   struct page *page)
 {
        u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
 __setup("nofrontswap", no_frontswap);
 
 static struct frontswap_ops __initdata tmem_frontswap_ops = {
-       .put_page = tmem_frontswap_put_page,
-       .get_page = tmem_frontswap_get_page,
+       .store = tmem_frontswap_store,
+       .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
index 3f75895c919bcc3b80ae63ab1fca42dcf335f95b..a383c18e74e86eebaa847d756e3493e7ca3c9bfd 100644 (file)
@@ -179,60 +179,74 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
                                struct ulist *parents, int level,
-                               struct btrfs_key *key, u64 wanted_disk_byte,
+                               struct btrfs_key *key_for_search, u64 time_seq,
+                               u64 wanted_disk_byte,
                                const u64 *extent_item_pos)
 {
-       int ret;
-       int slot = path->slots[level];
-       struct extent_buffer *eb = path->nodes[level];
+       int ret = 0;
+       int slot;
+       struct extent_buffer *eb;
+       struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        struct extent_inode_elem *eie = NULL;
        u64 disk_byte;
-       u64 wanted_objectid = key->objectid;
 
-add_parent:
-       if (level == 0 && extent_item_pos) {
-               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-               ret = check_extent_in_eb(key, eb, fi, *extent_item_pos, &eie);
+       if (level != 0) {
+               eb = path->nodes[level];
+               ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
                if (ret < 0)
                        return ret;
-       }
-       ret = ulist_add(parents, eb->start, (unsigned long)eie, GFP_NOFS);
-       if (ret < 0)
-               return ret;
-
-       if (level != 0)
                return 0;
+       }
 
        /*
-        * if the current leaf is full with EXTENT_DATA items, we must
-        * check the next one if that holds a reference as well.
-        * ref->count cannot be used to skip this check.
-        * repeat this until we don't find any additional EXTENT_DATA items.
+        * We normally enter this function with the path already pointing to
+        * the first item to check. But sometimes, we may enter it with
+        * slot==nritems. In that case, go to the next leaf before we continue.
         */
-       while (1) {
-               eie = NULL;
-               ret = btrfs_next_leaf(root, path);
-               if (ret < 0)
-                       return ret;
-               if (ret)
-                       return 0;
+       if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
+               ret = btrfs_next_old_leaf(root, path, time_seq);
 
+       while (!ret) {
                eb = path->nodes[0];
-               for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
-                       btrfs_item_key_to_cpu(eb, key, slot);
-                       if (key->objectid != wanted_objectid ||
-                           key->type != BTRFS_EXTENT_DATA_KEY)
-                               return 0;
-                       fi = btrfs_item_ptr(eb, slot,
-                                               struct btrfs_file_extent_item);
-                       disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-                       if (disk_byte == wanted_disk_byte)
-                               goto add_parent;
+               slot = path->slots[0];
+
+               btrfs_item_key_to_cpu(eb, &key, slot);
+
+               if (key.objectid != key_for_search->objectid ||
+                   key.type != BTRFS_EXTENT_DATA_KEY)
+                       break;
+
+               fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+
+               if (disk_byte == wanted_disk_byte) {
+                       eie = NULL;
+                       if (extent_item_pos) {
+                               ret = check_extent_in_eb(&key, eb, fi,
+                                               *extent_item_pos,
+                                               &eie);
+                               if (ret < 0)
+                                       break;
+                       }
+                       if (!ret) {
+                               ret = ulist_add(parents, eb->start,
+                                               (unsigned long)eie, GFP_NOFS);
+                               if (ret < 0)
+                                       break;
+                               if (!extent_item_pos) {
+                                       ret = btrfs_next_old_leaf(root, path,
+                                                       time_seq);
+                                       continue;
+                               }
+                       }
                }
+               ret = btrfs_next_old_item(root, path, time_seq);
        }
 
-       return 0;
+       if (ret > 0)
+               ret = 0;
+       return ret;
 }
 
 /*
@@ -249,7 +263,6 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_key root_key;
-       struct btrfs_key key = {0};
        struct extent_buffer *eb;
        int ret = 0;
        int root_level;
@@ -288,25 +301,19 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
 
        eb = path->nodes[level];
-       if (!eb) {
-               WARN_ON(1);
-               ret = 1;
-               goto out;
-       }
-
-       if (level == 0) {
-               if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret)
-                               goto out;
-                       eb = path->nodes[0];
+       while (!eb) {
+               if (!level) {
+                       WARN_ON(1);
+                       ret = 1;
+                       goto out;
                }
-
-               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+               level--;
+               eb = path->nodes[level];
        }
 
-       ret = add_all_parents(root, path, parents, level, &key,
-                               ref->wanted_disk_byte, extent_item_pos);
+       ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
+                               time_seq, ref->wanted_disk_byte,
+                               extent_item_pos);
 out:
        btrfs_free_path(path);
        return ret;
@@ -832,6 +839,7 @@ again:
                        }
                        ret = __add_delayed_refs(head, delayed_ref_seq,
                                                 &prefs_delayed);
+                       mutex_unlock(&head->mutex);
                        if (ret) {
                                spin_unlock(&delayed_refs->lock);
                                goto out;
@@ -925,8 +933,6 @@ again:
        }
 
 out:
-       if (head)
-               mutex_unlock(&head->mutex);
        btrfs_free_path(path);
        while (!list_empty(&prefs)) {
                ref = list_first_entry(&prefs, struct __prelim_ref, list);
index e616f8872e69bb0cf3b9a3f369ba49eeca57f2de..12394a90d60fb7115d5b8feee0e7df701f9e743d 100644 (file)
@@ -37,6 +37,7 @@
 #define BTRFS_INODE_IN_DEFRAG                  3
 #define BTRFS_INODE_DELALLOC_META_RESERVED     4
 #define BTRFS_INODE_HAS_ORPHAN_ITEM            5
+#define BTRFS_INODE_HAS_ASYNC_EXTENT           6
 
 /* in memory btrfs inode */
 struct btrfs_inode {
index 9cebb1fd6a3cc59919c7c990d3016caee52b5849..da6e9364a5e3caa48b67c5e17f95a21ded5ee9ec 100644 (file)
@@ -93,6 +93,7 @@
 #include "print-tree.h"
 #include "locking.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
@@ -843,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror(
                superblock_tmp->never_written = 0;
                superblock_tmp->mirror_num = 1 + superblock_mirror_num;
                if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-                       printk(KERN_INFO "New initial S-block (bdev %p, %s)"
-                              " @%llu (%s/%llu/%d)\n",
-                              superblock_bdev, device->name,
-                              (unsigned long long)dev_bytenr,
-                              dev_state->name,
-                              (unsigned long long)dev_bytenr,
-                              superblock_mirror_num);
+                       printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)"
+                                    " @%llu (%s/%llu/%d)\n",
+                                    superblock_bdev,
+                                    rcu_str_deref(device->name),
+                                    (unsigned long long)dev_bytenr,
+                                    dev_state->name,
+                                    (unsigned long long)dev_bytenr,
+                                    superblock_mirror_num);
                list_add(&superblock_tmp->all_blocks_node,
                         &state->all_blocks_list);
                btrfsic_block_hashtable_add(superblock_tmp,
index d7a96cfdc50ae6a2d8afef1dad7ca3642248bbb8..8206b3900587efa23570b5b8f7f8071ab6e73156 100644 (file)
@@ -467,6 +467,15 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
+/*
+ * This allocates memory and gets a tree modification sequence number when
+ * needed.
+ *
+ * Returns 0 when no sequence number is needed, < 0 on error.
+ * Returns 1 when a sequence number was added. In this case,
+ * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
+ * after inserting into the rb tree.
+ */
 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
                                 struct tree_mod_elem **tm_ret)
 {
@@ -491,11 +500,11 @@ static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
                 */
                kfree(tm);
                seq = 0;
+               spin_unlock(&fs_info->tree_mod_seq_lock);
        } else {
                __get_tree_mod_seq(fs_info, &tm->elem);
                seq = tm->elem.seq;
        }
-       spin_unlock(&fs_info->tree_mod_seq_lock);
 
        return seq;
 }
@@ -521,7 +530,9 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
        tm->slot = slot;
        tm->generation = btrfs_node_ptr_generation(eb, slot);
 
-       return __tree_mod_log_insert(fs_info, tm);
+       ret = __tree_mod_log_insert(fs_info, tm);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+       return ret;
 }
 
 static noinline int
@@ -559,7 +570,9 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
        tm->move.nr_items = nr_items;
        tm->op = MOD_LOG_MOVE_KEYS;
 
-       return __tree_mod_log_insert(fs_info, tm);
+       ret = __tree_mod_log_insert(fs_info, tm);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+       return ret;
 }
 
 static noinline int
@@ -580,7 +593,9 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
        tm->generation = btrfs_header_generation(old_root);
        tm->op = MOD_LOG_ROOT_REPLACE;
 
-       return __tree_mod_log_insert(fs_info, tm);
+       ret = __tree_mod_log_insert(fs_info, tm);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+       return ret;
 }
 
 static struct tree_mod_elem *
@@ -1009,11 +1024,18 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                if (!looped && !tm)
                        return 0;
                /*
-                * we must have key remove operations in the log before the
-                * replace operation.
+                * if there are no tree operations for the oldest root, we simply
+                * return it. this should only happen if that (old) root is at
+                * level 0.
                 */
-               BUG_ON(!tm);
+               if (!tm)
+                       break;
 
+               /*
+                * if there's an operation that's not a root replacement, we
+                * found the oldest version of our root. normally, we'll find a
+                * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+                */
                if (tm->op != MOD_LOG_ROOT_REPLACE)
                        break;
 
@@ -1023,6 +1045,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                looped = 1;
        }
 
+       /* if there's no old root to return, return what we found instead */
+       if (!found)
+               found = tm;
+
        return found;
 }
 
@@ -1068,11 +1094,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
                                                      tm->generation);
                        break;
                case MOD_LOG_KEY_ADD:
-                       if (tm->slot != n - 1) {
-                               o_dst = btrfs_node_key_ptr_offset(tm->slot);
-                               o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
-                               memmove_extent_buffer(eb, o_dst, o_src, p_size);
-                       }
+                       /* if a move operation is needed it's in the log */
                        n--;
                        break;
                case MOD_LOG_MOVE_KEYS:
@@ -1143,45 +1165,57 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
        return eb_rewin;
 }
 
+/*
+ * get_old_root() rewinds the state of @root's root node to the given @time_seq
+ * value. If there are no changes, the current root->root_node is returned. If
+ * anything changed in between, there's a fresh buffer allocated on which the
+ * rewind operations are done. In any case, the returned buffer is read locked.
+ * Returns NULL on error (with no locks held).
+ */
 static inline struct extent_buffer *
 get_old_root(struct btrfs_root *root, u64 time_seq)
 {
        struct tree_mod_elem *tm;
        struct extent_buffer *eb;
-       struct tree_mod_root *old_root;
-       u64 old_generation;
+       struct tree_mod_root *old_root = NULL;
+       u64 old_generation = 0;
+       u64 logical;
 
+       eb = btrfs_read_lock_root_node(root);
        tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
        if (!tm)
                return root->node;
 
-       old_root = &tm->old_root;
-       old_generation = tm->generation;
-
-       tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq);
-       /*
-        * there was an item in the log when __tree_mod_log_oldest_root
-        * returned. this one must not go away, because the time_seq passed to
-        * us must be blocking its removal.
-        */
-       BUG_ON(!tm);
+       if (tm->op == MOD_LOG_ROOT_REPLACE) {
+               old_root = &tm->old_root;
+               old_generation = tm->generation;
+               logical = old_root->logical;
+       } else {
+               logical = root->node->start;
+       }
 
-       if (old_root->logical == root->node->start) {
-               /* there are logged operations for the current root */
+       tm = tree_mod_log_search(root->fs_info, logical, time_seq);
+       if (old_root)
+               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
+       else
                eb = btrfs_clone_extent_buffer(root->node);
-       } else {
-               /* there's a root replace operation for the current root */
-               eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
-                                              root->nodesize);
+       btrfs_tree_read_unlock(root->node);
+       free_extent_buffer(root->node);
+       if (!eb)
+               return NULL;
+       btrfs_tree_read_lock(eb);
+       if (old_root) {
                btrfs_set_header_bytenr(eb, eb->start);
                btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
                btrfs_set_header_owner(eb, root->root_key.objectid);
+               btrfs_set_header_level(eb, old_root->level);
+               btrfs_set_header_generation(eb, old_generation);
        }
-       if (!eb)
-               return NULL;
-       btrfs_set_header_level(eb, old_root->level);
-       btrfs_set_header_generation(eb, old_generation);
-       __tree_mod_log_rewind(eb, time_seq, tm);
+       if (tm)
+               __tree_mod_log_rewind(eb, time_seq, tm);
+       else
+               WARN_ON(btrfs_header_level(eb) != 0);
+       extent_buffer_get(eb);
 
        return eb;
 }
@@ -1650,8 +1684,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
            BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
                return 0;
 
-       btrfs_header_nritems(mid);
-
        left = read_node_slot(root, parent, pslot - 1);
        if (left) {
                btrfs_tree_lock(left);
@@ -1681,7 +1713,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                wret = push_node_left(trans, root, left, mid, 1);
                if (wret < 0)
                        ret = wret;
-               btrfs_header_nritems(mid);
        }
 
        /*
@@ -2615,9 +2646,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
 
 again:
        b = get_old_root(root, time_seq);
-       extent_buffer_get(b);
        level = btrfs_header_level(b);
-       btrfs_tree_read_lock(b);
        p->locks[level] = BTRFS_READ_LOCK;
 
        while (b) {
@@ -2964,7 +2993,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 static void insert_ptr(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *path,
                       struct btrfs_disk_key *key, u64 bytenr,
-                      int slot, int level, int tree_mod_log)
+                      int slot, int level)
 {
        struct extent_buffer *lower;
        int nritems;
@@ -2977,7 +3006,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
        BUG_ON(slot > nritems);
        BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != nritems) {
-               if (tree_mod_log && level)
+               if (level)
                        tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
                                             slot, nritems - slot);
                memmove_extent_buffer(lower,
@@ -2985,7 +3014,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
-       if (tree_mod_log && level) {
+       if (level) {
                ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
                                              MOD_LOG_KEY_ADD);
                BUG_ON(ret < 0);
@@ -3073,7 +3102,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(split);
 
        insert_ptr(trans, root, path, &disk_key, split->start,
-                  path->slots[level + 1] + 1, level + 1, 1);
+                  path->slots[level + 1] + 1, level + 1);
 
        if (path->slots[level] >= mid) {
                path->slots[level] -= mid;
@@ -3610,7 +3639,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
        btrfs_set_header_nritems(l, mid);
        btrfs_item_key(right, &disk_key, 0);
        insert_ptr(trans, root, path, &disk_key, right->start,
-                  path->slots[1] + 1, 1, 0);
+                  path->slots[1] + 1, 1);
 
        btrfs_mark_buffer_dirty(right);
        btrfs_mark_buffer_dirty(l);
@@ -3817,7 +3846,7 @@ again:
                if (mid <= slot) {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                  path->slots[1] + 1, 1, 0);
+                                  path->slots[1] + 1, 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -3826,7 +3855,7 @@ again:
                } else {
                        btrfs_set_header_nritems(right, 0);
                        insert_ptr(trans, root, path, &disk_key, right->start,
-                                         path->slots[1], 1, 0);
+                                         path->slots[1], 1);
                        btrfs_tree_unlock(path->nodes[0]);
                        free_extent_buffer(path->nodes[0]);
                        path->nodes[0] = right;
@@ -5000,6 +5029,12 @@ next:
  * returns < 0 on io errors.
  */
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
+{
+       return btrfs_next_old_leaf(root, path, 0);
+}
+
+int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
+                       u64 time_seq)
 {
        int slot;
        int level;
@@ -5025,7 +5060,10 @@ again:
        path->keep_locks = 1;
        path->leave_spinning = 1;
 
-       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (time_seq)
+               ret = btrfs_search_old_slot(root, &key, path, time_seq);
+       else
+               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        path->keep_locks = 0;
 
        if (ret < 0)
@@ -5081,6 +5119,18 @@ again:
 
                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
+                       if (!ret && time_seq) {
+                               /*
+                                * If we don't get the lock, we may be racing
+                                * with push_leaf_left, holding that lock while
+                                * itself waiting for the leaf we've currently
+                                * locked. To solve this situation, we give up
+                                * on our lock and cycle.
+                                */
+                               btrfs_release_path(path);
+                               cond_resched();
+                               goto again;
+                       }
                        if (!ret) {
                                btrfs_set_path_blocking(path);
                                btrfs_tree_read_lock(next);
index 0236d03c6732569a48a561049ea5a861d473da65..fa5c45b39075d858e9803c6cb1044338c7ee5d35 100644 (file)
@@ -2753,13 +2753,20 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
-static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
+                       u64 time_seq);
+static inline int btrfs_next_old_item(struct btrfs_root *root,
+                                     struct btrfs_path *p, u64 time_seq)
 {
        ++p->slots[0];
        if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
-               return btrfs_next_leaf(root, p);
+               return btrfs_next_old_leaf(root, p, time_seq);
        return 0;
 }
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+       return btrfs_next_old_item(root, p, 0);
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
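
The btrfs_next_old_* declarations above are time_seq-aware counterparts of the usual leaf/item iterators. A minimal sketch of how a caller might walk items at a fixed tree_mod_seq point, assuming the search key is already positioned and glossing over the exact-match return conventions of the search (walk_items_at_seq is a hypothetical helper written for illustration, not part of this patch):

    static int walk_items_at_seq(struct btrfs_root *root, struct btrfs_key *key,
                                 u64 time_seq)
    {
            struct btrfs_path *path;
            int ret;

            path = btrfs_alloc_path();
            if (!path)
                    return -ENOMEM;

            /* position the path in the tree as it looked at time_seq */
            ret = btrfs_search_old_slot(root, key, path, time_seq);
            if (ret < 0)
                    goto out;

            while (1) {
                    /* inspect path->nodes[0] at path->slots[0] here */

                    ret = btrfs_next_old_item(root, path, time_seq);
                    if (ret)        /* 1 means no more items, < 0 is an error */
                            break;
            }
            ret = ret < 0 ? ret : 0;
    out:
            btrfs_free_path(path);
            return ret;
    }
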
index c18d0442ae6daa69a564ebba400f9ad09573ea1d..2399f4086915acdc724f4def2fe308cf24d04c6f 100644 (file)
@@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
                }
        }
 }
+
+void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+       struct btrfs_delayed_node *curr_node, *prev_node;
+
+       delayed_root = btrfs_get_delayed_root(root);
+
+       curr_node = btrfs_first_delayed_node(delayed_root);
+       while (curr_node) {
+               __btrfs_kill_delayed_node(curr_node);
+
+               prev_node = curr_node;
+               curr_node = btrfs_next_delayed_node(curr_node);
+               btrfs_release_delayed_node(prev_node);
+       }
+}
+
index 7083d08b2a212bda501d7d3f7e08fe9c18bfe3a0..f5aa4023d3e18fe65488b91283b41b968da4e50d 100644 (file)
@@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev);
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 
+/* Used to clean up the transaction */
+void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
+
 /* Used for readdir() */
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
                             struct list_head *del_list);
index 7ae51decf6d3d0fb5c3d44bb7791f843c74aa376..2936ca49b3b4af3a799f7585b74038d289ab8278 100644 (file)
@@ -44,6 +44,7 @@
 #include "free-space-cache.h"
 #include "inode-map.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -2118,7 +2119,7 @@ int open_ctree(struct super_block *sb,
 
        features = btrfs_super_incompat_flags(disk_super);
        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-       if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
+       if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
        /*
@@ -2353,12 +2354,17 @@ retry_root_backup:
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto recovery_tree_root;
-
        csum_root->track_dirty = 1;
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
 
+       ret = btrfs_recover_balance(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to recover balance\n");
+               goto fail_block_groups;
+       }
+
        ret = btrfs_init_dev_stats(fs_info);
        if (ret) {
                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
@@ -2484,20 +2490,23 @@ retry_root_backup:
                goto fail_trans_kthread;
        }
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               down_read(&fs_info->cleanup_work_sem);
-               err = btrfs_orphan_cleanup(fs_info->fs_root);
-               if (!err)
-                       err = btrfs_orphan_cleanup(fs_info->tree_root);
-               up_read(&fs_info->cleanup_work_sem);
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
 
-               if (!err)
-                       err = btrfs_recover_balance(fs_info->tree_root);
+       down_read(&fs_info->cleanup_work_sem);
+       if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+           (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+               up_read(&fs_info->cleanup_work_sem);
+               close_ctree(tree_root);
+               return ret;
+       }
+       up_read(&fs_info->cleanup_work_sem);
 
-               if (err) {
-                       close_ctree(tree_root);
-                       return err;
-               }
+       ret = btrfs_resume_balance_async(fs_info);
+       if (ret) {
+               printk(KERN_WARNING "btrfs: failed to resume balance\n");
+               close_ctree(tree_root);
+               return ret;
        }
 
        return 0;
@@ -2575,8 +2584,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
                struct btrfs_device *device = (struct btrfs_device *)
                        bh->b_private;
 
-               printk_ratelimited(KERN_WARNING "lost page write due to "
-                                  "I/O error on %s\n", device->name);
+               printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
+                                         "I/O error on %s\n",
+                                         rcu_str_deref(device->name));
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
@@ -2749,8 +2759,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
                wait_for_completion(&device->flush_wait);
 
                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
-                       printk("btrfs: disabling barriers on dev %s\n",
-                              device->name);
+                       printk_in_rcu("btrfs: disabling barriers on dev %s\n",
+                                     rcu_str_deref(device->name));
                        device->nobarriers = 1;
                }
                if (!bio_flagged(bio, BIO_UPTODATE)) {
@@ -3400,7 +3410,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
        delayed_refs = &trans->delayed_refs;
 
-again:
        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
@@ -3408,31 +3417,37 @@ again:
                return ret;
        }
 
-       node = rb_first(&delayed_refs->root);
-       while (node) {
+       while ((node = rb_first(&delayed_refs->root)) != NULL) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-               node = rb_next(node);
-
-               ref->in_tree = 0;
-               rb_erase(&ref->rb_node, &delayed_refs->root);
-               delayed_refs->num_entries--;
 
                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
                        struct btrfs_delayed_ref_head *head;
 
                        head = btrfs_delayed_node_to_head(ref);
-                       spin_unlock(&delayed_refs->lock);
-                       mutex_lock(&head->mutex);
+                       if (!mutex_trylock(&head->mutex)) {
+                               atomic_inc(&ref->refs);
+                               spin_unlock(&delayed_refs->lock);
+
+                               /* Need to wait for the delayed ref to run */
+                               mutex_lock(&head->mutex);
+                               mutex_unlock(&head->mutex);
+                               btrfs_put_delayed_ref(ref);
+
+                               spin_lock(&delayed_refs->lock);
+                               continue;
+                       }
+
                        kfree(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
-                       mutex_unlock(&head->mutex);
-                       btrfs_put_delayed_ref(ref);
-                       goto again;
                }
+               ref->in_tree = 0;
+               rb_erase(&ref->rb_node, &delayed_refs->root);
+               delayed_refs->num_entries--;
+
                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);
 
@@ -3520,11 +3535,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                             &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
                                               offset >> PAGE_CACHE_SHIFT);
                        spin_unlock(&dirty_pages->buffer_lock);
-                       if (eb) {
+                       if (eb)
                                ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
                                                         &eb->bflags);
-                               atomic_set(&eb->refs, 1);
-                       }
                        if (PageWriteback(page))
                                end_page_writeback(page);
 
@@ -3538,8 +3551,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                spin_unlock_irq(&page->mapping->tree_lock);
                        }
 
-                       page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
+                       page_cache_release(page);
                }
        }
 
@@ -3553,8 +3566,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
        u64 start;
        u64 end;
        int ret;
+       bool loop = true;
 
        unpin = pinned_extents;
+again:
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
@@ -3572,6 +3587,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                cond_resched();
        }
 
+       if (loop) {
+               if (unpin == &root->fs_info->freed_extents[0])
+                       unpin = &root->fs_info->freed_extents[1];
+               else
+                       unpin = &root->fs_info->freed_extents[0];
+               loop = false;
+               goto again;
+       }
+
        return 0;
 }
 
@@ -3585,21 +3609,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
        /* FIXME: cleanup wait for commit */
        cur_trans->in_commit = 1;
        cur_trans->blocked = 1;
-       if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
-               wake_up(&root->fs_info->transaction_blocked_wait);
+       wake_up(&root->fs_info->transaction_blocked_wait);
 
        cur_trans->blocked = 0;
-       if (waitqueue_active(&root->fs_info->transaction_wait))
-               wake_up(&root->fs_info->transaction_wait);
+       wake_up(&root->fs_info->transaction_wait);
 
        cur_trans->commit_done = 1;
-       if (waitqueue_active(&cur_trans->commit_wait))
-               wake_up(&cur_trans->commit_wait);
+       wake_up(&cur_trans->commit_wait);
+
+       btrfs_destroy_delayed_inodes(root);
+       btrfs_assert_delayed_root_empty(root);
 
        btrfs_destroy_pending_snapshots(cur_trans);
 
        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
                                     EXTENT_DIRTY);
+       btrfs_destroy_pinned_extent(root,
+                                   root->fs_info->pinned_extents);
 
        /*
        memset(cur_trans, 0, sizeof(*cur_trans));
@@ -3648,6 +3674,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);
 
+               btrfs_destroy_delayed_inodes(root);
+               btrfs_assert_delayed_root_empty(root);
+
                btrfs_destroy_pending_snapshots(t);
 
                btrfs_destroy_delalloc_inodes(root);
index 4b5a1e1bdefbe095c239b464e55c9699e865175b..6e1d36702ff71c4fc5bde2733cbf166376b726d3 100644 (file)
@@ -2347,12 +2347,10 @@ next:
        return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-                       unsigned long num_refs)
+                              unsigned long num_refs,
+                              struct list_head *first_seq)
 {
-       struct list_head *first_seq = delayed_refs->seq_head.next;
-
        spin_unlock(&delayed_refs->lock);
        pr_debug("waiting for more refs (num %ld, first %p)\n",
                 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
+       struct list_head *first_seq = NULL;
        int ret;
        u64 delayed_start;
        int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
                                 */
                                consider_waiting = 1;
                                num_refs = delayed_refs->num_entries;
+                               first_seq = root->fs_info->tree_mod_seq_list.next;
                        } else {
-                               wait_for_more_refs(delayed_refs, num_refs);
+                               wait_for_more_refs(delayed_refs,
+                                                  num_refs, first_seq);
                                /*
                                 * after waiting, things have changed. we
                                 * dropped the lock and someone else might have
index 2c8f7b2046173954f720125a6e53e96de3c7727e..01c21b6c6d43e44a28a4c862ca6532d6f8b02654 100644 (file)
@@ -20,6 +20,7 @@
 #include "volumes.h"
 #include "check-integrity.h"
 #include "locking.h"
+#include "rcu-string.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -1917,9 +1918,9 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
                return -EIO;
        }
 
-       printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
-                       "sector %llu)\n", page->mapping->host->i_ino, start,
-                       dev->name, sector);
+       printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
+                     "(dev %s sector %llu)\n", page->mapping->host->i_ino,
+                     start, rcu_str_deref(dev->name), sector);
 
        bio_put(bio);
        return 0;
@@ -3323,6 +3324,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                             writepage_t writepage, void *data,
                             void (*flush_fn)(void *))
 {
+       struct inode *inode = mapping->host;
        int ret = 0;
        int done = 0;
        int nr_to_write_done = 0;
@@ -3333,6 +3335,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
        int scanned = 0;
        int tag;
 
+       /*
+        * We have to hold onto the inode so that ordered extents can do their
+        * work when the IO finishes.  The alternative to this is failing to add
+        * an ordered extent if the igrab() fails there and that is a huge pain
+        * to deal with, so instead just hold onto the inode throughout the
+        * writepages operation.  If it fails here we are freeing up the inode
+        * anyway and we'd rather not waste our time writing out stuff that is
+        * going to be truncated anyway.
+        */
+       if (!igrab(inode))
+               return 0;
+
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
@@ -3427,6 +3441,7 @@ retry:
                index = 0;
                goto retry;
        }
+       btrfs_add_delayed_iput(inode);
        return ret;
 }
 
index 70dc8ca73e257bc3a1e7a96ea48009bff093af9a..9aa01ec2138d466f3d1726ccd6291d054c5e5c98 100644 (file)
@@ -1334,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    loff_t *ppos, size_t count, size_t ocount)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = fdentry(file)->d_inode;
        struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
@@ -1344,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
                                            count, ocount);
 
-       /*
-        * the generic O_DIRECT will update in-memory i_size after the
-        * DIOs are done.  But our endio handlers that update the on
-        * disk i_size never update past the in memory i_size.  So we
-        * need one more update here to catch any additions to the
-        * file
-        */
-       if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
-               btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-               mark_inode_dirty(inode);
-       }
-
        if (written < 0 || written == count)
                return written;
 
index 81296c57405a5d53a27dba626a4d6201829bd578..6c4e2baa9290e16d06a4a8dfc5c3b05880cfeb27 100644 (file)
@@ -1543,29 +1543,26 @@ again:
        end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
-        * XXX - this can go away after a few releases.
-        *
-        * since the only user of btrfs_remove_free_space is the tree logging
-        * stuff, and the only way to test that is under crash conditions, we
-        * want to have this debug stuff here just in case somethings not
-        * working.  Search the bitmap for the space we are trying to use to
-        * make sure its actually there.  If its not there then we need to stop
-        * because something has gone wrong.
+        * We need to search for bits in this bitmap.  We could only cover some
+        * of the extent in this bitmap thanks to how we add space, so we need
+        * to search for as much of it as we can and clear that amount, and then
+        * go searching for the next bit.
         */
        search_start = *offset;
-       search_bytes = *bytes;
+       search_bytes = ctl->unit;
        search_bytes = min(search_bytes, end - search_start + 1);
        ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
-       if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
-               *bytes -= end - *offset + 1;
-               *offset = end + 1;
-       } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
-               *bytes = 0;
-       }
+       /* We may have found more bits than what we need */
+       search_bytes = min(search_bytes, *bytes);
+
+       /* Cannot clear past the end of the bitmap */
+       search_bytes = min(search_bytes, end - search_start + 1);
+
+       bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+       *offset += search_bytes;
+       *bytes -= search_bytes;
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1596,7 +1593,7 @@ again:
                 * everything over again.
                 */
                search_start = *offset;
-               search_bytes = *bytes;
+               search_bytes = ctl->unit;
                ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
@@ -1879,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       if (!bytes)
+               goto out_lock;
+
        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
@@ -1905,88 +1904,48 @@ again:
                }
        }
 
-       if (info->bytes < bytes && rb_next(&info->offset_index)) {
-               u64 end;
-               next_info = rb_entry(rb_next(&info->offset_index),
-                                            struct btrfs_free_space,
-                                            offset_index);
-
-               if (next_info->bitmap)
-                       end = next_info->offset +
-                             BITS_PER_BITMAP * ctl->unit - 1;
-               else
-                       end = next_info->offset + next_info->bytes;
-
-               if (next_info->bytes < bytes ||
-                   next_info->offset > offset || offset > end) {
-                       printk(KERN_CRIT "Found free space at %llu, size %llu,"
-                             " trying to use %llu\n",
-                             (unsigned long long)info->offset,
-                             (unsigned long long)info->bytes,
-                             (unsigned long long)bytes);
-                       WARN_ON(1);
-                       ret = -EINVAL;
-                       goto out_lock;
-               }
-
-               info = next_info;
-       }
-
-       if (info->bytes == bytes) {
+       if (!info->bitmap) {
                unlink_free_space(ctl, info);
-               if (info->bitmap) {
-                       kfree(info->bitmap);
-                       ctl->total_bitmaps--;
-               }
-               kmem_cache_free(btrfs_free_space_cachep, info);
-               ret = 0;
-               goto out_lock;
-       }
-
-       if (!info->bitmap && info->offset == offset) {
-               unlink_free_space(ctl, info);
-               info->offset += bytes;
-               info->bytes -= bytes;
-               ret = link_free_space(ctl, info);
-               WARN_ON(ret);
-               goto out_lock;
-       }
+               if (offset == info->offset) {
+                       u64 to_free = min(bytes, info->bytes);
+
+                       info->bytes -= to_free;
+                       info->offset += to_free;
+                       if (info->bytes) {
+                               ret = link_free_space(ctl, info);
+                               WARN_ON(ret);
+                       } else {
+                               kmem_cache_free(btrfs_free_space_cachep, info);
+                       }
 
-       if (!info->bitmap && info->offset <= offset &&
-           info->offset + info->bytes >= offset + bytes) {
-               u64 old_start = info->offset;
-               /*
-                * we're freeing space in the middle of the info,
-                * this can happen during tree log replay
-                *
-                * first unlink the old info and then
-                * insert it again after the hole we're creating
-                */
-               unlink_free_space(ctl, info);
-               if (offset + bytes < info->offset + info->bytes) {
-                       u64 old_end = info->offset + info->bytes;
+                       offset += to_free;
+                       bytes -= to_free;
+                       goto again;
+               } else {
+                       u64 old_end = info->bytes + info->offset;
 
-                       info->offset = offset + bytes;
-                       info->bytes = old_end - info->offset;
+                       info->bytes = offset - info->offset;
                        ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
-               } else {
-                       /* the hole we're creating ends at the end
-                        * of the info struct, just free the info
-                        */
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               }
-               spin_unlock(&ctl->tree_lock);
 
-               /* step two, insert a new info struct to cover
-                * anything before the hole
-                */
-               ret = btrfs_add_free_space(block_group, old_start,
-                                          offset - old_start);
-               WARN_ON(ret); /* -ENOMEM */
-               goto out;
+                       /* Not enough bytes in this entry to satisfy us */
+                       if (old_end < offset + bytes) {
+                               bytes -= old_end - offset;
+                               offset = old_end;
+                               goto again;
+                       } else if (old_end == offset + bytes) {
+                               /* all done */
+                               goto out_lock;
+                       }
+                       spin_unlock(&ctl->tree_lock);
+
+                       ret = btrfs_add_free_space(block_group, offset + bytes,
+                                                  old_end - (offset + bytes));
+                       WARN_ON(ret);
+                       goto out;
+               }
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
index f6ab6f5e635a39b18ddb7f259bf5f0edd25d10a0..a7d1921ac76b8ee5d85d563c1ceed80f4a65a2dd 100644 (file)
@@ -830,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode,
        if (IS_ERR(trans)) {
                extent_clear_unlock_delalloc(inode,
                             &BTRFS_I(inode)->io_tree,
-                            start, end, NULL,
+                            start, end, locked_page,
                             EXTENT_CLEAR_UNLOCK_PAGE |
                             EXTENT_CLEAR_UNLOCK |
                             EXTENT_CLEAR_DELALLOC |
@@ -963,7 +963,7 @@ out:
 out_unlock:
        extent_clear_unlock_delalloc(inode,
                     &BTRFS_I(inode)->io_tree,
-                    start, end, NULL,
+                    start, end, locked_page,
                     EXTENT_CLEAR_UNLOCK_PAGE |
                     EXTENT_CLEAR_UNLOCK |
                     EXTENT_CLEAR_DELALLOC |
@@ -986,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work)
        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
-       if (num_added == 0)
+       if (num_added == 0) {
+               btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
+       }
 }
 
 /*
@@ -1020,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
 {
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
+       if (async_cow->inode)
+               btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
 }
 
@@ -1038,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
-               async_cow->inode = inode;
+               async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;
@@ -1136,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
-       if (!path)
+       if (!path) {
+               extent_clear_unlock_delalloc(inode,
+                            &BTRFS_I(inode)->io_tree,
+                            start, end, locked_page,
+                            EXTENT_CLEAR_UNLOCK_PAGE |
+                            EXTENT_CLEAR_UNLOCK |
+                            EXTENT_CLEAR_DELALLOC |
+                            EXTENT_CLEAR_DIRTY |
+                            EXTENT_SET_WRITEBACK |
+                            EXTENT_END_WRITEBACK);
                return -ENOMEM;
+       }
 
        nolock = btrfs_is_free_space_inode(root, inode);
 
@@ -1147,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode,
                trans = btrfs_join_transaction(root);
 
        if (IS_ERR(trans)) {
+               extent_clear_unlock_delalloc(inode,
+                            &BTRFS_I(inode)->io_tree,
+                            start, end, locked_page,
+                            EXTENT_CLEAR_UNLOCK_PAGE |
+                            EXTENT_CLEAR_UNLOCK |
+                            EXTENT_CLEAR_DELALLOC |
+                            EXTENT_CLEAR_DIRTY |
+                            EXTENT_SET_WRITEBACK |
+                            EXTENT_END_WRITEBACK);
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
@@ -1327,8 +1350,11 @@ out_check:
        }
        btrfs_release_path(path);
 
-       if (cur_offset <= end && cow_start == (u64)-1)
+       if (cur_offset <= end && cow_start == (u64)-1) {
                cow_start = cur_offset;
+               cur_offset = end;
+       }
+
        if (cow_start != (u64)-1) {
                ret = cow_file_range(inode, locked_page, cow_start, end,
                                     page_started, nr_written, 1);
@@ -1347,6 +1373,17 @@ error:
        if (!ret)
                ret = err;
 
+       if (ret && cur_offset < end)
+               extent_clear_unlock_delalloc(inode,
+                            &BTRFS_I(inode)->io_tree,
+                            cur_offset, end, locked_page,
+                            EXTENT_CLEAR_UNLOCK_PAGE |
+                            EXTENT_CLEAR_UNLOCK |
+                            EXTENT_CLEAR_DELALLOC |
+                            EXTENT_CLEAR_DIRTY |
+                            EXTENT_SET_WRITEBACK |
+                            EXTENT_END_WRITEBACK);
+
        btrfs_free_path(path);
        return ret;
 }
@@ -1361,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
-       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
+       if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 1, nr_written);
-       else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
+       } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
-       else if (!btrfs_test_opt(root, COMPRESS) &&
-                !(BTRFS_I(inode)->force_compress) &&
-                !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
+       else if (!btrfs_test_opt(root, COMPRESS) &&
+                  !(BTRFS_I(inode)->force_compress) &&
+                  !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
                ret = cow_file_range(inode, locked_page, start, end,
                                      page_started, nr_written, 1);
-       else
+       } else {
+               set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                       &BTRFS_I(inode)->runtime_flags);
                ret = cow_file_range_async(inode, locked_page, start, end,
                                           page_started, nr_written);
+       }
        return ret;
 }
 
@@ -3714,7 +3754,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
-               BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+               BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                                 &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
        }
@@ -5836,8 +5876,17 @@ map:
        bh_result->b_size = len;
        bh_result->b_bdev = em->bdev;
        set_buffer_mapped(bh_result);
-       if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
-               set_buffer_new(bh_result);
+       if (create) {
+               if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+                       set_buffer_new(bh_result);
+
+               /*
+                * Need to update the i_size under the extent lock so buffered
+                * readers will get the updated i_size when we unlock.
+                */
+               if (start + len > i_size_read(inode))
+                       i_size_write(inode, start + len);
+       }
 
        free_extent_map(em);
 
@@ -6320,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                 */
                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     lockend - lockstart + 1);
-               if (!ordered)
+
+               /*
+                * We need to make sure there are no buffered pages in this
+                * range either; we could have raced between the invalidate in
+                * generic_file_direct_write and locking the extent.  The
+                * invalidate needs to happen so that reads after a write do not
+                * get stale data.
+                */
+               if (!ordered && (!writing ||
+                   !test_range_bit(&BTRFS_I(inode)->io_tree,
+                                   lockstart, lockend, EXTENT_UPTODATE, 0,
+                                   cached_state)))
                        break;
+
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                                     &cached_state, GFP_NOFS);
-               btrfs_start_ordered_extent(inode, ordered, 1);
-               btrfs_put_ordered_extent(ordered);
+
+               if (ordered) {
+                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_put_ordered_extent(ordered);
+               } else {
+                       /* Screw you mmap */
+                       ret = filemap_write_and_wait_range(file->f_mapping,
+                                                          lockstart,
+                                                          lockend);
+                       if (ret)
+                               goto out;
+
+                       /*
+                        * If we found a page that couldn't be invalidated just
+                        * fall back to buffered.
+                        */
+                       ret = invalidate_inode_pages2_range(file->f_mapping,
+                                       lockstart >> PAGE_CACHE_SHIFT,
+                                       lockend >> PAGE_CACHE_SHIFT);
+                       if (ret) {
+                               if (ret == -EBUSY)
+                                       ret = 0;
+                               goto out;
+                       }
+               }
+
                cond_resched();
        }
 
@@ -7054,10 +7139,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode)
        else
                b_inode->flags &= ~BTRFS_INODE_NODATACOW;
 
-       if (b_dir->flags & BTRFS_INODE_COMPRESS)
+       if (b_dir->flags & BTRFS_INODE_COMPRESS) {
                b_inode->flags |= BTRFS_INODE_COMPRESS;
-       else
-               b_inode->flags &= ~BTRFS_INODE_COMPRESS;
+               b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
+       } else {
+               b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
+                                   BTRFS_INODE_NOCOMPRESS);
+       }
 }
 
 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
index 24b776c08d99f7bbb621076f68500464b6829435..0e92e5763005214b5d55d091d6fcfb94d73f4875 100644 (file)
@@ -52,6 +52,7 @@
 #include "locking.h"
 #include "inode-map.h"
 #include "backref.h"
+#include "rcu-string.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -785,39 +786,57 @@ none:
        return -ENOENT;
 }
 
-/*
- * Validaty check of prev em and next em:
- * 1) no prev/next em
- * 2) prev/next em is an hole/inline extent
- */
-static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
+static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
 {
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-       struct extent_map *prev = NULL, *next = NULL;
-       int ret = 0;
+       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+       struct extent_map *em;
+       u64 len = PAGE_CACHE_SIZE;
 
+       /*
+        * hopefully we have this extent in the tree already, try without
+        * the full extent lock
+        */
        read_lock(&em_tree->lock);
-       prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
-       next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
+       em = lookup_extent_mapping(em_tree, start, len);
        read_unlock(&em_tree->lock);
 
-       if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
-           (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
-               ret = 1;
-       free_extent_map(prev);
-       free_extent_map(next);
+       if (!em) {
+               /* get the big lock and read metadata off disk */
+               lock_extent(io_tree, start, start + len - 1);
+               em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+               unlock_extent(io_tree, start, start + len - 1);
+
+               if (IS_ERR(em))
+                       return NULL;
+       }
+
+       return em;
+}
+
+static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
+{
+       struct extent_map *next;
+       bool ret = true;
 
+       /* this is the last extent */
+       if (em->start + em->len >= i_size_read(inode))
+               return false;
+
+       next = defrag_lookup_extent(inode, em->start + em->len);
+       if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+               ret = false;
+
+       free_extent_map(next);
        return ret;
 }
 
-static int should_defrag_range(struct inode *inode, u64 start, u64 len,
-                              int thresh, u64 *last_len, u64 *skip,
-                              u64 *defrag_end)
+static int should_defrag_range(struct inode *inode, u64 start, int thresh,
+                              u64 *last_len, u64 *skip, u64 *defrag_end)
 {
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       struct extent_map *em = NULL;
-       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct extent_map *em;
        int ret = 1;
+       bool next_mergeable = true;
 
        /*
         * make sure that once we start defragging an extent, we keep on
@@ -828,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 
        *skip = 0;
 
-       /*
-        * hopefully we have this extent in the tree already, try without
-        * the full extent lock
-        */
-       read_lock(&em_tree->lock);
-       em = lookup_extent_mapping(em_tree, start, len);
-       read_unlock(&em_tree->lock);
-
-       if (!em) {
-               /* get the big lock and read metadata off disk */
-               lock_extent(io_tree, start, start + len - 1);
-               em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-               unlock_extent(io_tree, start, start + len - 1);
-
-               if (IS_ERR(em))
-                       return 0;
-       }
+       em = defrag_lookup_extent(inode, start);
+       if (!em)
+               return 0;
 
        /* this will cover holes, and inline extents */
        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
@@ -852,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
                goto out;
        }
 
-       /* If we have nothing to merge with us, just skip. */
-       if (check_adjacent_extents(inode, em)) {
-               ret = 0;
-               goto out;
-       }
+       next_mergeable = defrag_check_next_extent(inode, em);
 
        /*
-        * we hit a real extent, if it is big don't bother defragging it again
+        * we hit a real extent, if it is big or the next extent is not a
+        * real extent, don't bother defragging it
         */
-       if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
+       if ((*last_len == 0 || *last_len >= thresh) &&
+           (em->len >= thresh || !next_mergeable))
                ret = 0;
-
 out:
        /*
         * last_len ends up being a counter of how many bytes we've defragged.
@@ -1142,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                        break;
 
                if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
-                                        PAGE_CACHE_SIZE, extent_thresh,
-                                        &last_len, &skip, &defrag_end)) {
+                                        extent_thresh, &last_len, &skip,
+                                        &defrag_end)) {
                        unsigned long next;
                        /*
                         * the should_defrag function tells us how much to skip
@@ -1304,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                ret = -EINVAL;
                goto out_free;
        }
+       if (device->fs_devices && device->fs_devices->seeding) {
+               printk(KERN_INFO "btrfs: resizer unable to apply on "
+                      "seeding device %llu\n",
+                      (unsigned long long)devid);
+               ret = -EINVAL;
+               goto out_free;
+       }
+
        if (!strcmp(sizestr, "max"))
                new_size = device->bdev->bd_inode->i_size;
        else {
@@ -1345,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        do_div(new_size, root->sectorsize);
        new_size *= root->sectorsize;
 
-       printk(KERN_INFO "btrfs: new size for %s is %llu\n",
-               device->name, (unsigned long long)new_size);
+       printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
+                     rcu_str_deref(device->name),
+                     (unsigned long long)new_size);
 
        if (new_size > old_size) {
                trans = btrfs_start_transaction(root, 0);
@@ -2264,7 +2275,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
        di_args->total_bytes = dev->total_bytes;
        memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
        if (dev->name) {
-               strncpy(di_args->path, dev->name, sizeof(di_args->path));
+               struct rcu_string *name;
+
+               rcu_read_lock();
+               name = rcu_dereference(dev->name);
+               strncpy(di_args->path, name->str, sizeof(di_args->path));
+               rcu_read_unlock();
                di_args->path[sizeof(di_args->path) - 1] = 0;
        } else {
                di_args->path[0] = '\0';
index 497c530724cf6b7a50296d2c6660fef7f4066cb9..e440aa653c30d6f6c8ad1e7437bfa6e2b4d50799 100644 (file)
@@ -339,7 +339,7 @@ struct btrfs_ioctl_get_dev_stats {
 #define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
 #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
                                   struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
 #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
                              struct btrfs_ioctl_scrub_args)
index 9e138cdc36c5eb7d66bf80dfc37829878eeaa6e2..643335a4fe3c6699a894c19d01e79bed9ef631c7 100644 (file)
@@ -627,7 +627,27 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
-       filemap_write_and_wait_range(inode->i_mapping, start, orig_end);
+       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+
+       /*
+        * So with compression we will find and lock a dirty page and clear the
+        * first one as dirty, set up an async extent, and immediately return
+        * with the entire range locked but with nobody actually marked with
+        * writeback.  So we can't just filemap_write_and_wait_range() and
+        * expect it to work since it will just kick off a thread to do the
+        * actual work.  So we need to call filemap_fdatawrite_range _again_
+        * since it will wait on the page lock, which won't be unlocked until
+        * after the pages have been marked as writeback and so we're good to go
+        * from there.  We have to do this otherwise we'll miss the ordered
+        * extents and that results in badness.  Please Josef, do not think you
+        * know better and pull this out at some point in the future, it is
+        * right and you are wrong.
+        */
+       if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                    &BTRFS_I(inode)->runtime_flags))
+               filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+
+       filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
        end = orig_end;
        found = 0;
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
new file mode 100644 (file)
index 0000000..9e111e4
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+struct rcu_string {
+       struct rcu_head rcu;
+       char str[0];
+};
+
+static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
+{
+       size_t len = strlen(src) + 1;
+       struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
+                                        (len * sizeof(char)), mask);
+       if (!ret)
+               return ret;
+       strncpy(ret->str, src, len);
+       return ret;
+}
+
+static inline void rcu_string_free(struct rcu_string *str)
+{
+       if (str)
+               kfree_rcu(str, rcu);
+}
+
+#define printk_in_rcu(fmt, ...) do {   \
+       rcu_read_lock();                \
+       printk(fmt, __VA_ARGS__);       \
+       rcu_read_unlock();              \
+} while (0)
+
+#define printk_ratelimited_in_rcu(fmt, ...) do {       \
+       rcu_read_lock();                                \
+       printk_ratelimited(fmt, __VA_ARGS__);           \
+       rcu_read_unlock();                              \
+} while (0)
+
+#define rcu_str_deref(rcu_str) ({                              \
+       struct rcu_string *__str = rcu_dereference(rcu_str);    \
+       __str->str;                                             \
+})
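
The new rcu-string.h header wraps a string in an rcu_head so that readers can dereference a name under rcu_read_lock() while updaters swap and free it via kfree_rcu(). A minimal usage sketch, assuming a hypothetical structure with an __rcu-annotated name field (the real consumer in this series is btrfs_device, whose name accesses are converted to rcu_str_deref above); updaters are assumed to be serialized by the caller:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include "rcu-string.h"

    struct demo_dev {
            struct rcu_string __rcu *name;          /* hypothetical holder */
    };

    static int demo_set_name(struct demo_dev *dev, const char *path)
    {
            struct rcu_string *new_name, *old_name;

            new_name = rcu_string_strdup(path, GFP_KERNEL);
            if (!new_name)
                    return -ENOMEM;

            /* updates serialized externally, hence the "1" lockdep condition */
            old_name = rcu_dereference_protected(dev->name, 1);
            rcu_assign_pointer(dev->name, new_name);
            rcu_string_free(old_name);              /* freed after a grace period */
            return 0;
    }

    static void demo_print_name(struct demo_dev *dev)
    {
            /* rcu_str_deref() must run inside an RCU read-side section */
            printk_in_rcu(KERN_INFO "demo: device name is %s\n",
                          rcu_str_deref(dev->name));
    }
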
index a38cfa4f251ec1065410f561188c4adf5868cea3..b223620cd5a6d59aa4b707e2539e59a8244d2dd1 100644 (file)
@@ -26,6 +26,7 @@
 #include "backref.h"
 #include "extent_io.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -320,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
         * hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
-               printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+               printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
-                       swarn->logical, swarn->dev->name,
+                       swarn->logical, rcu_str_deref(swarn->dev->name),
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
                        (char *)(unsigned long)ipath->fspath->val[i]);
@@ -332,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
        return 0;
 
 err:
-       printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+       printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
-               swarn->logical, swarn->dev->name,
+               swarn->logical, rcu_str_deref(swarn->dev->name),
                (unsigned long long)swarn->sector, root, inum, offset, ret);
 
        free_ipath(ipath);
@@ -390,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
                do {
                        ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
                                                        &ref_root, &ref_level);
-                       printk(KERN_WARNING
+                       printk_in_rcu(KERN_WARNING
                                "btrfs: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
-                               "%llu\n", errstr, swarn.logical, dev->name,
+                               "%llu\n", errstr, swarn.logical,
+                               rcu_str_deref(dev->name),
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
@@ -580,9 +582,11 @@ out:
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.uncorrectable_errors;
                spin_unlock(&sdev->stat_lock);
-               printk_ratelimited(KERN_ERR
+
+               printk_ratelimited_in_rcu(KERN_ERR
                        "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
-                       (unsigned long long)fixup->logical, sdev->dev->name);
+                       (unsigned long long)fixup->logical,
+                       rcu_str_deref(sdev->dev->name));
        }
 
        btrfs_free_path(path);
@@ -936,18 +940,20 @@ corrected_error:
                        spin_lock(&sdev->stat_lock);
                        sdev->stat.corrected_errors++;
                        spin_unlock(&sdev->stat_lock);
-                       printk_ratelimited(KERN_ERR
+                       printk_ratelimited_in_rcu(KERN_ERR
                                "btrfs: fixed up error at logical %llu on dev %s\n",
-                               (unsigned long long)logical, sdev->dev->name);
+                               (unsigned long long)logical,
+                               rcu_str_deref(sdev->dev->name));
                }
        } else {
 did_not_correct_error:
                spin_lock(&sdev->stat_lock);
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
-               printk_ratelimited(KERN_ERR
+               printk_ratelimited_in_rcu(KERN_ERR
                        "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
-                       (unsigned long long)logical, sdev->dev->name);
+                       (unsigned long long)logical,
+                       rcu_str_deref(sdev->dev->name));
        }
 
 out:
index 96eb9fef7bd279584cf4dd8b6ed42cc09e425c1d..e23991574fdf309e6ed95e23985fc5dd0af28e0f 100644 (file)
@@ -54,6 +54,7 @@
 #include "version.h"
 #include "export.h"
 #include "compression.h"
+#include "rcu-string.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/btrfs.h>
@@ -1186,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
+               ret = btrfs_resume_balance_async(fs_info);
+               if (ret)
+                       goto restore;
+
                sb->s_flags &= ~MS_RDONLY;
        }
 
@@ -1482,12 +1487,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
                                   "error %d\n", btrfs_ino(inode), ret);
 }
 
+static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
+{
+       struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
+       struct btrfs_fs_devices *cur_devices;
+       struct btrfs_device *dev, *first_dev = NULL;
+       struct list_head *head;
+       struct rcu_string *name;
+
+       mutex_lock(&fs_info->fs_devices->device_list_mutex);
+       cur_devices = fs_info->fs_devices;
+       while (cur_devices) {
+               head = &cur_devices->devices;
+               list_for_each_entry(dev, head, dev_list) {
+                       if (!first_dev || dev->devid < first_dev->devid)
+                               first_dev = dev;
+               }
+               cur_devices = cur_devices->seed;
+       }
+
+       if (first_dev) {
+               rcu_read_lock();
+               name = rcu_dereference(first_dev->name);
+               seq_escape(m, name->str, " \t\n\\");
+               rcu_read_unlock();
+       } else {
+               WARN_ON(1);
+       }
+       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+       return 0;
+}
+
 static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
        .put_super      = btrfs_put_super,
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
+       .show_devname   = btrfs_show_devname,
        .write_inode    = btrfs_write_inode,
        .dirty_inode    = btrfs_fs_dirty_inode,
        .alloc_inode    = btrfs_alloc_inode,
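
With .show_devname wired into btrfs_super_ops, the device column that a btrfs
mount reports in /proc/self/mounts comes from btrfs_show_devname() above: the
member device with the lowest devid, walking any seed device lists as well. A
small user-space check, offered only as an illustration (the mount point below
is hypothetical):

	#include <stdio.h>
	#include <string.h>

	/* Print the device field for one mount point; the first column of each
	 * /proc/self/mounts line is whatever the filesystem's show_devname
	 * callback emitted.
	 */
	int main(void)
	{
		char dev[256], mnt[256], rest[512];
		FILE *f = fopen("/proc/self/mounts", "r");

		if (!f)
			return 1;
		while (fscanf(f, "%255s %255s %511[^\n]\n", dev, mnt, rest) == 3) {
			if (strcmp(mnt, "/mnt/btrfs") == 0)	/* hypothetical mount point */
				printf("device shown for %s: %s\n", mnt, dev);
		}
		fclose(f);
		return 0;
	}
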
index 1791c6e3d83487d82c9ffe80ab0239976cfd1c96..b72b068183ec6bb334a1cb9088ffa02d7eb13029 100644 (file)
@@ -100,6 +100,10 @@ loop:
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = fs_info->running_transaction;
                goto loop;
+       } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+               spin_unlock(&root->fs_info->trans_lock);
+               kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+               return -EROFS;
        }
 
        atomic_set(&cur_trans->num_writers, 1);
@@ -1213,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 
 
 static void cleanup_transaction(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root)
+                               struct btrfs_root *root, int err)
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
 
        WARN_ON(trans->use_count > 1);
 
+       btrfs_abort_transaction(trans, root, err);
+
        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
+       if (cur_trans == root->fs_info->running_transaction) {
+               root->fs_info->running_transaction = NULL;
+               root->fs_info->trans_no_join = 0;
+       }
        spin_unlock(&root->fs_info->trans_lock);
 
        btrfs_cleanup_one_transaction(trans->transaction, root);
@@ -1526,7 +1536,7 @@ cleanup_transaction:
 //     WARN_ON(1);
        if (current->journal_info == trans)
                current->journal_info = NULL;
-       cleanup_transaction(trans, root);
+       cleanup_transaction(trans, root, ret);
 
        return ret;
 }
index 2017d0ff511ca3304dad46e85045ad2ab28d4e75..8abeae4224f92dbd870877b76224515c304e979d 100644 (file)
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        kfree(name);
 
        iput(inode);
+
+       btrfs_run_delayed_items(trans, root);
        return ret;
 }
 
@@ -895,6 +897,7 @@ again:
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
+                               btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);
+
+                       btrfs_run_delayed_items(trans, root);
+
                        kfree(name);
                        iput(inode);
 
index 7782020996feccd4b7103528a4c2989230f79b71..ecaad40e7ef499fedb7667659d9efd955a661eaa 100644 (file)
@@ -35,6 +35,7 @@
 #include "volumes.h"
 #include "async-thread.h"
 #include "check-integrity.h"
+#include "rcu-string.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
@@ -64,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
        }
        kfree(fs_devices);
@@ -334,8 +335,8 @@ static noinline int device_list_add(const char *path,
 {
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
+       struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);
-       char *name;
 
        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
@@ -369,11 +370,13 @@ static noinline int device_list_add(const char *path,
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                spin_lock_init(&device->io_lock);
-               device->name = kstrdup(path, GFP_NOFS);
-               if (!device->name) {
+
+               name = rcu_string_strdup(path, GFP_NOFS);
+               if (!name) {
                        kfree(device);
                        return -ENOMEM;
                }
+               rcu_assign_pointer(device->name, name);
                INIT_LIST_HEAD(&device->dev_alloc_list);
 
                /* init readahead state */
@@ -390,12 +393,12 @@ static noinline int device_list_add(const char *path,
 
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
-       } else if (!device->name || strcmp(device->name, path)) {
-               name = kstrdup(path, GFP_NOFS);
+       } else if (!device->name || strcmp(device->name->str, path)) {
+               name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
-               kfree(device->name);
-               device->name = name;
+               rcu_string_free(device->name);
+               rcu_assign_pointer(device->name, name);
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
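
The device_list_add() hunk above is the template for every name update in this
file: allocate the replacement with rcu_string_strdup(), drop the old string
with rcu_string_free(), and publish the new one with rcu_assign_pointer().
rcu_string_free() presumably defers the actual free past a grace period
(something like kfree_rcu), which is what keeps concurrent readers under
rcu_read_lock() safe. Condensed, hedged sketch of that swap, using only the
helpers visible in this patch:

	static int rename_device(struct btrfs_device *dev, const char *new_path)
	{
		struct rcu_string *name = rcu_string_strdup(new_path, GFP_NOFS);

		if (!name)
			return -ENOMEM;
		rcu_string_free(dev->name);		/* old string reclaimed after a grace period */
		rcu_assign_pointer(dev->name, name);	/* publish the replacement to readers */
		return 0;
	}
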
@@ -430,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 
        /* We have held the volume lock, it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
+               struct rcu_string *name;
+
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;
 
-               device->name = kstrdup(orig_dev->name, GFP_NOFS);
-               if (!device->name) {
+               /*
+                * This is ok to do without the RCU read lock held because we
+                * hold the uuid mutex, so nothing we touch in here is going
+                * to disappear.
+                */
+               name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+               if (!name) {
                        kfree(device);
                        goto error;
                }
+               rcu_assign_pointer(device->name, name);
 
                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
@@ -491,7 +501,7 @@ again:
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
        }
 
@@ -516,7 +526,7 @@ static void __free_device(struct work_struct *work)
        if (device->bdev)
                blkdev_put(device->bdev, device->mode);
 
-       kfree(device->name);
+       rcu_string_free(device->name);
        kfree(device);
 }
 
@@ -540,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;
+               struct rcu_string *name;
 
                if (device->bdev)
                        fs_devices->open_devices--;
@@ -555,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                BUG_ON(!new_device); /* -ENOMEM */
                memcpy(new_device, device, sizeof(*new_device));
-               new_device->name = kstrdup(device->name, GFP_NOFS);
-               BUG_ON(device->name && !new_device->name); /* -ENOMEM */
+
+               /* Safe because we are under uuid_mutex */
+               name = rcu_string_strdup(device->name->str, GFP_NOFS);
+               BUG_ON(device->name && !name); /* -ENOMEM */
+               rcu_assign_pointer(new_device->name, name);
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
@@ -621,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                if (!device->name)
                        continue;
 
-               bdev = blkdev_get_by_path(device->name, flags, holder);
+               bdev = blkdev_get_by_path(device->name->str, flags, holder);
                if (IS_ERR(bdev)) {
-                       printk(KERN_INFO "open %s failed\n", device->name);
+                       printk(KERN_INFO "open %s failed\n", device->name->str);
                        goto error;
                }
                filemap_write_and_wait(bdev->bd_inode->i_mapping);
@@ -1632,6 +1646,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        struct block_device *bdev;
        struct list_head *devices;
        struct super_block *sb = root->fs_info->sb;
+       struct rcu_string *name;
        u64 total_bytes;
        int seeding_dev = 0;
        int ret = 0;
@@ -1671,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                goto error;
        }
 
-       device->name = kstrdup(device_path, GFP_NOFS);
-       if (!device->name) {
+       name = rcu_string_strdup(device_path, GFP_NOFS);
+       if (!name) {
                kfree(device);
                ret = -ENOMEM;
                goto error;
        }
+       rcu_assign_pointer(device->name, name);
 
        ret = find_next_devid(root, &device->devid);
        if (ret) {
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
                goto error;
        }
 
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
-               kfree(device->name);
+               rcu_string_free(device->name);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
@@ -1796,7 +1812,7 @@ error_trans:
        unlock_chunks(root);
        btrfs_abort_transaction(trans, root, ret);
        btrfs_end_transaction(trans, root);
-       kfree(device->name);
+       rcu_string_free(device->name);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2829,31 +2845,48 @@ out:
 
 static int balance_kthread(void *data)
 {
-       struct btrfs_balance_control *bctl =
-                       (struct btrfs_balance_control *)data;
-       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
-       set_balance_control(bctl);
-
-       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
-               printk(KERN_INFO "btrfs: force skipping balance\n");
-       } else {
+       if (fs_info->balance_ctl) {
                printk(KERN_INFO "btrfs: continuing balance\n");
-               ret = btrfs_balance(bctl, NULL);
+               ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+
        return ret;
 }
 
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
 {
        struct task_struct *tsk;
+
+       spin_lock(&fs_info->balance_lock);
+       if (!fs_info->balance_ctl) {
+               spin_unlock(&fs_info->balance_lock);
+               return 0;
+       }
+       spin_unlock(&fs_info->balance_lock);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+               return 0;
+       }
+
+       tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
        struct btrfs_balance_control *bctl;
        struct btrfs_balance_item *item;
        struct btrfs_disk_balance_args disk_bargs;
@@ -2866,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        if (!path)
                return -ENOMEM;
 
-       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
-       if (!bctl) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        key.objectid = BTRFS_BALANCE_OBJECTID;
        key.type = BTRFS_BALANCE_ITEM_KEY;
        key.offset = 0;
 
-       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        if (ret < 0)
-               goto out_bctl;
+               goto out;
        if (ret > 0) { /* ret = -ENOENT; */
                ret = 0;
-               goto out_bctl;
+               goto out;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
        }
 
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-       bctl->fs_info = tree_root->fs_info;
-       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+       bctl->fs_info = fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item);
+       bctl->flags |= BTRFS_BALANCE_RESUME;
 
        btrfs_balance_data(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2897,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
-       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
-       if (IS_ERR(tsk))
-               ret = PTR_ERR(tsk);
-       else
-               goto out;
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
 
-out_bctl:
-       kfree(bctl);
+       set_balance_control(bctl);
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
 out:
        btrfs_free_path(path);
        return ret;
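
The balance rework splits what used to be a single step: btrfs_recover_balance()
now only reads the balance item and installs the balance control, while
btrfs_resume_balance_async() (also called from the btrfs_remount hunk earlier)
decides whether to spawn the btrfs-balance kthread, honouring skip_balance. A
hedged sketch of the resulting call order; open_ctree_stage() is a stand-in
name, not a function from this patch:

	static int open_ctree_stage(struct btrfs_fs_info *fs_info, int read_only)
	{
		int ret;

		/* 1. Still read-only: load any on-disk balance item. */
		ret = btrfs_recover_balance(fs_info);
		if (ret)
			return ret;

		/* 2. Only once the fs is writable: kick off the async resume. */
		if (!read_only)
			ret = btrfs_resume_balance_async(fs_info);

		return ret;
	}
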
@@ -4045,16 +4078,18 @@ static void btrfs_end_bio(struct bio *bio, int err)
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
                        dev = bbio->stripes[stripe_index].dev;
-                       if (bio->bi_rw & WRITE)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_WRITE_ERRS);
-                       else
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_READ_ERRS);
-                       if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
-                               btrfs_dev_stat_inc(dev,
-                                                  BTRFS_DEV_STAT_FLUSH_ERRS);
-                       btrfs_dev_stat_print_on_error(dev);
+                       if (dev->bdev) {
+                               if (bio->bi_rw & WRITE)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_WRITE_ERRS);
+                               else
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_READ_ERRS);
+                               if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                                       btrfs_dev_stat_inc(dev,
+                                               BTRFS_DEV_STAT_FLUSH_ERRS);
+                               btrfs_dev_stat_print_on_error(dev);
+                       }
                }
        }
 
@@ -4204,10 +4239,17 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
                dev = bbio->stripes[dev_nr].dev;
                if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
+#ifdef DEBUG
+                       struct rcu_string *name;
+
+                       rcu_read_lock();
+                       name = rcu_dereference(dev->name);
                        pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
                                 "(%s id %llu), size=%u\n", rw,
                                 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
-                                dev->name, dev->devid, bio->bi_size);
+                                name->str, dev->devid, bio->bi_size);
+                       rcu_read_unlock();
+#endif
                        bio->bi_bdev = dev->bdev;
                        if (async_submit)
                                schedule_bio(root, dev, rw, bio);
@@ -4694,8 +4736,9 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
                key.offset = device->devid;
                ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
                if (ret) {
-                       printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
-                              device->name, (unsigned long long)device->devid);
+                       printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
+                                     rcu_str_deref(device->name),
+                                     (unsigned long long)device->devid);
                        __btrfs_reset_dev_stats(device);
                        device->dev_stats_valid = 1;
                        btrfs_release_path(path);
@@ -4747,8 +4790,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
        BUG_ON(!path);
        ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
        if (ret < 0) {
-               printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
-                      ret, device->name);
+               printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
+                             ret, rcu_str_deref(device->name));
                goto out;
        }
 
@@ -4757,8 +4800,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
                /* need to delete old one and insert a new one */
                ret = btrfs_del_item(trans, dev_root, path);
                if (ret != 0) {
-                       printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
-                              device->name, ret);
+                       printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
+                                     rcu_str_deref(device->name), ret);
                        goto out;
                }
                ret = 1;
@@ -4770,8 +4813,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
                ret = btrfs_insert_empty_item(trans, dev_root, path,
                                              &key, sizeof(*ptr));
                if (ret < 0) {
-                       printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
-                              device->name, ret);
+                       printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
+                                     rcu_str_deref(device->name), ret);
                        goto out;
                }
        }
@@ -4823,9 +4866,9 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
 {
        if (!dev->dev_stats_valid)
                return;
-       printk_ratelimited(KERN_ERR
+       printk_ratelimited_in_rcu(KERN_ERR
                           "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
-                          dev->name,
+                          rcu_str_deref(dev->name),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
@@ -4837,8 +4880,8 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
 
 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
 {
-       printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
-              dev->name,
+       printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
+              rcu_str_deref(dev->name),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
index 3406a88ca83e023429b8af19f2d6aa64d4cac6f8..95f6637614db5932f8d52daba79943514a7ed8da 100644 (file)
@@ -58,7 +58,7 @@ struct btrfs_device {
        /* the mode sent to blkdev_get */
        fmode_t mode;
 
-       char *name;
+       struct rcu_string *name;
 
        /* the internal btrfs device id */
        u64 devid;
@@ -281,7 +281,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_balance_control *bctl,
                  struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
index 838a9cf246bd0fa561ab66295f9bb3df77e0c6a2..c7062c896d7c20489f41ea838640a5cfa5387ca9 100644 (file)
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+       int ret;
+       struct buffer_head *bh;
+
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-       for (;;) {
-               struct buffer_head * bh;
-               int ret;
+retry:
+       bh = __find_get_block(bdev, block, size);
+       if (bh)
+               return bh;
 
+       ret = grow_buffers(bdev, block, size);
+       if (ret == 0) {
+               free_more_memory();
+               goto retry;
+       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
-
-               ret = grow_buffers(bdev, block, size);
-               if (ret < 0)
-                       return NULL;
-               if (ret == 0)
-                       free_more_memory();
        }
+       return NULL;
 }
 
 /*
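
The __getblk_slow() rewrite replaces the unbounded for (;;) with an explicit
retry label, but the three outcomes of grow_buffers() keep their meaning:
negative means give up, zero means free some memory and retry, positive means
buffers were grown so the lookup is retried once instead of looping forever. A
stand-alone mock of that control flow, with dummy stubs so it can be compiled
and traced outside the kernel:

	#include <stdio.h>

	static int attempts;

	/* Stubs standing in for __find_get_block()/grow_buffers()/free_more_memory(). */
	static void *find_block(void)        { return attempts >= 2 ? "bh" : NULL; }
	static int   grow(void)              { return attempts++ ? 1 : 0; }
	static void  free_more_memory(void)  { puts("freeing memory, will retry"); }

	static void *getblk_slow(void)
	{
		void *bh;
		int ret;

	retry:
		bh = find_block();
		if (bh)
			return bh;

		ret = grow();
		if (ret == 0) {
			free_more_memory();
			goto retry;
		} else if (ret > 0) {
			bh = find_block();
			if (bh)
				return bh;
		}
		return NULL;	/* ret < 0, or the freshly grown buffer vanished again */
	}

	int main(void)
	{
		printf("result: %s\n", (char *)getblk_slow());
		return 0;
	}
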
index 173b1d22e59b5a4bf8ed714f72b233cb70537468..8b67304e4b8079efe80eecd716fb1e98b5486842 100644 (file)
        (CONGESTION_ON_THRESH(congestion_kb) -                          \
         (CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
-
+static inline struct ceph_snap_context *page_snap_context(struct page *page)
+{
+       if (PagePrivate(page))
+               return (void *)page->private;
+       return NULL;
+}
 
 /*
  * Dirty a page.  Optimistically adjust accounting, on the assumption
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
-       struct ceph_snap_context *snapc = (void *)page->private;
+       struct ceph_snap_context *snapc = page_snap_context(page);
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(!page->private);
        BUG_ON(!PagePrivate(page));
        BUG_ON(!page->mapping);
 
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g)
        struct inode *inode = page->mapping ? page->mapping->host : NULL;
        dout("%p releasepage %p idx %lu\n", inode, page, page->index);
        WARN_ON(PageDirty(page));
-       WARN_ON(page->private);
        WARN_ON(PagePrivate(page));
        return 0;
 }
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        osdc = &fsc->client->osdc;
 
        /* verify this is a writeable snap context */
-       snapc = (void *)page->private;
+       snapc = page_snap_context(page);
        if (snapc == NULL) {
                dout("writepage %p page %p not dirty?\n", inode, page);
                goto out;
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        oldest = get_oldest_context(inode, &snap_size);
        if (snapc->seq > oldest->seq) {
                dout("writepage %p page %p snapc %p not writeable - noop\n",
-                    inode, page, (void *)page->private);
+                    inode, page, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON((current->flags & PF_MEMALLOC) == 0);
                ceph_put_snap_context(oldest);
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req,
                        clear_bdi_congested(&fsc->backing_dev_info,
                                            BLK_RW_ASYNC);
 
-               ceph_put_snap_context((void *)page->private);
+               ceph_put_snap_context(page_snap_context(page));
                page->private = 0;
                ClearPagePrivate(page);
                dout("unlocking %d %p\n", i, page);
@@ -795,7 +798,7 @@ get_more_pages:
                        }
 
                        /* only if matching snap context */
-                       pgsnapc = (void *)page->private;
+                       pgsnapc = page_snap_context(page);
                        if (pgsnapc->seq > snapc->seq) {
                                dout("page snapc %p %lld > oldest %p %lld\n",
                                     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
@@ -984,7 +987,7 @@ retry_locked:
        BUG_ON(!ci->i_snap_realm);
        down_read(&mdsc->snap_rwsem);
        BUG_ON(!ci->i_snap_realm->cached_context);
-       snapc = (void *)page->private;
+       snapc = page_snap_context(page);
        if (snapc && snapc != ci->i_head_snapc) {
                /*
                 * this page is already dirty in another (older) snap
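
page_snap_context() centralizes the cast of page->private and returns NULL when
PagePrivate is not set, which is why the BUG_ON(!page->private) and
WARN_ON(page->private) checks above can be dropped in favour of the PagePrivate
ones. Hedged usage sketch, not code from the patch:

	static bool page_has_writeable_snapc(struct page *page,
					     struct ceph_snap_context *oldest)
	{
		struct ceph_snap_context *snapc = page_snap_context(page);

		if (!snapc)			/* no private data: nothing to write back */
			return false;
		return snapc->seq <= oldest->seq;	/* only contexts up to the oldest are writeable */
	}
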
index 20350a93ed99105062743e1123e441af52b59a39..6df0cbe1cbc90bc1fd7e978172ed7f05f36e3cbe 100644 (file)
@@ -174,6 +174,7 @@ struct smb_version_operations {
        void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
        void (*set_credits)(struct TCP_Server_Info *, const int);
        int * (*get_credits_field)(struct TCP_Server_Info *);
+       __u64 (*get_next_mid)(struct TCP_Server_Info *);
        /* data offset from read response message */
        unsigned int (*read_data_offset)(char *);
        /* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
        server->ops->set_credits(server, val);
 }
 
+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+       return server->ops->get_next_mid(server);
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
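
Routing mid allocation through smb_version_operations means callers only ever
see get_next_mid(server), and each dialect can plug in its own allocator (the
SMB1 one is added in the smb1ops.c hunk further down). A compilable toy version
of the same indirection, with made-up names, in case the shape of the pattern
is unclear:

	#include <stdio.h>

	struct server;

	struct version_ops {
		unsigned long long (*get_next_mid)(struct server *);
	};

	struct server {
		const struct version_ops *ops;
		unsigned long long current_mid;
	};

	static unsigned long long smb1_next_mid(struct server *s)
	{
		return ++s->current_mid & 0xffff;	/* SMB1 mids are 16 bit */
	}

	static unsigned long long get_next_mid(struct server *s)
	{
		return s->ops->get_next_mid(s);		/* dialect-specific allocator */
	}

	int main(void)
	{
		static const struct version_ops smb1_ops = { .get_next_mid = smb1_next_mid };
		struct server srv = { .ops = &smb1_ops, .current_mid = 0 };

		printf("mid = %llu\n", get_next_mid(&srv));
		return 0;
	}
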
index 5ec21ecf7980e98a2d51ed9d2edf41c720952d4d..0a6cbfe2761ee5c62d7e31158a28fab7df63c1f0 100644 (file)
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
                                void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
                             const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
index b5ad716b2642138ebebdc18a975718f9a09855c8..4ee522b3f66f0036737a688b0c97606ee1efe2ce 100644 (file)
@@ -86,7 +86,31 @@ static struct {
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */
 
-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+       mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+       mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -268,7 +292,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
                return rc;
 
        buffer = (struct smb_hdr *)*request_buf;
-       buffer->Mid = GetNextMid(ses->server);
+       buffer->Mid = get_next_mid(ses->server);
        if (ses->capabilities & CAP_UNICODE)
                buffer->Flags2 |= SMBFLG2_UNICODE;
        if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +426,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
 
        cFYI(1, "secFlags 0x%x", secFlags);
 
-       pSMB->hdr.Mid = GetNextMid(server);
+       pSMB->hdr.Mid = get_next_mid(server);
        pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 
        if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +806,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
                return rc;
        }
 
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        if (ses->server->sec_mode &
                   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        }
 
        /* marshal up the page array */
+       cifs_kmap_lock();
        len = rdata->marshal_iov(rdata, data_len);
+       cifs_kmap_unlock();
        data_len -= len;
 
        /* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
         * and set the iov_len properly for each one. It may also set
         * wdata->bytes too.
         */
+       cifs_kmap_lock();
        wdata->marshal_iov(iov, wdata);
+       cifs_kmap_unlock();
 
        cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
 
@@ -4762,7 +4790,7 @@ getDFSRetry:
 
        /* server pointer checked in called function,
        but should never be null here anyway */
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
        pSMB->hdr.Tid = ses->ipc_tid;
        pSMB->hdr.Uid = ses->Suid;
        if (ses->capabilities & CAP_STATUS32)
index ccafdedd0dbc4df14e04c0087d28b1dfa453be1f..94b7788c3189281e043ecf82b9bf4fb1e9d0e0f7 100644 (file)
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p)
                if (mid_entry != NULL) {
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
-               } else if (!server->ops->is_oplock_break(buf, server)) {
+               } else if (!server->ops->is_oplock_break ||
+                          !server->ops->is_oplock_break(buf, server)) {
                        cERROR(1, "No task to wake, unknown frame received! "
                                   "NumMids %d", atomic_read(&midCount));
                        cifs_dump_mem("Received Data is: ", buf,
                                      HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-                       server->ops->dump_detail(buf);
+                       if (server->ops->dump_detail)
+                               server->ops->dump_detail(buf);
                        cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -1651,24 +1653,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                         * If yes, we have encountered a double delimiter;
                         * reset the NULL character to the delimiter
                         */
-                       if (tmp_end < end && tmp_end[1] == delim)
+                       if (tmp_end < end && tmp_end[1] == delim) {
                                tmp_end[0] = delim;
 
-                       /* Keep iterating until we get to a single deliminator
-                        * OR the end
-                        */
-                       while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
-                              (tmp_end[1] == delim)) {
-                               tmp_end = (char *) &tmp_end[2];
-                       }
+                               /* Keep iterating until we get to a single
+                                * delimiter OR the end
+                                */
+                               while ((tmp_end = strchr(tmp_end, delim))
+                                       != NULL && (tmp_end[1] == delim)) {
+                                               tmp_end = (char *) &tmp_end[2];
+                               }
 
-                       /* Reset var options to point to next element */
-                       if (tmp_end) {
-                               tmp_end[0] = '\0';
-                               options = (char *) &tmp_end[1];
-                       } else
-                               /* Reached the end of the mount option string */
-                               options = end;
+                               /* Reset var options to point to next element */
+                               if (tmp_end) {
+                                       tmp_end[0] = '\0';
+                                       options = (char *) &tmp_end[1];
+                               } else
+                                       /* Reached the end of the mount option
+                                        * string */
+                                       options = end;
+                       }
 
                        /* Now build new password string */
                        temp_len = strlen(value);
@@ -3441,6 +3445,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT   (LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT   (1<<24)
+#endif /* CONFIG_HIGHMEM */
+
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
@@ -3471,6 +3487,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
                wsize = min_t(unsigned int, wsize,
                                server->maxBuf - sizeof(WRITE_REQ) + 4);
 
+       /* limit to the amount that we can kmap at once */
+       wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_WSIZE */
        wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
 
@@ -3491,18 +3510,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
         * MS-CIFS indicates that servers are only limited by the client's
         * bufsize for reads, testing against win98se shows that it throws
         * INVALID_PARAMETER errors if you try to request too large a read.
+        * OS/2 just sends back short reads.
         *
-        * If the server advertises a MaxBufferSize of less than one page,
-        * assume that it also can't satisfy reads larger than that either.
-        *
-        * FIXME: Is there a better heuristic for this?
+        * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+        * it can't handle a read request larger than its MaxBufferSize either.
         */
        if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
                defsize = CIFS_DEFAULT_IOSIZE;
        else if (server->capabilities & CAP_LARGE_READ_X)
                defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
-       else if (server->maxBuf >= PAGE_CACHE_SIZE)
-               defsize = CIFSMaxBufSize;
        else
                defsize = server->maxBuf - sizeof(READ_RSP);
 
@@ -3515,6 +3531,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
        if (!(server->capabilities & CAP_LARGE_READ_X))
                rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
 
+       /* limit to the amount that we can kmap at once */
+       rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_RSIZE */
        rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
 
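
Rough numbers for the new cap (my arithmetic, not from the patch): with the
common CONFIG_HIGHMEM value of LAST_PKMAP = 1024 and 4 KiB pages,
CIFS_KMAP_SIZE_LIMIT works out to 1024 * 4096 = 4 MiB (half that on
configurations where LAST_PKMAP is 512), comfortably above the 60 KiB/64 KiB
non-POSIX defaults defined above, so the min_t() clamps only bite when wsize or
rsize has been negotiated unusually large. On non-highmem builds the 1<<24
fallback is 16 MiB and is effectively a no-op.
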
@@ -3938,7 +3957,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
                        NULL /*no tid */ , 4 /*wct */ );
 
-       smb_buffer->Mid = GetNextMid(ses->server);
+       smb_buffer->Mid = get_next_mid(ses->server);
        smb_buffer->Uid = ses->Suid;
        pSMB = (TCONX_REQ *) smb_buffer;
        pSMBr = (TCONX_RSP *) smb_buffer_response;
index 253170dfa71650704c8c2f6cdb69942635488404..513adbc211d7029d1c8b1d6670a1a37d9029cac6 100644 (file)
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
-       unsigned int num, max_num;
+       unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
                return rc;
        }
 
-       max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-                 sizeof(LOCKING_ANDX_RANGE);
+       /*
+        * Accessing maxBuf is racy with cifs_reconnect - need to store value
+        * and check it for zero before using.
+        */
+       max_buf = tcon->ses->server->maxBuf;
+       if (!max_buf) {
+               mutex_unlock(&cinode->lock_mutex);
+               FreeXid(xid);
+               return -EINVAL;
+       }
+
+       max_num = (max_buf - sizeof(struct smb_hdr)) /
+                                               sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        unsigned int i;
-       unsigned int max_num, num;
+       unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 
        INIT_LIST_HEAD(&tmp_llist);
 
-       max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-                 sizeof(LOCKING_ANDX_RANGE);
+       /*
+        * Accessing maxBuf is racy with cifs_reconnect - need to store value
+        * and check it for zero before using.
+        */
+       max_buf = tcon->ses->server->maxBuf;
+       if (!max_buf)
+               return -EINVAL;
+
+       max_num = (max_buf - sizeof(struct smb_hdr)) /
+                                               sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
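
Both locking paths now snapshot server->maxBuf into a local before deriving
max_num from it; as the comments note, maxBuf is racy with cifs_reconnect and
can go to zero at any time, and reading the field twice (once for the zero
check, once for the division) could observe two different values. The shape of
the fix, sketched generically (shared_max stands in for server->maxBuf, 64 for
sizeof(struct smb_hdr)):

	static int compute_slots(const unsigned int *shared_max, unsigned int per_entry)
	{
		unsigned int max_buf = *shared_max;	/* read once, then work on the copy */

		if (max_buf <= 64)		/* reset during reconnect, or too small */
			return -EINVAL;
		return (max_buf - 64) / per_entry;
	}
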
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                continue;
                        if (types[i] != li->type)
                                continue;
-                       if (!cinode->can_cache_brlcks) {
-                               cur->Pid = cpu_to_le16(li->pid);
-                               cur->LengthLow = cpu_to_le32((u32)li->length);
-                               cur->LengthHigh =
-                                       cpu_to_le32((u32)(li->length>>32));
-                               cur->OffsetLow = cpu_to_le32((u32)li->offset);
-                               cur->OffsetHigh =
-                                       cpu_to_le32((u32)(li->offset>>32));
-                               /*
-                                * We need to save a lock here to let us add
-                                * it again to the file's list if the unlock
-                                * range request fails on the server.
-                                */
-                               list_move(&li->llist, &tmp_llist);
-                               if (++num == max_num) {
-                                       stored_rc = cifs_lockv(xid, tcon,
-                                                              cfile->netfid,
-                                                              li->type, num,
-                                                              0, buf);
-                                       if (stored_rc) {
-                                               /*
-                                                * We failed on the unlock range
-                                                * request - add all locks from
-                                                * the tmp list to the head of
-                                                * the file's list.
-                                                */
-                                               cifs_move_llist(&tmp_llist,
-                                                               &cfile->llist);
-                                               rc = stored_rc;
-                                       } else
-                                               /*
-                                                * The unlock range request
-                                                * succeed - free the tmp list.
-                                                */
-                                               cifs_free_llist(&tmp_llist);
-                                       cur = buf;
-                                       num = 0;
-                               } else
-                                       cur++;
-                       } else {
+                       if (cinode->can_cache_brlcks) {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the file's list.
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
+                               continue;
                        }
+                       cur->Pid = cpu_to_le16(li->pid);
+                       cur->LengthLow = cpu_to_le32((u32)li->length);
+                       cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+                       cur->OffsetLow = cpu_to_le32((u32)li->offset);
+                       cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+                       /*
+                        * We need to save a lock here to let us add it again to
+                        * the file's list if the unlock range request fails on
+                        * the server.
+                        */
+                       list_move(&li->llist, &tmp_llist);
+                       if (++num == max_num) {
+                               stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
+                                                      li->type, num, 0, buf);
+                               if (stored_rc) {
+                                       /*
+                                        * We failed on the unlock range
+                                        * request - add all locks from the tmp
+                                        * list to the head of the file's list.
+                                        */
+                                       cifs_move_llist(&tmp_llist,
+                                                       &cfile->llist);
+                                       rc = stored_rc;
+                               } else
+                                       /*
+                                        * The unlock range request succeeded -
+                                        * free the tmp list.
+                                        */
+                                       cifs_free_llist(&tmp_llist);
+                               cur = buf;
+                               num = 0;
+                       } else
+                               cur++;
                }
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
index e2552d2b2e42c551fde38d91e3ead83c6cfccd47..557506ae1e2a1c70c7edcb622b52d55fd0627dc7 100644 (file)
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
        return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-       __u64 mid = 0;
-       __u16 last_mid, cur_mid;
-       bool collision;
-
-       spin_lock(&GlobalMid_Lock);
-
-       /* mid is 16 bit only for CIFS/SMB */
-       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-       /* we do not want to loop forever */
-       last_mid = cur_mid;
-       cur_mid++;
-
-       /*
-        * This nested loop looks more expensive than it is.
-        * In practice the list of pending requests is short,
-        * fewer than 50, and the mids are likely to be unique
-        * on the first pass through the loop unless some request
-        * takes longer than the 64 thousand requests before it
-        * (and it would also have to have been a request that
-        * did not time out).
-        */
-       while (cur_mid != last_mid) {
-               struct mid_q_entry *mid_entry;
-               unsigned int num_mids;
-
-               collision = false;
-               if (cur_mid == 0)
-                       cur_mid++;
-
-               num_mids = 0;
-               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-                       ++num_mids;
-                       if (mid_entry->mid == cur_mid &&
-                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-                               /* This mid is in use, try a different one */
-                               collision = true;
-                               break;
-                       }
-               }
-
-               /*
-                * if we have more than 32k mids in the list, then something
-                * is very wrong. Possibly a local user is trying to DoS the
-                * box by issuing long-running calls and SIGKILL'ing them. If
-                * we get to 2^16 mids then we're in big trouble as this
-                * function could loop forever.
-                *
-                * Go ahead and assign out the mid in this situation, but force
-                * an eventual reconnect to clean out the pending_mid_q.
-                */
-               if (num_mids > 32768)
-                       server->tcpStatus = CifsNeedReconnect;
-
-               if (!collision) {
-                       mid = (__u64)cur_mid;
-                       server->CurrentMid = mid;
-                       break;
-               }
-               cur_mid++;
-       }
-       spin_unlock(&GlobalMid_Lock);
-       return mid;
-}
-
 /* NB: MID can not be set if treeCon not passed in, in that
    case it is the responsibility of the caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
-                       buffer->Mid = GetNextMid(treeCon->ses->server);
+                       buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
index 0a8224d1c4c5f2df8545f2c84f9e668feba2e0e9..a4217f02fab2860ced9f1bf444662cbf05461a4e 100644 (file)
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        dentry = d_lookup(parent, name);
        if (dentry) {
-               /* FIXME: check for inode number changes? */
-               if (dentry->d_inode != NULL)
+               inode = dentry->d_inode;
+               /* update inode in place if i_ino didn't change */
+               if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+                       cifs_fattr_to_inode(inode, fattr);
                        return dentry;
+               }
                d_drop(dentry);
                dput(dentry);
        }
index d9d615fbed3f5547ad62fadb0c811f339769cede..6dec38f5522d19198beebb23942d21dd850d0dde 100644 (file)
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
        return &server->credits;
 }
 
+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series of requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+       __u64 mid = 0;
+       __u16 last_mid, cur_mid;
+       bool collision;
+
+       spin_lock(&GlobalMid_Lock);
+
+       /* mid is 16 bit only for CIFS/SMB */
+       cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+       /* we do not want to loop forever */
+       last_mid = cur_mid;
+       cur_mid++;
+
+       /*
+        * This nested loop looks more expensive than it is.
+        * In practice the list of pending requests is short,
+        * fewer than 50, and the mids are likely to be unique
+        * on the first pass through the loop unless some request
+        * takes longer than the 64 thousand requests before it
+        * (and it would also have to have been a request that
+        * did not time out).
+        */
+       while (cur_mid != last_mid) {
+               struct mid_q_entry *mid_entry;
+               unsigned int num_mids;
+
+               collision = false;
+               if (cur_mid == 0)
+                       cur_mid++;
+
+               num_mids = 0;
+               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+                       ++num_mids;
+                       if (mid_entry->mid == cur_mid &&
+                           mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+                               /* This mid is in use, try a different one */
+                               collision = true;
+                               break;
+                       }
+               }
+
+               /*
+                * if we have more than 32k mids in the list, then something
+                * is very wrong. Possibly a local user is trying to DoS the
+                * box by issuing long-running calls and SIGKILL'ing them. If
+                * we get to 2^16 mids then we're in big trouble as this
+                * function could loop forever.
+                *
+                * Go ahead and assign out the mid in this situation, but force
+                * an eventual reconnect to clean out the pending_mid_q.
+                */
+               if (num_mids > 32768)
+                       server->tcpStatus = CifsNeedReconnect;
+
+               if (!collision) {
+                       mid = (__u64)cur_mid;
+                       server->CurrentMid = mid;
+                       break;
+               }
+               cur_mid++;
+       }
+       spin_unlock(&GlobalMid_Lock);
+       return mid;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
        .add_credits = cifs_add_credits,
        .set_credits = cifs_set_credits,
        .get_credits_field = cifs_get_credits_field,
+       .get_next_mid = cifs_get_next_mid,
        .read_data_offset = cifs_read_data_offset,
        .read_data_length = cifs_read_data_length,
        .map_error = map_smb_to_linux_error,
index 1b36ffe6a47b5f6888519fea797aebdbe3a9da97..f25d4ea14be4b7d751e69817eca9bad2d6a14935 100644 (file)
@@ -365,16 +365,14 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
        if (mid == NULL)
                return -ENOMEM;
 
-       /* put it on the pending_mid_q */
-       spin_lock(&GlobalMid_Lock);
-       list_add_tail(&mid->qhead, &server->pending_mid_q);
-       spin_unlock(&GlobalMid_Lock);
-
        rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
-       if (rc)
-               delete_mid(mid);
+       if (rc) {
+               DeleteMidQEntry(mid);
+               return rc;
+       }
+
        *ret_mid = mid;
-       return rc;
+       return 0;
 }
 
 /*
@@ -407,17 +405,21 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
        mid->callback_data = cbdata;
        mid->mid_state = MID_REQUEST_SUBMITTED;
 
+       /* put it on the pending_mid_q */
+       spin_lock(&GlobalMid_Lock);
+       list_add_tail(&mid->qhead, &server->pending_mid_q);
+       spin_unlock(&GlobalMid_Lock);
+
        cifs_in_send_inc(server);
        rc = smb_sendv(server, iov, nvec);
        cifs_in_send_dec(server);
        cifs_save_when_sent(mid);
        mutex_unlock(&server->srv_mutex);
 
-       if (rc)
-               goto out_err;
+       if (rc == 0)
+               return 0;
 
-       return rc;
-out_err:
        delete_mid(mid);
        add_credits(server, 1);
        wake_up(&server->request_q);
@@ -779,7 +781,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
-       pSMB->hdr.Mid = GetNextMid(ses->server);
+       pSMB->hdr.Mid = get_next_mid(ses->server);
 
        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
index 85c9e2bff8e65126eaca755e14d1ee4a15a27170..40469044088def2607d19540ef0cb8dcce78ac91 100644 (file)
@@ -683,6 +683,8 @@ EXPORT_SYMBOL(dget_parent);
 /**
  * d_find_alias - grab a hashed alias of inode
  * @inode: inode in question
+ * @want_discon: flag, used by d_splice_alias, to request
+ *               that only a DISCONNECTED alias be returned.
  *
  * If inode has a hashed alias, or is a directory and has any alias,
  * acquire the reference to alias and return it. Otherwise return NULL.
@@ -691,9 +693,10 @@ EXPORT_SYMBOL(dget_parent);
  * of a filesystem.
  *
  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
- * any other hashed alias over that.
+ * any other hashed alias over that one unless @want_discon is set,
+ * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
  */
-static struct dentry *__d_find_alias(struct inode *inode)
+static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
        struct dentry *alias, *discon_alias;
 
@@ -705,7 +708,7 @@ again:
                        if (IS_ROOT(alias) &&
                            (alias->d_flags & DCACHE_DISCONNECTED)) {
                                discon_alias = alias;
-                       } else {
+                       } else if (!want_discon) {
                                __dget_dlock(alias);
                                spin_unlock(&alias->d_lock);
                                return alias;
@@ -736,7 +739,7 @@ struct dentry *d_find_alias(struct inode *inode)
 
        if (!list_empty(&inode->i_dentry)) {
                spin_lock(&inode->i_lock);
-               de = __d_find_alias(inode);
+               de = __d_find_alias(inode, 0);
                spin_unlock(&inode->i_lock);
        }
        return de;
@@ -1647,8 +1650,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 
        if (inode && S_ISDIR(inode->i_mode)) {
                spin_lock(&inode->i_lock);
-               new = __d_find_any_alias(inode);
+               new = __d_find_alias(inode, 1);
                if (new) {
+                       BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
                        spin_unlock(&inode->i_lock);
                        security_d_instantiate(new, inode);
                        d_move(new, dentry);
@@ -2478,7 +2482,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                struct dentry *alias;
 
                /* Does an aliased dentry already exist? */
-               alias = __d_find_alias(inode);
+               alias = __d_find_alias(inode, 0);
                if (alias) {
                        actual = alias;
                        write_seqlock(&rename_lock);
index 69f994a7d5249589bf594adaa4780bca967f08b2..0dbe58a8b172c1de225b3457600b50d644b1da35 100644 (file)
@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
        if (!IS_ERR(*lower_file))
                goto out;
-       if (flags & O_RDONLY) {
+       if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
        }
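The one-line eCryptfs fix above works because O_RDONLY is defined as 0, so "flags & O_RDONLY" can never be non-zero; the access mode has to be masked out with O_ACCMODE before comparing. A small userspace demonstration (illustrative only):

    /* O_RDONLY is 0, so the old test "flags & O_RDONLY" was always false. */
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            int flags = O_RDONLY | O_NONBLOCK;

            printf("O_RDONLY                        = %d\n", O_RDONLY);
            printf("flags & O_RDONLY                = %d\n", flags & O_RDONLY);
            printf("(flags & O_ACCMODE) == O_RDONLY = %d\n",
                   (flags & O_ACCMODE) == O_RDONLY);
            return 0;
    }

The first two lines print 0 while the corrected test prints 1, which is the read-only case the error path above is meant to catch.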
index 3a06f4043df42a811add69fd7ede7b582a94c347..c0038f6566d4df1493d9321821a53adb62b902ab 100644 (file)
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
                goto out_unlock_daemon;
        }
        daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       file->private_data = daemon;
        atomic_inc(&ecryptfs_num_miscdev_opens);
 out_unlock_daemon:
        mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 
        mutex_lock(&ecryptfs_daemon_hash_mux);
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon)
+               daemon = file->private_data;
        mutex_lock(&daemon->mux);
-       BUG_ON(daemon->pid != task_pid(current));
        BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
        daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
        atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
                          struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
                          u16 msg_flags, struct ecryptfs_daemon *daemon)
 {
-       int rc = 0;
+       struct ecryptfs_message *msg;
 
-       mutex_lock(&msg_ctx->mux);
-       msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
-                              GFP_KERNEL);
-       if (!msg_ctx->msg) {
-               rc = -ENOMEM;
+       msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+       if (!msg) {
                printk(KERN_ERR "%s: Out of memory whilst attempting "
                       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
-                      (sizeof(*msg_ctx->msg) + data_size));
-               goto out_unlock;
+                      (sizeof(*msg) + data_size));
+               return -ENOMEM;
        }
+
+       mutex_lock(&msg_ctx->mux);
+       msg_ctx->msg = msg;
        msg_ctx->msg->index = msg_ctx->index;
        msg_ctx->msg->data_len = data_size;
        msg_ctx->type = msg_type;
        memcpy(msg_ctx->msg->data, data, data_size);
        msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
-       mutex_lock(&daemon->mux);
        list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+       mutex_unlock(&msg_ctx->mux);
+
+       mutex_lock(&daemon->mux);
        daemon->num_queued_msg_ctx++;
        wake_up_interruptible(&daemon->wait);
        mutex_unlock(&daemon->mux);
-out_unlock:
-       mutex_unlock(&msg_ctx->mux);
-       return rc;
+
+       return 0;
 }
 
 /*
@@ -269,8 +274,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
+       if (task_pid(current) != daemon->pid) {
+               mutex_unlock(&daemon->mux);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EPERM;
+       }
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
                rc = 0;
                mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -307,9 +320,6 @@ check_list:
                 * message from the queue; try again */
                goto check_list;
        }
-       BUG_ON(euid != daemon->euid);
-       BUG_ON(current_user_ns() != daemon->user_ns);
-       BUG_ON(task_pid(current) != daemon->pid);
        msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
                                   struct ecryptfs_msg_ctx, daemon_out_list);
        BUG_ON(!msg_ctx);
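The ecryptfs_send_miscdev() rework above follows a common shape: do the allocation, which can fail, before taking any lock, so the error path never needs an unlock and the critical sections stay short. A minimal pthread sketch of that shape; the names and types here are invented for illustration:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct msg {
            size_t len;
            char data[];
    };

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct msg *ctx_msg;             /* protected by ctx_lock */

    static int send_msg(const char *data, size_t len)
    {
            /* Allocate first: no lock is held if this fails. */
            struct msg *m = malloc(sizeof(*m) + len);

            if (!m)
                    return -1;
            m->len = len;
            memcpy(m->data, data, len);

            pthread_mutex_lock(&ctx_lock);  /* short critical section */
            free(ctx_msg);                  /* drop any previous message */
            ctx_msg = m;
            pthread_mutex_unlock(&ctx_lock);
            return 0;
    }

    int main(void)
    {
            return send_msg("hello", 5) ? 1 : 0;
    }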
index 74598f67efebb85e71204a832c929dd20ea96771..1c8b55670804c20e88a10a48668f574d54f58205 100644 (file)
@@ -1710,7 +1710,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP))
+       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
                epds.events &= ~EPOLLWAKEUP;
 
        /*
index a79786a8d2c88d5b6c580859ef12496f43b4b0f4..da27b91ff1e8cbe87d0fe42aa5d39513e6a9deeb 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -819,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm)
        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
-       sync_mm_rss(old_mm);
        mm_release(tsk, old_mm);
 
        if (old_mm) {
+               sync_mm_rss(old_mm);
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
index 49cf230554a21d33785d16367d9397ab34a44d91..24a49d47e9354c00f0ebd2da6c92d3520e71332c 100644 (file)
@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
 out:
        ios->numdevs = devs_in_group;
        ios->pages_consumed = cur_pg;
-       if (unlikely(ret)) {
-               if (length == ios->length)
-                       return ret;
-               else
-                       ios->length -= length;
-       }
-       return 0;
+       return ret;
 }
 
 int ore_create(struct ore_io_state *ios)
index d222c77cfa1ba0669ca7580c420a1c31da35c86a..5f376d14fdcc3c0d9791fce5f2951d35b54c778b 100644 (file)
@@ -144,26 +144,26 @@ static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
 {
        unsigned data_devs = sp2d->data_devs;
        unsigned group_width = data_devs + sp2d->parity;
-       unsigned p;
+       int p, c;
 
        if (!sp2d->needed)
                return;
 
-       for (p = 0; p < sp2d->pages_in_unit; p++) {
-               struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
-               if (_1ps->write_count < group_width) {
-                       unsigned c;
+       for (c = data_devs - 1; c >= 0; --c)
+               for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
+                       struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
-                       for (c = 0; c < data_devs; c++)
-                               if (_1ps->page_is_read[c]) {
-                                       struct page *page = _1ps->pages[c];
+                       if (_1ps->page_is_read[c]) {
+                               struct page *page = _1ps->pages[c];
 
-                                       r4w->put_page(priv, page);
-                                       _1ps->page_is_read[c] = false;
-                               }
+                               r4w->put_page(priv, page);
+                               _1ps->page_is_read[c] = false;
+                       }
                }
 
+       for (p = 0; p < sp2d->pages_in_unit; p++) {
+               struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
                memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
                _1ps->write_count = 0;
                _1ps->tx = NULL;
@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
  * ios->sp2d[p][*], xor is calculated the same way. These pages are
  * allocated/freed and don't go through cache
  */
-static int _read_4_write(struct ore_io_state *ios)
+static int _read_4_write_first_stripe(struct ore_io_state *ios)
 {
-       struct ore_io_state *ios_read;
        struct ore_striping_info read_si;
        struct __stripe_pages_2d *sp2d = ios->sp2d;
        u64 offset = ios->si.first_stripe_start;
-       u64 last_stripe_end;
-       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
-       unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
-       int ret;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
 
        if (offset == ios->offset) /* Go to start collect $200 */
                goto read_last_stripe;
@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
        min_p = _sp2d_min_pg(sp2d);
        max_p = _sp2d_max_pg(sp2d);
 
+       ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
+                  offset, ios->offset, min_p, max_p);
+
        for (c = 0; ; c++) {
                ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
                read_si.obj_offset += min_p * PAGE_SIZE;
@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
        }
 
 read_last_stripe:
+       return 0;
+}
+
+static int _read_4_write_last_stripe(struct ore_io_state *ios)
+{
+       struct ore_striping_info read_si;
+       struct __stripe_pages_2d *sp2d = ios->sp2d;
+       u64 offset;
+       u64 last_stripe_end;
+       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
        offset = ios->offset + ios->length;
        if (offset % PAGE_SIZE)
                _add_to_r4w_last_page(ios, &offset);
@@ -527,15 +538,15 @@ read_last_stripe:
        c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
                       ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
 
-       BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
-       /* unaligned IO must be within a single stripe */
-
        if (min_p == sp2d->pages_in_unit) {
                /* Didn't do it yet */
                min_p = _sp2d_min_pg(sp2d);
                max_p = _sp2d_max_pg(sp2d);
        }
 
+       ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
+                  offset, last_stripe_end, min_p, max_p);
+
        while (offset < last_stripe_end) {
                struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
@@ -568,6 +579,15 @@ read_last_stripe:
        }
 
 read_it:
+       return 0;
+}
+
+static int _read_4_write_execute(struct ore_io_state *ios)
+{
+       struct ore_io_state *ios_read;
+       unsigned i;
+       int ret;
+
        ios_read = ios->ios_read_4_write;
        if (!ios_read)
                return 0;
@@ -591,6 +611,8 @@ read_it:
        }
 
        _mark_read4write_pages_uptodate(ios_read, ret);
+       ore_put_io_state(ios_read);
+       ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
        return 0;
 }
 
@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
                        /* If first stripe, Read in all read4write pages
                         * (if needed) before we calculate the first parity.
                         */
-                       _read_4_write(ios);
+                       _read_4_write_first_stripe(ios);
                }
+               if (!cur_len) /* If last stripe r4w pages of last stripe */
+                       _read_4_write_last_stripe(ios);
+               _read_4_write_execute(ios);
 
                for (i = 0; i < num_pages; i++) {
                        pages[i] = _raid_page_alloc();
@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
 
 int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
 {
-       struct ore_layout *layout = ios->layout;
-
        if (ios->parity_pages) {
+               struct ore_layout *layout = ios->layout;
                unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
-               unsigned stripe_size = ios->si.bytes_in_stripe;
-               u64 last_stripe, first_stripe;
 
                if (_sp2d_alloc(pages_in_unit, layout->group_width,
                                layout->parity, &ios->sp2d)) {
                        return -ENOMEM;
                }
-
-               /* Round io down to last full strip */
-               first_stripe = div_u64(ios->offset, stripe_size);
-               last_stripe = div_u64(ios->offset + ios->length, stripe_size);
-
-               /* If an IO spans more then a single stripe it must end at
-                * a stripe boundary. The reminder at the end is pushed into the
-                * next IO.
-                */
-               if (last_stripe != first_stripe) {
-                       ios->length = last_stripe * stripe_size - ios->offset;
-
-                       BUG_ON(!ios->length);
-                       ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
-                                       PAGE_SIZE;
-                       ios->si.length = ios->length; /*make it consistent */
-               }
        }
        return 0;
 }
index e32bc919e4e3413d6a8063ac3d0bfb466c959f22..5a7b691e748bdcda66746098d42194cb4682f7e5 100644 (file)
@@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = {
 static struct kobj_type uuid_ktype = {
 };
 
-void exofs_sysfs_dbg_print()
+void exofs_sysfs_dbg_print(void)
 {
 #ifdef CONFIG_EXOFS_DEBUG
        struct kobject *k_name, *k_tmp;
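The exofs change above adds the explicit (void): in C before C23 an empty parameter list leaves the parameters unspecified, so the compiler cannot reject a call that passes bogus arguments, which is why the kernel's prototype warnings flag it. A small standalone illustration; the function names are made up:

    #include <stdio.h>

    void old_style();        /* unspecified parameters: no type checking */
    void new_style(void);    /* explicitly takes no arguments */

    void old_style() { puts("old_style"); }
    void new_style(void) { puts("new_style"); }

    int main(void)
    {
            old_style();
            new_style();
            /* old_style(42);   accepted by many compilers, no diagnostic */
            /* new_style(42);   error: too many arguments to 'new_style' */
            return 0;
    }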
index 99b6324290db916466d8b5c0633e9fa216d21798..cee7812cc3cf9a17f8dc967eb7af480c079c5f44 100644 (file)
@@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
-               block_cluster = EXT4_B2C(sbi, (start -
-                                              ext4_block_bitmap(sb, gdp)));
+               block_cluster = EXT4_B2C(sbi,
+                                        ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
@@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
 
        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
-                                        start - ext4_inode_bitmap(sb, gdp));
+                                        ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
@@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
-                       c = EXT4_B2C(sbi, start - itbl_blk + i);
+                       c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
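The ext4 hunks above flip the operands of each subtraction: the cluster index of a metadata block within its group is (block - group start), and the old order underflowed. A tiny userspace sketch of the arithmetic, with b2c() standing in for EXT4_B2C and made-up block numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define CLUSTER_BITS 4                   /* 16 blocks per cluster (example) */
    #define b2c(blk)     ((blk) >> CLUSTER_BITS)

    int main(void)
    {
            uint64_t group_start  = 32768;      /* first block of the group */
            uint64_t block_bitmap = 32768 + 37; /* bitmap block inside the group */

            /* Correct: offset of the bitmap from the start of the group. */
            printf("cluster of bitmap: %llu\n",
                   (unsigned long long)b2c(block_bitmap - group_start)); /* 2 */

            /*
             * The old code effectively computed b2c(group_start - block_bitmap),
             * which underflows to a huge unsigned value.
             */
            printf("old (wrong) value: %llu\n",
                   (unsigned long long)b2c(group_start - block_bitmap));
            return 0;
    }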
index 8ad112ae0ade2f21a953ccc03b687939b0b81310..6ec6f9ee2fec9099c87f523bbf69614eed789050 100644 (file)
@@ -123,7 +123,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        else
                                ext4_clear_inode_flag(inode, i);
                }
-               ei->i_flags = flags;
 
                ext4_set_inode_flags(inode);
                inode->i_ctime = ext4_current_time(inode);
@@ -269,7 +268,6 @@ group_extend_out:
                err = ext4_move_extents(filp, donor_filp, me.orig_start,
                                        me.donor_start, me.len, &me.moved_len);
                mnt_drop_write_file(filp);
-               mnt_drop_write(filp->f_path.mnt);
 
                if (copy_to_user((struct move_extent __user *)arg,
                                 &me, sizeof(me)))
index a3d81ebf6d864a8c2189147e5771c435473b2e42..0038b32cb36276d537f2ec81a46469bf3ad221b1 100644 (file)
@@ -738,22 +738,21 @@ static int
 fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
        int len = *lenp;
-       u32 ipos_h, ipos_m, ipos_l;
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+       loff_t i_pos;
 
        if (len < 5) {
                *lenp = 5;
                return 255; /* no room */
        }
 
-       ipos_h = MSDOS_I(inode)->i_pos >> 8;
-       ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
-       ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+       i_pos = fat_i_pos_read(sbi, inode);
        *lenp = 5;
        fh[0] = inode->i_ino;
        fh[1] = inode->i_generation;
-       fh[2] = ipos_h;
-       fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-       fh[4] = ipos_l;
+       fh[2] = i_pos >> 8;
+       fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+       fh[4] = (i_pos & 0x0f) << 28;
        if (parent)
                fh[4] |= MSDOS_I(parent)->i_logstart;
        return 3;
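For reference, the new fat_encode_fh() above spreads the on-disk position across three handle words: bits 8..39 of i_pos in one word, bits 4..7 in the top nibble of the next (shared with i_logstart, which fits in the low 28 bits), and bits 0..3 in the top nibble of the last. A userspace sketch of the packing and the matching unpacking; the inode/generation words are dropped and the values are invented:

    #include <stdint.h>
    #include <stdio.h>

    static void encode(uint64_t i_pos, uint32_t logstart, uint32_t fh[3])
    {
            fh[0] = (uint32_t)(i_pos >> 8);              /* bits 8..39 of i_pos */
            fh[1] = ((uint32_t)(i_pos & 0xf0) << 24) | logstart; /* bits 4..7 */
            fh[2] = (uint32_t)(i_pos & 0x0f) << 28;      /* bits 0..3 of i_pos */
    }

    static uint64_t decode_i_pos(const uint32_t fh[3])
    {
            return ((uint64_t)fh[0] << 8) | ((fh[1] >> 24) & 0xf0) | (fh[2] >> 28);
    }

    int main(void)
    {
            uint32_t fh[3];
            uint64_t i_pos = 0x12345678abULL; /* example directory-entry position */

            encode(i_pos, 0x1234, fh);        /* logstart fits in the low 28 bits */
            printf("round-trip i_pos = 0x%llx\n",
                   (unsigned long long)decode_i_pos(fh));   /* 0x12345678ab */
            return 0;
    }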
index b1a524d798e720cf18ad7ad4decdab430a2b9c5e..cf6f4345ceb0125baf86b7a029e7fdf8712f76c3 100644 (file)
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pipe_fs_i.h>
 
-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
 {
        int cur = *cnt; 
 
@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
                if (signal_pending(current))
                        break;
        }
+       return cur == *cnt ? -ERESTARTSYS : 0;
 }
 
 static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
-                               wait_for_partner(inode, &pipe->w_counter);
-                               if(signal_pending(current))
+                               if (wait_for_partner(inode, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                        wake_up_partner(inode);
 
                if (!pipe->readers) {
-                       wait_for_partner(inode, &pipe->r_counter);
-                       if (signal_pending(current))
+                       if (wait_for_partner(inode, &pipe->r_counter))
                                goto err_wr;
                }
                break;
index 8d2fb8c88cf36a196c47f473bcc729510ad89d8e..41a3ccff18d87bdfc3f65148e2f39f8067f3a8ce 100644 (file)
@@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                        /* Wait for I_SYNC. This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        /* Inode may be gone, start again */
+                       spin_lock(&wb->list_lock);
                        continue;
                }
                inode->i_state |= I_SYNC;
index 42593c587d48509604f3b9cfd8c0f9ffefe9ba0e..03ff5b1eba93ec21e11d9ca31f9c8b3f22e65d36 100644 (file)
@@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf,
                                     unsigned global_limit)
 {
        unsigned long t;
-       char tmp[32];
        unsigned limit = (1 << 16) - 1;
        int err;
 
-       if (*ppos || count >= sizeof(tmp) - 1)
-               return -EINVAL;
-
-       if (copy_from_user(tmp, buf, count))
+       if (*ppos)
                return -EINVAL;
 
-       tmp[count] = '\0';
-
-       err = strict_strtoul(tmp, 0, &t);
+       err = kstrtoul_from_user(buf, count, 0, &t);
        if (err)
                return err;
 
index df5ac048dc74e6b33174a69dc36b3a5b6084fd74..334e0b18a014c72bf78583f76a6c3b02f9058181 100644 (file)
@@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
 static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
                          struct kstat *stat)
 {
+       unsigned int blkbits;
+
        stat->dev = inode->i_sb->s_dev;
        stat->ino = attr->ino;
        stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
        stat->ctime.tv_nsec = attr->ctimensec;
        stat->size = attr->size;
        stat->blocks = attr->blocks;
-       stat->blksize = (1 << inode->i_blkbits);
+
+       if (attr->blksize != 0)
+               blkbits = ilog2(attr->blksize);
+       else
+               blkbits = inode->i_sb->s_blocksize_bits;
+
+       stat->blksize = 1 << blkbits;
 }
 
 static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
@@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
                if (stat) {
                        generic_fillattr(inode, stat);
                        stat->mode = fi->orig_i_mode;
+                       stat->ino = fi->orig_ino;
                }
        }
 
index 9562109d3a879b3dab50ee27d989f3ae89c8b833..b321a688cde79aa0f2923f3440330d03f0e5ee1e 100644 (file)
@@ -2173,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
+long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+                           loff_t length)
+{
+       struct fuse_file *ff = file->private_data;
+       struct fuse_conn *fc = ff->fc;
+       struct fuse_req *req;
+       struct fuse_fallocate_in inarg = {
+               .fh = ff->fh,
+               .offset = offset,
+               .length = length,
+               .mode = mode
+       };
+       int err;
+
+       if (fc->no_fallocate)
+               return -EOPNOTSUPP;
+
+       req = fuse_get_req(fc);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       req->in.h.opcode = FUSE_FALLOCATE;
+       req->in.h.nodeid = ff->nodeid;
+       req->in.numargs = 1;
+       req->in.args[0].size = sizeof(inarg);
+       req->in.args[0].value = &inarg;
+       fuse_request_send(fc, req);
+       err = req->out.h.error;
+       if (err == -ENOSYS) {
+               fc->no_fallocate = 1;
+               err = -EOPNOTSUPP;
+       }
+       fuse_put_request(fc, req);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(fuse_file_fallocate);
+
 static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
@@ -2190,6 +2228,7 @@ static const struct file_operations fuse_file_operations = {
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
+       .fallocate      = fuse_file_fallocate,
 };
 
 static const struct file_operations fuse_direct_io_file_operations = {
@@ -2206,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
+       .fallocate      = fuse_file_fallocate,
        /* no splice_read */
 };
 
index 572cefc7801296ce1b15cba6223f09d9a9826072..771fb6322c0750d223bb2ee7873f6d0cdbdef41b 100644 (file)
@@ -82,6 +82,9 @@ struct fuse_inode {
            preserve the original mode */
        umode_t orig_i_mode;
 
+       /** 64 bit inode number */
+       u64 orig_ino;
+
        /** Version of last attribute change */
        u64 attr_version;
 
@@ -478,6 +481,9 @@ struct fuse_conn {
        /** Are BSD file locking primitives not implemented by fs? */
        unsigned no_flock:1;
 
+       /** Is fallocate not implemented by fs? */
+       unsigned no_fallocate:1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
index 42678a33b7bb6297ced300f7fbb61696d37628c9..1cd61652018c7c8547a060344ef998ee6ae96879 100644 (file)
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->nlookup = 0;
        fi->attr_version = 0;
        fi->writectr = 0;
+       fi->orig_ino = 0;
        INIT_LIST_HEAD(&fi->write_files);
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
@@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
        return 0;
 }
 
+/*
+ * ino_t is only 32 bits on a 32-bit arch. We have to squash the 64-bit
+ * value down so that it will fit.
+ */
+static ino_t fuse_squash_ino(u64 ino64)
+{
+       ino_t ino = (ino_t) ino64;
+       if (sizeof(ino_t) < sizeof(u64))
+               ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
+       return ino;
+}
+
 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
                                   u64 attr_valid)
 {
@@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->attr_version = ++fc->attr_version;
        fi->i_time = attr_valid;
 
-       inode->i_ino     = attr->ino;
+       inode->i_ino     = fuse_squash_ino(attr->ino);
        inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        set_nlink(inode, attr->nlink);
        inode->i_uid     = attr->uid;
@@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->orig_i_mode = inode->i_mode;
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                inode->i_mode &= ~S_ISVTX;
+
+       fi->orig_ino = attr->ino;
 }
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
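fuse_squash_ino() above XOR-folds the high half of the 64-bit FUSE inode number into the low half when ino_t is only 32 bits wide, so inodes that differ only in their upper 32 bits do not all collapse onto the same truncated value. A userspace sketch of the same fold, with invented values:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t squash_ino32(uint64_t ino64)
    {
            uint32_t ino = (uint32_t)ino64;  /* low 32 bits */

            ino ^= (uint32_t)(ino64 >> 32);  /* fold in the high 32 bits */
            return ino;
    }

    int main(void)
    {
            uint64_t ino64 = 0x123456789abcdef0ULL;

            printf("64-bit ino 0x%llx -> 32-bit ino 0x%x\n",
                   (unsigned long long)ino64, squash_ino32(ino64));
            /* prints ... -> 0x88888888 (0x9abcdef0 ^ 0x12345678) */
            return 0;
    }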
index c640ba57074b8ba58c46c148815c714f20b48939..09addc8615fa6603e28d2fa718e5a32c1762acbd 100644 (file)
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        struct hfsplus_vh *vh = sbi->s_vhdr;
        struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
+       u32 cnid = (unsigned long)dentry->d_fsdata;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
        vh->finder_info[0] = bvh->finder_info[0] =
                cpu_to_be32(parent_ino(dentry));
 
-       /* Bootloader */
-       vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino);
+       /*
+        * Bootloader. Just using the inode here breaks in the case of
+        * hard links - the firmware wants the ID of the hard link file,
+        * but the inode points at the indirect inode.
+        */
+       vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
 
        /* Per spec, the OS X system folder - same as finder_info[0] here */
        vh->finder_info[5] = bvh->finder_info[5] =
index 7daf4b852d1c78ca89c31d791e9eddeb9dcf1a0d..90effcccca9af4c13f6b44b7b3fe756612f9d735 100644 (file)
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
        int ret = 0;
-       unsigned int io_size;
+       u64 io_size;
        loff_t start;
        int offset;
 
index 814c51d0de4739e4b89e9091e00c17f284c0e2ba..fce6238d52c1bf742307325a588d545e169508e4 100644 (file)
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        case F_WRLCK:
                return generic_add_lease(filp, arg, flp);
        default:
-               BUG();
+               return -EINVAL;
        }
 }
 EXPORT_SYMBOL(generic_setlease);
index 970659daa323865a113d25075d461c50c7f7dc7c..23ff18fe080afd0b6470e6d72df4fa9a8b9d0dab 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/kthread.h>
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/bc_xprt.h>
-#include <linux/nsproxy.h>
 
 #include <net/inet_sock.h>
 
@@ -107,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
 {
        int ret;
 
-       ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET,
+       ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
                                nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
        if (ret <= 0)
                goto out_err;
@@ -115,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
        dprintk("NFS: Callback listener port = %u (af %u)\n",
                        nfs_callback_tcpport, PF_INET);
 
-       ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6,
+       ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
                                nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
        if (ret > 0) {
                nfs_callback_tcpport6 = ret;
@@ -184,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
         * fore channel connection.
         * Returns the input port (0) and sets the svc_serv bc_xprt on success
         */
-       ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0,
+       ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
                              SVC_SOCK_ANONYMOUS);
        if (ret < 0) {
                rqstp = ERR_PTR(ret);
@@ -254,7 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
        char svc_name[12];
        int ret = 0;
        int minorversion_setup;
-       struct net *net = current->nsproxy->net_ns;
+       struct net *net = &init_net;
 
        mutex_lock(&nfs_callback_mutex);
        if (cb_info->users++ || cb_info->task != NULL) {
@@ -330,7 +329,7 @@ void nfs_callback_down(int minorversion)
        cb_info->users--;
        if (cb_info->users == 0 && cb_info->task != NULL) {
                kthread_stop(cb_info->task);
-               svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns);
+               svc_shutdown_net(cb_info->serv, &init_net);
                svc_exit_thread(cb_info->rqst);
                cb_info->serv = NULL;
                cb_info->rqst = NULL;
index 95bfc243992c1a822041d7a205bbca23162bf91d..e64b01d2a338274a459bc9c56c60dacf282aad84 100644 (file)
@@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
        args->csa_nrclists = ntohl(*p++);
        args->csa_rclists = NULL;
        if (args->csa_nrclists) {
-               args->csa_rclists = kmalloc(args->csa_nrclists *
-                                           sizeof(*args->csa_rclists),
-                                           GFP_KERNEL);
+               args->csa_rclists = kmalloc_array(args->csa_nrclists,
+                                                 sizeof(*args->csa_rclists),
+                                                 GFP_KERNEL);
                if (unlikely(args->csa_rclists == NULL))
                        goto out;
 
@@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
                                       const struct cb_sequenceres *res)
 {
        __be32 *p;
-       unsigned status = res->csr_status;
+       __be32 status = res->csr_status;
 
        if (unlikely(status != 0))
                goto out;
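The switch to kmalloc_array() in decode_cb_sequence_args() above matters because a plain count * size multiplication can overflow and silently under-allocate. A userspace sketch of the overflow check such helpers perform; alloc_array() here is an invented stand-in, not a kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_array(size_t n, size_t size)
    {
            if (size != 0 && n > SIZE_MAX / size)   /* n * size would overflow */
                    return NULL;
            return malloc(n * size);
    }

    int main(void)
    {
            /* A hostile length field: the naive product wraps to a tiny value. */
            size_t n = SIZE_MAX / 8 + 2;

            printf("naive n * 8 wraps to %zu bytes\n", n * 8);
            printf("checked alloc: %p\n", alloc_array(n, 8));   /* (nil) */

            free(alloc_array(16, 8));               /* normal case still works */
            return 0;
    }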
index 7d108753af81e9783ab1465c8bb9986452a6cf00..f005b5bebdc73bba4d548d134699dd4f00c471ca 100644 (file)
@@ -207,7 +207,6 @@ error_0:
 static void nfs4_shutdown_session(struct nfs_client *clp)
 {
        if (nfs4_has_session(clp)) {
-               nfs4_deviceid_purge_client(clp);
                nfs4_destroy_session(clp->cl_session);
                nfs4_destroy_clientid(clp);
        }
@@ -544,8 +543,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
 
        smp_rmb();
 
-       BUG_ON(clp->cl_cons_state != NFS_CS_READY);
-
        dprintk("<-- %s found nfs_client %p for %s\n",
                __func__, clp, cl_init->hostname ?: "");
        return clp;
index ad2775d3e219b65f6efed3454d6422f8822de0e8..48253372ab1d115def0b81f56b097db6e98a0f88 100644 (file)
@@ -484,17 +484,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                if (!nfs_pageio_add_request(&desc, req)) {
+                       nfs_list_remove_request(req);
                        nfs_list_add_request(req, &failed);
                        spin_lock(cinfo.lock);
                        dreq->flags = 0;
                        dreq->error = -EIO;
                        spin_unlock(cinfo.lock);
                }
+               nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);
 
-       while (!list_empty(&failed))
+       while (!list_empty(&failed)) {
+               req = nfs_list_entry(failed.next);
+               nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
+       }
 
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, dreq->inode);
@@ -523,9 +528,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
                nfs_list_remove_request(req);
                if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
                        /* Note the rewrite will go through mds */
-                       kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo);
-               }
+               } else
+                       nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }
 
@@ -716,12 +721,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                bit = NFS_IOHDR_NEED_RESCHED;
                        else if (dreq->flags == 0) {
-                               memcpy(&dreq->verf, &req->wb_verf,
+                               memcpy(&dreq->verf, hdr->verf,
                                       sizeof(dreq->verf));
                                bit = NFS_IOHDR_NEED_COMMIT;
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                        } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
-                               if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
+                               if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                        bit = NFS_IOHDR_NEED_RESCHED;
                                } else
index b5b86a05059c8c0cf157495878bad3621a25a8dc..864c51e4b400e5c7248bdd93eaed19772c0f23f2 100644 (file)
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600;
 static const struct cred *id_resolver_cache;
 static struct key_type key_type_id_resolver_legacy;
 
+struct idmap {
+       struct rpc_pipe         *idmap_pipe;
+       struct key_construction *idmap_key_cons;
+       struct mutex            idmap_mutex;
+};
 
 /**
  * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
                                            name, namelen, type, data,
                                            data_size, NULL);
        if (ret < 0) {
+               mutex_lock(&idmap->idmap_mutex);
                ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
                                            name, namelen, type, data,
                                            data_size, idmap);
+               mutex_unlock(&idmap->idmap_mutex);
        }
        return ret;
 }
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
 /* idmap classic begins here */
 module_param(nfs_idmap_cache_timeout, int, 0644);
 
-struct idmap {
-       struct rpc_pipe         *idmap_pipe;
-       struct key_construction *idmap_key_cons;
-};
-
 enum {
        Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
 };
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp)
                return error;
        }
        idmap->idmap_pipe = pipe;
+       mutex_init(&idmap->idmap_mutex);
 
        clp->cl_idmap = idmap;
        return 0;
index e605d695dbcb7b746d633f37e5ceb509deb7b790..f7296983eba60c5ea21f164600be800ea988977f 100644 (file)
@@ -1530,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
        nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
-       atomic_set(&nfsi->commit_info.rpcs_out, 0);
 #endif
 }
 
@@ -1545,6 +1544,7 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&nfsi->commit_info.list);
        nfsi->npages = 0;
        nfsi->commit_info.ncommit = 0;
+       atomic_set(&nfsi->commit_info.rpcs_out, 0);
        atomic_set(&nfsi->silly_count, 1);
        INIT_HLIST_HEAD(&nfsi->silly_list);
        init_waitqueue_head(&nfsi->waitqueue);
index c6827f93ab57caeab4e613e97919c04a51aa5d64..cc5900ac61b584774de45f10c48906b3ff99de74 100644 (file)
@@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp)
 
 extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
 
-extern const u32 nfs4_fattr_bitmap[2];
+extern const u32 nfs4_fattr_bitmap[3];
 extern const u32 nfs4_statfs_bitmap[2];
 extern const u32 nfs4_pathconf_bitmap[2];
 extern const u32 nfs4_fsinfo_bitmap[3];
index d48dbefa0e71ebf6d9ac90edbb893364afd2d0a3..15fc7e4664ed53206074df177efd74e30dd13521 100644 (file)
@@ -105,6 +105,8 @@ static int nfs4_map_errors(int err)
                return -EINVAL;
        case -NFS4ERR_SHARE_DENIED:
                return -EACCES;
+       case -NFS4ERR_MINOR_VERS_MISMATCH:
+               return -EPROTONOSUPPORT;
        default:
                dprintk("%s could not handle NFSv4 error %d\n",
                                __func__, -err);
@@ -116,7 +118,7 @@ static int nfs4_map_errors(int err)
 /*
  * This is our standard bitmap for GETATTR requests.
  */
-const u32 nfs4_fattr_bitmap[2] = {
+const u32 nfs4_fattr_bitmap[3] = {
        FATTR4_WORD0_TYPE
        | FATTR4_WORD0_CHANGE
        | FATTR4_WORD0_SIZE
@@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = {
        | FATTR4_WORD1_TIME_MODIFY
 };
 
+static const u32 nfs4_pnfs_open_bitmap[3] = {
+       FATTR4_WORD0_TYPE
+       | FATTR4_WORD0_CHANGE
+       | FATTR4_WORD0_SIZE
+       | FATTR4_WORD0_FSID
+       | FATTR4_WORD0_FILEID,
+       FATTR4_WORD1_MODE
+       | FATTR4_WORD1_NUMLINKS
+       | FATTR4_WORD1_OWNER
+       | FATTR4_WORD1_OWNER_GROUP
+       | FATTR4_WORD1_RAWDEV
+       | FATTR4_WORD1_SPACE_USED
+       | FATTR4_WORD1_TIME_ACCESS
+       | FATTR4_WORD1_TIME_METADATA
+       | FATTR4_WORD1_TIME_MODIFY,
+       FATTR4_WORD2_MDSTHRESHOLD
+};
+
 const u32 nfs4_statfs_bitmap[2] = {
        FATTR4_WORD0_FILES_AVAIL
        | FATTR4_WORD0_FILES_FREE
@@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        p->o_arg.name = &dentry->d_name;
        p->o_arg.server = server;
        p->o_arg.bitmask = server->attr_bitmask;
+       p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
        p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
        if (attrs != NULL && attrs->ia_valid != 0) {
                __be32 verf[2];
@@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir,
                opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
                if (!opendata->f_attr.mdsthreshold)
                        goto err_opendata_put;
+               opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
        }
        if (dentry->d_inode != NULL)
                opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
@@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
        struct nfs4_state *res;
        int status;
 
+       fmode &= FMODE_READ|FMODE_WRITE;
        do {
                status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
                                       &res, ctx_th);
@@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 
        nfs_fattr_init(fattr);
        
+       /* Deal with open(O_TRUNC) */
+       if (sattr->ia_valid & ATTR_OPEN)
+               sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+
+       /* Optimization: if the end result is no change, don't RPC */
+       if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
+               return 0;
+
        /* Search for an existing open(O_WRITE) file */
        if (sattr->ia_valid & ATTR_FILE) {
                struct nfs_open_context *ctx;
@@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
                }
        }
 
-       /* Deal with open(O_TRUNC) */
-       if (sattr->ia_valid & ATTR_OPEN)
-               sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
-
        status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
        if (status == 0)
                nfs_setattr_update_inode(inode, sattr);
@@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        if (status)
-               pr_warn("NFS: Got error %d from the server %s on "
+               dprintk("NFS: Got error %d from the server %s on "
                        "DESTROY_CLIENTID.", status, clp->cl_hostname);
        return status;
 }
@@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
        status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 
        if (status)
-               printk(KERN_WARNING
-                       "NFS: Got error %d from the server on DESTROY_SESSION. "
+               dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
                        "Session has been destroyed regardless...\n", status);
 
        dprintk("<-- nfs4_proc_destroy_session\n");
index c679b9ecef634c80d4738e3cc2a9624f51c327c2..f38300e9f171646aeb414c704e26bb5302f380fa 100644 (file)
@@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
        return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
 }
 
+static void nfs41_finish_session_reset(struct nfs_client *clp)
+{
+       clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+       clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+       /* create_session negotiated new slot table */
+       clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+       clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       nfs41_setup_state_renewal(clp);
+}
+
 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 {
        int status;
@@ -259,8 +269,7 @@ do_confirm:
        status = nfs4_proc_create_session(clp, cred);
        if (status != 0)
                goto out;
-       clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-       nfs41_setup_state_renewal(clp);
+       nfs41_finish_session_reset(clp);
        nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
        return status;
@@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp)
                status = nfs4_handle_reclaim_lease_error(clp, status);
                goto out;
        }
-       clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
-       /* create_session negotiated new slot table */
-       clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
-       clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+       nfs41_finish_session_reset(clp);
        dprintk("%s: session reset was successful for server %s!\n",
                        __func__, clp->cl_hostname);
-
-        /* Let the state manager reestablish state */
-       if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
-               nfs41_setup_state_renewal(clp);
 out:
        if (cred)
                put_rpccred(cred);
index ee4a74db95d0b1b7ea49e8dd1263f0504f2fffa8..18fae29b0301c38a09fcb30a1345375780393f5c 100644 (file)
@@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
 }
 
 static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
+                                const u32 *open_bitmap,
                                 struct compound_hdr *hdr)
 {
        encode_getattr_three(xdr,
-                            bitmask[0] & nfs4_fattr_bitmap[0],
-                            bitmask[1] & nfs4_fattr_bitmap[1],
-                            bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD,
+                            bitmask[0] & open_bitmap[0],
+                            bitmask[1] & open_bitmap[1],
+                            bitmask[2] & open_bitmap[2],
                             hdr);
 }
 
@@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_putfh(xdr, args->fh, &hdr);
        encode_open(xdr, args, &hdr);
        encode_getfh(xdr, &hdr);
-       encode_getfattr_open(xdr, args->bitmask, &hdr);
+       encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
        encode_nops(&hdr);
 }
 
@@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
 
        if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
                return -EIO;
-       if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) {
+       if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+               /* Did the server return an unrequested attribute? */
+               if (unlikely(res == NULL))
+                       return -EREMOTEIO;
                p = xdr_inline_decode(xdr, 4);
                if (unlikely(!p))
                        goto out_overflow;
@@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
                                __func__);
 
                status = decode_first_threshold_item4(xdr, res);
+               bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD;
        }
        return status;
 out_overflow:
index b47277baebab92930bee6c1fbac445fd8978a6b9..f50d3e8d6f2230a42cdc656b61004dcf62182dd2 100644 (file)
@@ -454,7 +454,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
        objios->ios->done = _read_done;
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                rdata->args.offset, rdata->args.count);
-       return ore_read(objios->ios);
+       ret = ore_read(objios->ios);
+       if (unlikely(ret))
+               objio_free_result(&objios->oir);
+       return ret;
 }
 
 /*
@@ -486,8 +489,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
        struct nfs_write_data *wdata = objios->oir.rpcdata;
        struct address_space *mapping = wdata->header->inode->i_mapping;
        pgoff_t index = offset / PAGE_SIZE;
-       struct page *page = find_get_page(mapping, index);
+       struct page *page;
+       loff_t i_size = i_size_read(wdata->header->inode);
+
+       if (offset >= i_size) {
+               *uptodate = true;
+               dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
+               return ZERO_PAGE(0);
+       }
 
+       page = find_get_page(mapping, index);
        if (!page) {
                page = find_or_create_page(mapping, index, GFP_NOFS);
                if (unlikely(!page)) {
@@ -507,8 +518,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 
 static void __r4w_put_page(void *priv, struct page *page)
 {
-       dprintk("%s: index=0x%lx\n", __func__, page->index);
-       page_cache_release(page);
+       dprintk("%s: index=0x%lx\n", __func__,
+               (page == ZERO_PAGE(0)) ? -1UL : page->index);
+       if (ZERO_PAGE(0) != page)
+               page_cache_release(page);
        return;
 }
 
@@ -539,8 +552,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                wdata->args.offset, wdata->args.count);
        ret = ore_write(objios->ios);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               objio_free_result(&objios->oir);
                return ret;
+       }
 
        if (objios->sync)
                _write_done(objios->ios, objios);
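The objlayout change above hands back the shared ZERO_PAGE for read-for-write requests past i_size and teaches the put path not to drop a reference on it. A userspace analogue of that pattern; everything here is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SZ 4096

    static const unsigned char zero_page[PAGE_SZ];  /* shared, never freed */

    static const unsigned char *get_page(uint64_t offset, uint64_t i_size)
    {
            unsigned char *p;

            if (offset >= i_size)           /* past EOF: no I/O needed */
                    return zero_page;

            p = malloc(PAGE_SZ);            /* stands in for a page-cache page */
            if (p)
                    memset(p, 0xaa, PAGE_SZ);
            return p;
    }

    static void put_page(const unsigned char *p)
    {
            if (p != zero_page)             /* only real pages are released */
                    free((void *)p);
    }

    int main(void)
    {
            const unsigned char *a = get_page(0, 8192);            /* real page */
            const unsigned char *b = get_page(4 * PAGE_SZ, 8192);  /* zero page */

            printf("b is the shared zero page: %d\n", b == zero_page);
            put_page(a);
            put_page(b);
            return 0;
    }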
index b8323aa7b54384af8f51b84b3077d98b8f22d951..bbc49caa7a82810ac497e7475997262f4da14be4 100644 (file)
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id)
 
        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
+       if (local != NULL && !try_module_get(local->owner)) {
+               dprintk("%s: Could not grab reference on module\n", __func__);
+               local = NULL;
+       }
        spin_unlock(&pnfs_spinlock);
        return local;
 }
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss)
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+               /* Decrement the MDS count. Purge the deviceid cache if zero */
+               if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
+                       nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                        goto out_no_driver;
                }
        }
-       if (!try_module_get(ld_type->owner)) {
-               dprintk("%s: Could not grab reference on module\n", __func__);
-               goto out_no_driver;
-       }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                module_put(ld_type->owner);
                goto out_no_driver;
        }
+       /* Bump the MDS count */
+       atomic_inc(&server->nfs_client->cl_mds_count);
 
        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;
index 29fd23c0efdcb07c699c5e2e94c1e23dad8de103..64f90d845f6a95cd8752e5b4e1c9b3895f1a1e66 100644 (file)
@@ -365,7 +365,7 @@ static inline bool
 pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
                   struct nfs_server *nfss)
 {
-       return (dst && src && src->bm != 0 &&
+       return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld &&
                                        nfss->pnfs_curr_ld->id == src->l_type);
 }
 
index a706b6bcc286a5a401318e868b0d1fbab2a206a4..617c7419a08ef5d0b107292bb626b84b99916b2d 100644 (file)
@@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
                /* Emulate the eof flag, which isn't normally needed in NFSv2
                 * as it is guaranteed to always return the file attributes
                 */
-               if (data->args.offset + data->args.count >= data->res.fattr->size)
+               if (data->args.offset + data->res.count >= data->res.fattr->size)
                        data->res.eof = 1;
        }
        return 0;
index ff656c022684e9e2b0d94587cf9d807d670bd715..06228192f64efb52e8466afd2334bc04952d50a4 100644 (file)
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
        if (data == NULL)
                goto out_no_data;
 
+       args->version = NFS_DEFAULT_VERSION;
        switch (data->version) {
        case 1:
                data->namlen = 0;
@@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options,
        if (data == NULL)
                goto out_no_data;
 
+       args->version = 4;
+
        switch (data->version) {
        case 1:
                if (data->host_addrlen > sizeof(args->nfs_server.address))
@@ -2857,6 +2860,8 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
 
        dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
+       mount_info->fill_super = nfs4_fill_super;
+
        export_path = data->nfs_server.export_path;
        data->nfs_server.export_path = "/";
        root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
index e6fe3d69d14cbe0a5b75fc2cc5905c875f4c0181..4d6861c0dc142a5ec41e5da405012b661aeb3bc4 100644 (file)
@@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void)
                INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
+               hdr->verf = &p->verf;
        }
        return p;
 }
@@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
                        goto next;
                }
                if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+                       memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
                        goto next;
                }
@@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata)
        struct nfs_write_data   *data = calldata;
        struct nfs_pgio_header *hdr = data->header;
        int status = data->task.tk_status;
-       struct nfs_page *req = hdr->req;
 
        if ((status >= 0) && nfs_write_need_commit(data)) {
                spin_lock(&hdr->lock);
                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
                        ; /* Do nothing */
                else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
-                       memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-               else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
+                       memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf));
+               else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf)))
                        set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
                spin_unlock(&hdr->lock);
        }
index 8fdc9ec5c5d359f8defb2766e710eb35fc08c3b0..94effd5bc4a107086ef53515ac2bb9dc39950b88 100644 (file)
@@ -900,7 +900,7 @@ static void free_session(struct kref *kref)
        struct nfsd4_session *ses;
        int mem;
 
-       BUG_ON(!spin_is_locked(&client_lock));
+       lockdep_assert_held(&client_lock);
        ses = container_of(kref, struct nfsd4_session, se_ref);
        nfsd4_del_conns(ses);
        spin_lock(&nfsd_drc_lock);
@@ -1080,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
 static inline void
 free_client(struct nfs4_client *clp)
 {
-       BUG_ON(!spin_is_locked(&client_lock));
+       lockdep_assert_held(&client_lock);
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
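
The two nfsd hunks above replace BUG_ON(!spin_is_locked(&client_lock)) with lockdep_assert_held(): spin_is_locked() is not meaningful on uniprocessor (!CONFIG_SMP) builds, while the lockdep form compiles away when lockdep is off and never false-fires. A minimal sketch of the pattern, with hypothetical names, might look like:

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(demo_lock);	/* stand-in for client_lock */

/* Helper that must only be called with demo_lock held by the caller. */
static void demo_free_locked(void *obj)
{
	/* A no-op unless CONFIG_LOCKDEP is enabled; safe on UP kernels. */
	lockdep_assert_held(&demo_lock);
	kfree(obj);
}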
index 08a07a218d26ef40ecc87db0560cdfedd5648e8f..57ceaf33d1773e6e1d2c7be8051b74795d663110 100644 (file)
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
        while (!list_empty(head)) {
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index 0e72ad6f22aacfe7c30776eaae2ecdef7f22aba3..88e11fb346b6d0fd6e81fb03994fe18cac514bb7 100644 (file)
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
                        continue;
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index 81a4cd22f80be84a06eac2b0fbf4348385d76262..4f7795fb5fc0b78a6f58d28f2de69356ff5e197d 100644 (file)
@@ -456,7 +456,7 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
        stats->ls_gets++;
        stats->ls_total += ktime_to_ns(kt);
        /* overflow */
-       if (unlikely(stats->ls_gets) == 0) {
+       if (unlikely(stats->ls_gets == 0)) {
                stats->ls_gets++;
                stats->ls_total = ktime_to_ns(kt);
        }
@@ -3932,6 +3932,8 @@ unqueue:
 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
                                        struct ocfs2_lock_res *lockres)
 {
+       unsigned long flags;
+
        assert_spin_locked(&lockres->l_lock);
 
        if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -3945,21 +3947,22 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
 
        lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&lockres->l_blocked_list)) {
                list_add_tail(&lockres->l_blocked_list,
                              &osb->blocked_lock_list);
                osb->blocked_lock_count++;
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
 {
        unsigned long processed;
+       unsigned long flags;
        struct ocfs2_lock_res *lockres;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* grab this early so we know to try again if a state change and
         * wake happens part-way through our work  */
        osb->dc_work_sequence = osb->dc_wake_sequence;
@@ -3972,38 +3975,40 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
                osb->blocked_lock_count--;
-               spin_unlock(&osb->dc_task_lock);
+               spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
                BUG_ON(!processed);
                processed--;
 
                ocfs2_process_blocked_lock(osb, lockres);
 
-               spin_lock(&osb->dc_task_lock);
+               spin_lock_irqsave(&osb->dc_task_lock, flags);
        }
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 }
 
 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
 {
        int empty = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (list_empty(&osb->blocked_lock_list))
                empty = 1;
 
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        return empty;
 }
 
 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
 {
        int should_wake = 0;
+       unsigned long flags;
 
-       spin_lock(&osb->dc_task_lock);
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        if (osb->dc_work_sequence != osb->dc_wake_sequence)
                should_wake = 1;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
 
        return should_wake;
 }
@@ -4033,10 +4038,12 @@ static int ocfs2_downconvert_thread(void *arg)
 
 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
 {
-       spin_lock(&osb->dc_task_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&osb->dc_task_lock, flags);
        /* make sure the voting thread gets a swipe at whatever changes
         * the caller may have made to the voting state */
        osb->dc_wake_sequence++;
-       spin_unlock(&osb->dc_task_lock);
+       spin_unlock_irqrestore(&osb->dc_task_lock, flags);
        wake_up(&osb->dc_event);
 }
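
The ocfs2 hunks above convert every taker of osb->dc_task_lock from spin_lock() to spin_lock_irqsave(), so the lock can be acquired regardless of the caller's interrupt state and that state is restored on unlock. A hedged sketch of the conversion pattern, with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(task_lock);	/* stand-in for osb->dc_task_lock */
static unsigned long wake_sequence;

/* May run with interrupts enabled or disabled; flags preserves the state. */
static void demo_wake(void)
{
	unsigned long flags;

	spin_lock_irqsave(&task_lock, flags);
	wake_sequence++;
	spin_unlock_irqrestore(&task_lock, flags);
}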
index 2f5b92ef0e533146007b49d21dd705a242125dc5..70b5863a2d64e05cde6474bd387112f3b470b5e7 100644 (file)
@@ -923,8 +923,6 @@ out_unlock:
 
        ocfs2_inode_unlock(inode, 0);
 out:
-       if (ret && ret != -ENXIO)
-               ret = -ENXIO;
        return ret;
 }
 
index 061591a3ab08a673d73544e819e7b7fd19b2689a..7602783d7f41c9a01827f16446da43e036e6a7cb 100644 (file)
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
-       if (file->f_flags & O_SYNC)
+       if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;
 
        ocfs2_commit_trans(osb, handle);
@@ -2422,8 +2422,10 @@ out_dio:
                unaligned_dio = 0;
        }
 
-       if (unaligned_dio)
+       if (unaligned_dio) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
                atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+       }
 
 out:
        if (rw_level != -1)
index 92fcd575775a0d1123e902b8499cd1b26cd52aab..0a86e302655f3384435ab4843fad50b6bf1a7cae 100644 (file)
@@ -399,8 +399,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
                              msecs_to_jiffies(oinfo->dqi_syncms));
 
 out_err:
-       if (status)
-               mlog_errno(status);
        return status;
 out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
index d6c79a0dffc7b0827b09562e11fa0f610af5657d..1540632d8387fe98a51d0193201346acb18ae70e 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
        struct file *file;
        struct inode *inode;
-       int error;
+       int error, fput_needed;
 
        error = -EBADF;
-       file = fget(fd);
+       file = fget_raw_light(fd, &fput_needed);
        if (!file)
                goto out;
 
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
        if (!error)
                set_fs_pwd(current->fs, &file->f_path);
 out_putf:
-       fput(file);
+       fput_light(file, fput_needed);
 out:
        return error;
 }
index 616f41a7cde657d91d2bbd4c360d338c1dd66def..437195f204e14e908bdda87ace0e8b0ce81efb21 100644 (file)
@@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                        rcu_read_lock();
                        file = fcheck_files(files, fd);
                        if (file) {
-                               unsigned i_mode, f_mode = file->f_mode;
+                               unsigned f_mode = file->f_mode;
 
                                rcu_read_unlock();
                                put_files_struct(files);
@@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
                                        inode->i_gid = GLOBAL_ROOT_GID;
                                }
 
-                               i_mode = S_IFLNK;
-                               if (f_mode & FMODE_READ)
-                                       i_mode |= S_IRUSR | S_IXUSR;
-                               if (f_mode & FMODE_WRITE)
-                                       i_mode |= S_IWUSR | S_IXUSR;
-                               inode->i_mode = i_mode;
+                               if (S_ISLNK(inode->i_mode)) {
+                                       unsigned i_mode = S_IFLNK;
+                                       if (f_mode & FMODE_READ)
+                                               i_mode |= S_IRUSR | S_IXUSR;
+                                       if (f_mode & FMODE_WRITE)
+                                               i_mode |= S_IWUSR | S_IXUSR;
+                                       inode->i_mode = i_mode;
+                               }
 
                                security_task_to_inode(task, inode);
                                put_task_struct(task);
@@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
        ei = PROC_I(inode);
        ei->fd = fd;
 
+       inode->i_mode = S_IFLNK;
        inode->i_op = &proc_pid_link_inode_operations;
        inode->i_size = 64;
        ei->op.proc_get_link = proc_fd_link;
index aeb19e68e0860a436998223cf854ff5502730e53..11a2aa2a56c4c0273a08d107d9242985ccc0369c 100644 (file)
@@ -258,7 +258,7 @@ fail:
        return rc;
 }
 
-int pstore_fill_super(struct super_block *sb, void *data, int silent)
+static int pstore_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *inode;
 
index 82c585f715e341c36666fdc378f75408e3ea3d14..03ce7a9b81cc99765234e9b859f1bfea6a5d0208 100644 (file)
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
  * as we can from the end of the buffer.
  */
 static void pstore_dump(struct kmsg_dumper *dumper,
-           enum kmsg_dump_reason reason,
-           const char *s1, unsigned long l1,
-           const char *s2, unsigned long l2)
+                       enum kmsg_dump_reason reason)
 {
-       unsigned long   s1_start, s2_start;
-       unsigned long   l1_cpy, l2_cpy;
-       unsigned long   size, total = 0;
-       char            *dst;
+       unsigned long   total = 0;
        const char      *why;
        u64             id;
-       int             hsize, ret;
        unsigned int    part = 1;
        unsigned long   flags = 0;
        int             is_locked = 0;
+       int             ret;
 
        why = get_reason_str(reason);
 
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper,
                spin_lock_irqsave(&psinfo->buf_lock, flags);
        oopscount++;
        while (total < kmsg_bytes) {
+               char *dst;
+               unsigned long size;
+               int hsize;
+               size_t len;
+
                dst = psinfo->buf;
                hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
                size = psinfo->bufsize - hsize;
                dst += hsize;
 
-               l2_cpy = min(l2, size);
-               l1_cpy = min(l1, size - l2_cpy);
-
-               if (l1_cpy + l2_cpy == 0)
+               if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
                        break;
 
-               s2_start = l2 - l2_cpy;
-               s1_start = l1 - l1_cpy;
-
-               memcpy(dst, s1 + s1_start, l1_cpy);
-               memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
-
                ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
-                                  hsize + l1_cpy + l2_cpy, psinfo);
+                                   hsize + len, psinfo);
                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
                        pstore_new_entry = 1;
-               l1 -= l1_cpy;
-               l2 -= l2_cpy;
-               total += l1_cpy + l2_cpy;
+
+               total += hsize + len;
                part++;
        }
        if (in_nmi()) {
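
The pstore_dump() rework above drops the old two-segment (s1/l1, s2/l2) interface in favour of kmsg_dump_get_buffer(), which copies as much of the tail of the kernel log as fits into the caller's buffer and can be called repeatedly to walk backwards through the log. A minimal dumper built on the same call could look like this sketch (the backend write is a hypothetical stub):

#include <linux/kmsg_dump.h>

static char dump_buf[4096];

static void demo_backend_write(const char *buf, size_t len)
{
	/* hand (buf, len) to persistent storage; omitted here */
}

static void demo_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	size_t len;

	/* Pull chunks from the end of the log until it is drained. */
	while (kmsg_dump_get_buffer(dumper, true, dump_buf, sizeof(dump_buf), &len))
		demo_backend_write(dump_buf, len);
}

static struct kmsg_dumper demo_dumper = {
	.dump = demo_dump,
};

/* kmsg_dump_register(&demo_dumper) would hook this into the oops/panic paths. */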
index 9123cce28c1e8d6f511738cda7435861f1261ef7..453030f9c5bc2a68523cafc26c6d331d86855949 100644 (file)
@@ -106,6 +106,8 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
        time->tv_sec = 0;
        time->tv_nsec = 0;
 
+       /* Update old/shadowed buffer. */
+       persistent_ram_save_old(prz);
        size = persistent_ram_old_size(prz);
        *buf = kmalloc(size, GFP_KERNEL);
        if (*buf == NULL)
@@ -184,6 +186,7 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id,
                return -EINVAL;
 
        persistent_ram_free_old(cxt->przs[id]);
+       persistent_ram_zap(cxt->przs[id]);
 
        return 0;
 }
index 31f8d184f3a0a659cd53a5002c2c013bb13e76a0..c5fbdbbf81ac0d6fda33e9b848a9fafa5b2b21f2 100644 (file)
@@ -250,23 +250,24 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
        persistent_ram_update_ecc(prz, start, count);
 }
 
-static void __init
-persistent_ram_save_old(struct persistent_ram_zone *prz)
+void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
        struct persistent_ram_buffer *buffer = prz->buffer;
        size_t size = buffer_size(prz);
        size_t start = buffer_start(prz);
-       char *dest;
 
-       persistent_ram_ecc_old(prz);
+       if (!size)
+               return;
 
-       dest = kmalloc(size, GFP_KERNEL);
-       if (dest == NULL) {
+       if (!prz->old_log) {
+               persistent_ram_ecc_old(prz);
+               prz->old_log = kmalloc(size, GFP_KERNEL);
+       }
+       if (!prz->old_log) {
                pr_err("persistent_ram: failed to allocate buffer\n");
                return;
        }
 
-       prz->old_log = dest;
        prz->old_log_size = size;
        memcpy(prz->old_log, &buffer->data[start], size - start);
        memcpy(prz->old_log + size - start, &buffer->data[0], start);
@@ -319,6 +320,13 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz)
        prz->old_log_size = 0;
 }
 
+void persistent_ram_zap(struct persistent_ram_zone *prz)
+{
+       atomic_set(&prz->buffer->start, 0);
+       atomic_set(&prz->buffer->size, 0);
+       persistent_ram_update_header_ecc(prz);
+}
+
 static void *persistent_ram_vmap(phys_addr_t start, size_t size)
 {
        struct page **pages;
@@ -405,6 +413,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
                                " size %zu, start %zu\n",
                               buffer_size(prz), buffer_start(prz));
                        persistent_ram_save_old(prz);
+                       return 0;
                }
        } else {
                pr_info("persistent_ram: no valid data in buffer"
@@ -412,8 +421,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
        }
 
        prz->buffer->sig = PERSISTENT_RAM_SIG;
-       atomic_set(&prz->buffer->start, 0);
-       atomic_set(&prz->buffer->size, 0);
+       persistent_ram_zap(prz);
 
        return 0;
 }
@@ -448,7 +456,6 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
                goto err;
 
        persistent_ram_post_init(prz, ecc);
-       persistent_ram_update_header_ecc(prz);
 
        return prz;
 err:
index fbb0b478a346fbc77c854696c5e0e508760125ad..d5378d028589843e1cfef1efd5dba0b8cc4072cb 100644 (file)
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
                /* prevent the page from being discarded on memory pressure */
                SetPageDirty(page);
+               SetPageUptodate(page);
 
                unlock_page(page);
                put_page(page);
index c9f1318a3b820b363526576036c4894205552921..7bf08fa22ec9ab122bad5438a02bd78db6f1e885 100644 (file)
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, false,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
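
The splice changes above stop re-reading pipe->buffers, which can change underneath the caller (for example via fcntl(F_SETPIPE_SZ)), and instead snapshot it once into spd.nr_pages_max so that allocation, iteration and teardown all agree on a single value. The underlying pattern, sketched with hypothetical names:

#include <linux/compiler.h>
#include <linux/pipe_fs_i.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct page_desc {
	unsigned int	nr_max;		/* snapshot taken at setup time */
	struct page	**pages;
};

static int desc_grow(const struct pipe_inode_info *pipe, struct page_desc *d)
{
	/* Read the racy field exactly once; later decisions use d->nr_max. */
	d->nr_max = ACCESS_ONCE(pipe->buffers);
	d->pages = kmalloc(d->nr_max * sizeof(struct page *), GFP_KERNEL);
	return d->pages ? 0 : -ENOMEM;
}

static void desc_shrink(struct page_desc *d)
{
	kfree(d->pages);	/* sized by the snapshot, not by pipe->buffers now */
}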
 
index 685a83756b2b7df6e93373083324839ff14bac54..92df3b08153901350a433f8b95c95cd493479aad 100644 (file)
@@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
        struct dentry *dent;
        struct ubifs_debug_info *d = c->dbg;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
                     c->vi.ubi_num, c->vi.vol_id);
        if (n == UBIFS_DFS_DIR_LEN) {
@@ -3010,7 +3013,8 @@ out:
  */
 void dbg_debugfs_exit_fs(struct ubifs_info *c)
 {
-       debugfs_remove_recursive(c->dbg->dfs_dir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(c->dbg->dfs_dir);
 }
 
 struct ubifs_global_debug_info ubifs_dbg;
@@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void)
        const char *fname;
        struct dentry *dent;
 
+       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+               return 0;
+
        fname = "ubifs";
        dent = debugfs_create_dir(fname, NULL);
        if (IS_ERR_OR_NULL(dent))
@@ -3159,7 +3166,8 @@ out:
  */
 void dbg_debugfs_exit(void)
 {
-       debugfs_remove_recursive(dfs_rootdir);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               debugfs_remove_recursive(dfs_rootdir);
 }
 
 /**
index 2559d174e0040a45578fcc2e7612522a08e99a2f..28ec13af28d91c360c89c68f7af0f1957e79477a 100644 (file)
@@ -939,8 +939,8 @@ static int find_dirtiest_idx_leb(struct ubifs_info *c)
        }
        dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
                 lp->free, lp->flags);
-       ubifs_assert(lp->flags | LPROPS_TAKEN);
-       ubifs_assert(lp->flags | LPROPS_INDEX);
+       ubifs_assert(lp->flags & LPROPS_TAKEN);
+       ubifs_assert(lp->flags & LPROPS_INDEX);
        return lnum;
 }
 
index ef3d1ba6d992b8ffb9ab0d57e633a22d586758d2..15e2fc5aa60bd4790a86c828aaea6995b17e1737 100644 (file)
@@ -718,8 +718,12 @@ static int fixup_free_space(struct ubifs_info *c)
                lnum = ubifs_next_log_lnum(c, lnum);
        }
 
-       /* Fixup the current log head */
-       err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
+       /*
+        * Fixup the log head, which contains only a CS node at the
+        * beginning.
+        */
+       err = fixup_leb(c, c->lhead_lnum,
+                       ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
        if (err)
                goto out;
 
index ac8a348dcb693bb10a24a930346b82bc7db799f9..8d86a8706c0e4d93bcace5ed039a71388cd966d5 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/seq_file.h>
 #include <linux/bitmap.h>
 #include <linux/crc-itu-t.h>
+#include <linux/log2.h>
 #include <asm/byteorder.h>
 
 #include "udf_sb.h"
@@ -1215,16 +1216,65 @@ out_bh:
        return ret;
 }
 
+static int udf_load_sparable_map(struct super_block *sb,
+                                struct udf_part_map *map,
+                                struct sparablePartitionMap *spm)
+{
+       uint32_t loc;
+       uint16_t ident;
+       struct sparingTable *st;
+       struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
+       int i;
+       struct buffer_head *bh;
+
+       map->s_partition_type = UDF_SPARABLE_MAP15;
+       sdata->s_packet_len = le16_to_cpu(spm->packetLength);
+       if (!is_power_of_2(sdata->s_packet_len)) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Invalid packet length %u\n",
+                       (unsigned)sdata->s_packet_len);
+               return -EIO;
+       }
+       if (spm->numSparingTables > 4) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Too many sparing tables (%d)\n",
+                       (int)spm->numSparingTables);
+               return -EIO;
+       }
+
+       for (i = 0; i < spm->numSparingTables; i++) {
+               loc = le32_to_cpu(spm->locSparingTable[i]);
+               bh = udf_read_tagged(sb, loc, loc, &ident);
+               if (!bh)
+                       continue;
+
+               st = (struct sparingTable *)bh->b_data;
+               if (ident != 0 ||
+                   strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
+                           strlen(UDF_ID_SPARING)) ||
+                   sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
+                                                       sb->s_blocksize) {
+                       brelse(bh);
+                       continue;
+               }
+
+               sdata->s_spar_map[i] = bh;
+       }
+       map->s_partition_func = udf_get_pblock_spar15;
+       return 0;
+}
+
 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                               struct kernel_lb_addr *fileset)
 {
        struct logicalVolDesc *lvd;
-       int i, j, offset;
+       int i, offset;
        uint8_t type;
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct genericPartitionMap *gpm;
        uint16_t ident;
        struct buffer_head *bh;
+       unsigned int table_len;
        int ret = 0;
 
        bh = udf_read_tagged(sb, block, block, &ident);
@@ -1232,15 +1282,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                return 1;
        BUG_ON(ident != TAG_IDENT_LVD);
        lvd = (struct logicalVolDesc *)bh->b_data;
-
-       i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
-       if (i != 0) {
-               ret = i;
+       table_len = le32_to_cpu(lvd->mapTableLength);
+       if (sizeof(*lvd) + table_len > sb->s_blocksize) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Partition table too long (%u > %lu)\n", table_len,
+                       sb->s_blocksize - sizeof(*lvd));
                goto out_bh;
        }
 
+       ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+       if (ret)
+               goto out_bh;
+
        for (i = 0, offset = 0;
-            i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
+            i < sbi->s_partitions && offset < table_len;
             i++, offset += gpm->partitionMapLength) {
                struct udf_part_map *map = &sbi->s_partmaps[i];
                gpm = (struct genericPartitionMap *)
@@ -1275,38 +1330,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
-                               uint32_t loc;
-                               struct sparingTable *st;
-                               struct sparablePartitionMap *spm =
-                                       (struct sparablePartitionMap *)gpm;
-
-                               map->s_partition_type = UDF_SPARABLE_MAP15;
-                               map->s_type_specific.s_sparing.s_packet_len =
-                                               le16_to_cpu(spm->packetLength);
-                               for (j = 0; j < spm->numSparingTables; j++) {
-                                       struct buffer_head *bh2;
-
-                                       loc = le32_to_cpu(
-                                               spm->locSparingTable[j]);
-                                       bh2 = udf_read_tagged(sb, loc, loc,
-                                                            &ident);
-                                       map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = bh2;
-
-                                       if (bh2 == NULL)
-                                               continue;
-
-                                       st = (struct sparingTable *)bh2->b_data;
-                                       if (ident != 0 || strncmp(
-                                               st->sparingIdent.ident,
-                                               UDF_ID_SPARING,
-                                               strlen(UDF_ID_SPARING))) {
-                                               brelse(bh2);
-                                               map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = NULL;
-                                       }
-                               }
-                               map->s_partition_func = udf_get_pblock_spar15;
+                               if (udf_load_sparable_map(sb, map,
+                                   (struct sparablePartitionMap *)gpm) < 0)
+                                       goto out_bh;
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
index 229641fb8e67af0674df635908611b619ebbd6ca..4f33c32affe3d2eb4bec9c5ca8d71a33bbc5032a 100644 (file)
@@ -1074,12 +1074,13 @@ restart:
         * If we couldn't get anything, give up.
         */
        if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+               xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
                if (!forced++) {
                        trace_xfs_alloc_near_busy(args);
                        xfs_log_force(args->mp, XFS_LOG_SYNC);
                        goto restart;
                }
-
                trace_xfs_alloc_size_neither(args);
                args->agbno = NULLAGBLOCK;
                return 0;
@@ -2433,15 +2434,24 @@ xfs_alloc_vextent_worker(
        current_restore_flags_nested(&pflags, PF_FSTRANS);
 }
 
-
-int                            /* error */
+/*
+ * Data allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Metadata
+ * requests, OTOH, are generally from low stack usage paths, so avoid the
+ * context switch overhead here.
+ */
+int
 xfs_alloc_vextent(
-       xfs_alloc_arg_t *args)  /* allocation argument structure */
+       struct xfs_alloc_arg    *args)
 {
        DECLARE_COMPLETION_ONSTACK(done);
 
+       if (!args->userdata)
+               return __xfs_alloc_vextent(args);
+
+
        args->done = &done;
-       INIT_WORK(&args->work, xfs_alloc_vextent_worker);
+       INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
        queue_work(xfs_alloc_wq, &args->work);
        wait_for_completion(&done);
        return args->result;
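
The comment added above spells out the design: data allocations often arrive with most of the stack already consumed, so they are bounced to a workqueue, while metadata allocations run inline to avoid the context-switch overhead. The offload-and-wait pattern itself, reduced to a hedged standalone sketch (XFS uses its own xfs_alloc_wq; system_wq stands in here):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct offload_args {
	struct work_struct	work;
	struct completion	*done;
	int			result;
};

static void offload_worker(struct work_struct *work)
{
	struct offload_args *args = container_of(work, struct offload_args, work);

	args->result = 0;	/* the stack-heavy part would run here */
	complete(args->done);
}

static int run_offloaded(struct offload_args *args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	args->done = &done;
	INIT_WORK_ONSTACK(&args->work, offload_worker);
	queue_work(system_wq, &args->work);
	wait_for_completion(&done);
	return args->result;
}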
index ae31c313a79ef6f0f53155a9b83877c6e56fc05a..8dad722c00410f25294f0a8fec608fcbd56e74e8 100644 (file)
@@ -981,10 +981,15 @@ xfs_vm_writepage(
                                imap_valid = 0;
                        }
                } else {
-                       if (PageUptodate(page)) {
+                       if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
-                               imap_valid = 0;
-                       }
+                       /*
+                        * This buffer is not uptodate and will not be
+                        * written to disk.  Ensure that we will put any
+                        * subsequent writeable buffers into a new
+                        * ioend.
+                        */
+                       imap_valid = 0;
                        continue;
                }
 
index 172d3cc8f8cb8ad6d588fea95819ecfded627167..269b35c084dab6906267ccf66f473c98e85ce473 100644 (file)
@@ -201,14 +201,7 @@ xfs_buf_alloc(
        bp->b_length = numblks;
        bp->b_io_length = numblks;
        bp->b_flags = flags;
-
-       /*
-        * We do not set the block number here in the buffer because we have not
-        * finished initialising the buffer. We insert the buffer into the cache
-        * in this state, so this ensures that we are unable to do IO on a
-        * buffer that hasn't been fully initialised.
-        */
-       bp->b_bn = XFS_BUF_DADDR_NULL;
+       bp->b_bn = blkno;
        atomic_set(&bp->b_pin_count, 0);
        init_waitqueue_head(&bp->b_waiters);
 
@@ -567,11 +560,6 @@ xfs_buf_get(
        if (bp != new_bp)
                xfs_buf_free(new_bp);
 
-       /*
-        * Now we have a workable buffer, fill in the block number so
-        * that we can do IO on it.
-        */
-       bp->b_bn = blkno;
        bp->b_io_length = bp->b_length;
 
 found:
@@ -772,7 +760,7 @@ xfs_buf_get_uncached(
        int                     error, i;
        xfs_buf_t               *bp;
 
-       bp = xfs_buf_alloc(target, 0, numblks, 0);
+       bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
        if (unlikely(bp == NULL))
                goto fail;
 
@@ -1001,27 +989,6 @@ xfs_buf_ioerror_alert(
                (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
 }
 
-int
-xfs_bwrite(
-       struct xfs_buf          *bp)
-{
-       int                     error;
-
-       ASSERT(xfs_buf_islocked(bp));
-
-       bp->b_flags |= XBF_WRITE;
-       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
-
-       xfs_bdstrat_cb(bp);
-
-       error = xfs_buf_iowait(bp);
-       if (error) {
-               xfs_force_shutdown(bp->b_target->bt_mount,
-                                  SHUTDOWN_META_IO_ERROR);
-       }
-       return error;
-}
-
 /*
  * Called when we want to stop a buffer from getting written or read.
  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@@ -1091,14 +1058,7 @@ xfs_bioerror_relse(
        return EIO;
 }
 
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
+STATIC int
 xfs_bdstrat_cb(
        struct xfs_buf  *bp)
 {
@@ -1119,6 +1079,27 @@ xfs_bdstrat_cb(
        return 0;
 }
 
+int
+xfs_bwrite(
+       struct xfs_buf          *bp)
+{
+       int                     error;
+
+       ASSERT(xfs_buf_islocked(bp));
+
+       bp->b_flags |= XBF_WRITE;
+       bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+
+       xfs_bdstrat_cb(bp);
+
+       error = xfs_buf_iowait(bp);
+       if (error) {
+               xfs_force_shutdown(bp->b_target->bt_mount,
+                                  SHUTDOWN_META_IO_ERROR);
+       }
+       return error;
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data from going to disk in case
  * we are shutting down the filesystem.  Typically user data goes thru this
@@ -1255,7 +1236,7 @@ xfs_buf_iorequest(
         */
        atomic_set(&bp->b_io_remaining, 1);
        _xfs_buf_ioapply(bp);
-       _xfs_buf_ioend(bp, 0);
+       _xfs_buf_ioend(bp, 1);
 
        xfs_buf_rele(bp);
 }
index 7f1d1392ce37c292a04e8a22268537527687e10f..79344c48008eedaab4aaa21dc6ed28fe328607d3 100644 (file)
@@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 extern int xfs_bwrite(struct xfs_buf *bp);
 
 extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
 
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
index 45df2b857d482fe478c3f4965ddfa57a648eb6ee..d9e451115f980ac1529c4e89aa89665c82661360 100644 (file)
@@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
 
                if (!XFS_BUF_ISSTALE(bp)) {
                        bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
-                       xfs_bdstrat_cb(bp);
+                       xfs_buf_iorequest(bp);
                } else {
                        xfs_buf_relse(bp);
                }
index 6cdbf90c6f7b50116534c6cb7de2d1ff91816f91..d041d47d9d86d13718b4c0c89047efe96e098aab 100644 (file)
@@ -504,6 +504,14 @@ xfs_inode_item_push(
                goto out_unlock;
        }
 
+       /*
+        * Stale inode items should force out the iclog.
+        */
+       if (ip->i_flags & XFS_ISTALE) {
+               rval = XFS_ITEM_PINNED;
+               goto out_unlock;
+       }
+
        /*
         * Someone else is already flushing the inode.  Nothing we can do
         * here but wait for the flush to finish and remove the item from
@@ -514,15 +522,6 @@ xfs_inode_item_push(
                goto out_unlock;
        }
 
-       /*
-        * Stale inode items should force out the iclog.
-        */
-       if (ip->i_flags & XFS_ISTALE) {
-               xfs_ifunlock(ip);
-               xfs_iunlock(ip, XFS_ILOCK_SHARED);
-               return XFS_ITEM_PINNED;
-       }
-
        ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
        ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
 
index f30d9807dc48a0535084da1afd5a7620389adcd5..d90d4a388609af9da0cd40eb786a1fdd225a8d62 100644 (file)
 kmem_zone_t    *xfs_log_ticket_zone;
 
 /* Local miscellaneous function prototypes */
-STATIC int      xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
-                                   xlog_in_core_t **, xfs_lsn_t *);
+STATIC int
+xlog_commit_record(
+       struct xlog             *log,
+       struct xlog_ticket      *ticket,
+       struct xlog_in_core     **iclog,
+       xfs_lsn_t               *commitlsnp);
+
 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t    *mp,
                                xfs_buftarg_t   *log_target,
                                xfs_daddr_t     blk_offset,
                                int             num_bblks);
-STATIC int      xlog_space_left(struct log *log, atomic64_t *head);
+STATIC int
+xlog_space_left(
+       struct xlog             *log,
+       atomic64_t              *head);
 STATIC int      xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void     xlog_dealloc_log(xlog_t *log);
 
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t          *log,
                                     int                eventual_size);
 STATIC void xlog_state_want_sync(xlog_t        *log, xlog_in_core_t *iclog);
 
-STATIC void xlog_grant_push_ail(struct log     *log,
-                               int             need_bytes);
+STATIC void
+xlog_grant_push_ail(
+       struct xlog     *log,
+       int             need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t       *log,
                                           xlog_ticket_t *ticket);
 STATIC void xlog_ungrant_log_space(xlog_t       *log,
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t      *log,
 
 #if defined(DEBUG)
 STATIC void    xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void    xlog_verify_grant_tail(struct log *log);
+STATIC void
+xlog_verify_grant_tail(
+       struct xlog     *log);
 STATIC void    xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
                                  int count, boolean_t syncing);
 STATIC void    xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
@@ -89,9 +101,9 @@ STATIC int   xlog_iclogs_empty(xlog_t *log);
 
 static void
 xlog_grant_sub_space(
-       struct log      *log,
-       atomic64_t      *head,
-       int             bytes)
+       struct xlog             *log,
+       atomic64_t              *head,
+       int                     bytes)
 {
        int64_t head_val = atomic64_read(head);
        int64_t new, old;
@@ -115,9 +127,9 @@ xlog_grant_sub_space(
 
 static void
 xlog_grant_add_space(
-       struct log      *log,
-       atomic64_t      *head,
-       int             bytes)
+       struct xlog             *log,
+       atomic64_t              *head,
+       int                     bytes)
 {
        int64_t head_val = atomic64_read(head);
        int64_t new, old;
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all(
 
 static inline int
 xlog_ticket_reservation(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic)
 {
@@ -182,7 +194,7 @@ xlog_ticket_reservation(
 
 STATIC bool
 xlog_grant_head_wake(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        int                     *free_bytes)
 {
@@ -204,7 +216,7 @@ xlog_grant_head_wake(
 
 STATIC int
 xlog_grant_head_wait(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     need_bytes)
@@ -256,7 +268,7 @@ shutdown:
  */
 STATIC int
 xlog_grant_head_check(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_grant_head  *head,
        struct xlog_ticket      *tic,
        int                     *need_bytes)
@@ -323,7 +335,7 @@ xfs_log_regrant(
        struct xfs_mount        *mp,
        struct xlog_ticket      *tic)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     need_bytes;
        int                     error = 0;
 
@@ -389,7 +401,7 @@ xfs_log_reserve(
        bool                    permanent,
        uint                    t_type)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_ticket      *tic;
        int                     need_bytes;
        int                     error = 0;
@@ -465,7 +477,7 @@ xfs_log_done(
        struct xlog_in_core     **iclog,
        uint                    flags)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        xfs_lsn_t               lsn = 0;
 
        if (XLOG_FORCED_SHUTDOWN(log) ||
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 void
 xfs_log_unmount(xfs_mount_t *mp)
 {
+       cancel_delayed_work_sync(&mp->m_sync_work);
        xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
 }
@@ -838,7 +851,7 @@ void
 xfs_log_space_wake(
        struct xfs_mount        *mp)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     free_bytes;
 
        if (XLOG_FORCED_SHUTDOWN(log))
@@ -916,7 +929,7 @@ xfs_lsn_t
 xlog_assign_tail_lsn_locked(
        struct xfs_mount        *mp)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xfs_log_item     *lip;
        xfs_lsn_t               tail_lsn;
 
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn(
  */
 STATIC int
 xlog_space_left(
-       struct log      *log,
+       struct xlog     *log,
        atomic64_t      *head)
 {
        int             free_bytes;
@@ -1277,7 +1290,7 @@ out:
  */
 STATIC int
 xlog_commit_record(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_ticket      *ticket,
        struct xlog_in_core     **iclog,
        xfs_lsn_t               *commitlsnp)
@@ -1311,7 +1324,7 @@ xlog_commit_record(
  */
 STATIC void
 xlog_grant_push_ail(
-       struct log      *log,
+       struct xlog     *log,
        int             need_bytes)
 {
        xfs_lsn_t       threshold_lsn = 0;
@@ -1790,7 +1803,7 @@ xlog_write_start_rec(
 
 static xlog_op_header_t *
 xlog_write_setup_ophdr(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_op_header   *ophdr,
        struct xlog_ticket      *ticket,
        uint                    flags)
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy(
 
 static int
 xlog_write_copy_finish(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_in_core     *iclog,
        uint                    flags,
        int                     *record_cnt,
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish(
  */
 int
 xlog_write(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *log_vector,
        struct xlog_ticket      *ticket,
        xfs_lsn_t               *start_lsn,
@@ -2821,7 +2834,7 @@ _xfs_log_force(
        uint                    flags,
        int                     *log_flushed)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
        xfs_lsn_t               lsn;
 
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn(
        uint                    flags,
        int                     *log_flushed)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
        int                     already_slept = 0;
 
@@ -3147,7 +3160,7 @@ xfs_log_ticket_get(
  */
 xlog_ticket_t *
 xlog_ticket_alloc(
-       struct log      *log,
+       struct xlog     *log,
        int             unit_bytes,
        int             cnt,
        char            client,
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc(
  */
 void
 xlog_verify_dest_ptr(
-       struct log      *log,
+       struct xlog     *log,
        char            *ptr)
 {
        int i;
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr(
  */
 STATIC void
 xlog_verify_grant_tail(
-       struct log      *log)
+       struct xlog     *log)
 {
        int             tail_cycle, tail_blocks;
        int             cycle, space;
index 7d6197c5849381e8d8f0f0e6fbef91f332f4e108..ddc4529d07d32fe69b2d8d54b7699f19768198d4 100644 (file)
@@ -44,7 +44,7 @@
  */
 static struct xlog_ticket *
 xlog_cil_ticket_alloc(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xlog_ticket *tic;
 
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc(
  */
 void
 xlog_cil_init_post_recovery(
-       struct log      *log)
+       struct xlog     *log)
 {
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs(
  */
 STATIC void
 xfs_cil_prepare_item(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *lv,
        int                     *len,
        int                     *diff_iovecs)
@@ -231,7 +231,7 @@ xfs_cil_prepare_item(
  */
 static void
 xlog_cil_insert_items(
-       struct log              *log,
+       struct xlog             *log,
        struct xfs_log_vec      *log_vector,
        struct xlog_ticket      *ticket)
 {
@@ -373,7 +373,7 @@ xlog_cil_committed(
  */
 STATIC int
 xlog_cil_push(
-       struct log              *log)
+       struct xlog             *log)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
@@ -601,7 +601,7 @@ xlog_cil_push_work(
  */
 static void
 xlog_cil_push_background(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xfs_cil  *cil = log->l_cilp;
 
@@ -629,7 +629,7 @@ xlog_cil_push_background(
 
 static void
 xlog_cil_push_foreground(
-       struct log      *log,
+       struct xlog     *log,
        xfs_lsn_t       push_seq)
 {
        struct xfs_cil  *cil = log->l_cilp;
@@ -683,7 +683,7 @@ xfs_log_commit_cil(
        xfs_lsn_t               *commit_lsn,
        int                     flags)
 {
-       struct log              *log = mp->m_log;
+       struct xlog             *log = mp->m_log;
        int                     log_flags = 0;
        struct xfs_log_vec      *log_vector;
 
@@ -754,7 +754,7 @@ xfs_log_commit_cil(
  */
 xfs_lsn_t
 xlog_cil_force_lsn(
-       struct log      *log,
+       struct xlog     *log,
        xfs_lsn_t       sequence)
 {
        struct xfs_cil          *cil = log->l_cilp;
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt(
  */
 int
 xlog_cil_init(
-       struct log      *log)
+       struct xlog     *log)
 {
        struct xfs_cil  *cil;
        struct xfs_cil_ctx *ctx;
@@ -869,7 +869,7 @@ xlog_cil_init(
 
 void
 xlog_cil_destroy(
-       struct log      *log)
+       struct xlog     *log)
 {
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
index 5bc33261f5be6311fb5732f42db25e17064abded..72eba2201b1449aee4d52f5c0863549c9aedf7e9 100644 (file)
@@ -19,7 +19,7 @@
 #define __XFS_LOG_PRIV_H__
 
 struct xfs_buf;
-struct log;
+struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
 
@@ -352,7 +352,7 @@ typedef struct xlog_in_core {
        struct xlog_in_core     *ic_next;
        struct xlog_in_core     *ic_prev;
        struct xfs_buf          *ic_bp;
-       struct log              *ic_log;
+       struct xlog             *ic_log;
        int                     ic_size;
        int                     ic_offset;
        int                     ic_bwritecnt;
@@ -409,7 +409,7 @@ struct xfs_cil_ctx {
  * operations almost as efficient as the old logging methods.
  */
 struct xfs_cil {
-       struct log              *xc_log;
+       struct xlog             *xc_log;
        struct list_head        xc_cil;
        spinlock_t              xc_cil_lock;
        struct xfs_cil_ctx      *xc_ctx;
@@ -487,7 +487,7 @@ struct xlog_grant_head {
  * overflow 31 bits worth of byte offset, so using a byte number will mean
  * that round off problems won't occur when releasing partial reservations.
  */
-typedef struct log {
+typedef struct xlog {
        /* The following fields don't need locking */
        struct xfs_mount        *l_mp;          /* mount point */
        struct xfs_ail          *l_ailp;        /* AIL log is working with */
@@ -553,9 +553,14 @@ extern int  xlog_recover_finish(xlog_t *log);
 extern void     xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 
 extern kmem_zone_t *xfs_log_ticket_zone;
-struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes,
-                               int count, char client, bool permanent,
-                               xfs_km_flags_t alloc_flags);
+struct xlog_ticket *
+xlog_ticket_alloc(
+       struct xlog     *log,
+       int             unit_bytes,
+       int             count,
+       char            client,
+       bool            permanent,
+       xfs_km_flags_t  alloc_flags);
 
 
 static inline void
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
 }
 
 void   xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
-int    xlog_write(struct log *log, struct xfs_log_vec *log_vector,
-                               struct xlog_ticket *tic, xfs_lsn_t *start_lsn,
-                               xlog_in_core_t **commit_iclog, uint flags);
+int
+xlog_write(
+       struct xlog             *log,
+       struct xfs_log_vec      *log_vector,
+       struct xlog_ticket      *tic,
+       xfs_lsn_t               *start_lsn,
+       struct xlog_in_core     **commit_iclog,
+       uint                    flags);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
 /*
  * Committed Item List interfaces
  */
-int    xlog_cil_init(struct log *log);
-void   xlog_cil_init_post_recovery(struct log *log);
-void   xlog_cil_destroy(struct log *log);
+int
+xlog_cil_init(struct xlog *log);
+void
+xlog_cil_init_post_recovery(struct xlog *log);
+void
+xlog_cil_destroy(struct xlog *log);
 
 /*
  * CIL force routines
  */
-xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+xfs_lsn_t
+xlog_cil_force_lsn(
+       struct xlog *log,
+       xfs_lsn_t sequence);
 
 static inline void
-xlog_cil_force(struct log *log)
+xlog_cil_force(struct xlog *log)
 {
        xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
 }
index ca386909131a3f556a14868ee9354db352e3fe8f..a7be98abd6a90327ae9a0993ef65f2bc7f005e1c 100644 (file)
@@ -1471,8 +1471,8 @@ xlog_recover_add_item(
 
 STATIC int
 xlog_recover_add_to_cont_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        xfs_caddr_t             dp,
        int                     len)
 {
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans(
  */
 STATIC int
 xlog_recover_add_to_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        xfs_caddr_t             dp,
        int                     len)
 {
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans(
  */
 STATIC int
 xlog_recover_reorder_trans(
-       struct log              *log,
-       xlog_recover_t          *trans,
+       struct xlog             *log,
+       struct xlog_recover     *trans,
        int                     pass)
 {
        xlog_recover_item_t     *item, *n;
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans(
  */
 STATIC int
 xlog_recover_buffer_pass1(
-       struct log              *log,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover_item        *item)
 {
        xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
        struct list_head        *bucket;
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1(
  */
 STATIC int
 xlog_check_buffer_cancelled(
-       struct log              *log,
+       struct xlog             *log,
        xfs_daddr_t             blkno,
        uint                    len,
        ushort                  flags)
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans(
 
 STATIC int
 xlog_recover_commit_pass1(
-       struct log              *log,
-       struct xlog_recover     *trans,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct xlog_recover_item        *item)
 {
        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
 
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1(
 
 STATIC int
 xlog_recover_commit_pass2(
-       struct log              *log,
-       struct xlog_recover     *trans,
-       struct list_head        *buffer_list,
-       xlog_recover_item_t     *item)
+       struct xlog                     *log,
+       struct xlog_recover             *trans,
+       struct list_head                *buffer_list,
+       struct xlog_recover_item        *item)
 {
        trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
 
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2(
  */
 STATIC int
 xlog_recover_commit_trans(
-       struct log              *log,
+       struct xlog             *log,
        struct xlog_recover     *trans,
        int                     pass)
 {
@@ -2793,8 +2793,8 @@ out:
 
 STATIC int
 xlog_recover_unmount_trans(
-       struct log              *log,
-       xlog_recover_t          *trans)
+       struct xlog             *log,
+       struct xlog_recover     *trans)
 {
        /* Do nothing now */
        xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
index 8b89c5ac72d9bf777b48e3e653736507bd04b446..90c1fc9eaea4d9c7be48c5ef219e7598f6e47f54 100644 (file)
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations {
 
 #include "xfs_sync.h"
 
-struct log;
+struct xlog;
 struct xfs_mount_args;
 struct xfs_inode;
 struct xfs_bmbt_irec;
@@ -133,7 +133,7 @@ typedef struct xfs_mount {
        uint                    m_readio_blocks; /* min read size blocks */
        uint                    m_writeio_log;  /* min write size log bytes */
        uint                    m_writeio_blocks; /* min write size blocks */
-       struct log              *m_log;         /* log specific stuff */
+       struct xlog             *m_log;         /* log specific stuff */
        int                     m_logbufs;      /* number of log buffers */
        int                     m_logbsize;     /* size of each log buffer */
        uint                    m_rsumlevels;   /* rt summary levels */
index c9d3409c5ca3f991b7bde133994c4920316a216c..1e9ee064dbb28c7cb491d4c41dce53007eebc7e9 100644 (file)
@@ -386,23 +386,23 @@ xfs_sync_worker(
         * We shouldn't write/force the log if we are in the mount/unmount
         * process or on a read only filesystem. The workqueue still needs to be
         * active in both cases, however, because it is used for inode reclaim
-        * during these times.  Use the s_umount semaphore to provide exclusion
-        * with unmount.
+        * during these times.  Use the MS_ACTIVE flag to avoid doing anything
+        * during mount.  Doing work during unmount is avoided by calling
+        * cancel_delayed_work_sync on this work queue before tearing down
+        * the ail and the log in xfs_log_unmount.
         */
-       if (down_read_trylock(&mp->m_super->s_umount)) {
-               if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-                       /* dgc: errors ignored here */
-                       if (mp->m_super->s_frozen == SB_UNFROZEN &&
-                           xfs_log_need_covered(mp))
-                               error = xfs_fs_log_dummy(mp);
-                       else
-                               xfs_log_force(mp, 0);
-
-                       /* start pushing all the metadata that is currently
-                        * dirty */
-                       xfs_ail_push_all(mp->m_ail);
-               }
-               up_read(&mp->m_super->s_umount);
+       if (!(mp->m_super->s_flags & MS_ACTIVE) &&
+           !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               /* dgc: errors ignored here */
+               if (mp->m_super->s_frozen == SB_UNFROZEN &&
+                   xfs_log_need_covered(mp))
+                       error = xfs_fs_log_dummy(mp);
+               else
+                       xfs_log_force(mp, 0);
+
+               /* start pushing all the metadata that is currently
+                * dirty */
+               xfs_ail_push_all(mp->m_ail);
        }
 
        /* queue us up again */
index 7cf9d3529e5112c39c25e007bb65c9c1774759ec..caf5dabfd55347b292664e86654f04bbe62d7a98 100644 (file)
@@ -32,7 +32,7 @@ struct xfs_da_node_entry;
 struct xfs_dquot;
 struct xfs_log_item;
 struct xlog_ticket;
-struct log;
+struct xlog;
 struct xlog_recover;
 struct xlog_recover_item;
 struct xfs_buf_log_format;
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force);
 DEFINE_DQUOT_EVENT(xfs_dqflush_done);
 
 DECLARE_EVENT_CLASS(xfs_loggrant_class,
-       TP_PROTO(struct log *log, struct xlog_ticket *tic),
+       TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
        TP_ARGS(log, tic),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
 
 #define DEFINE_LOGGRANT_EVENT(name) \
 DEFINE_EVENT(xfs_loggrant_class, name, \
-       TP_PROTO(struct log *log, struct xlog_ticket *tic), \
+       TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
        TP_ARGS(log, tic))
 DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
 DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
-       TP_PROTO(struct log *log, struct xlog_recover *trans,
+       TP_PROTO(struct xlog *log, struct xlog_recover *trans,
                struct xlog_recover_item *item, int pass),
        TP_ARGS(log, trans, item, pass),
        TP_STRUCT__entry(
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
 
 #define DEFINE_LOG_RECOVER_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_item_class, name, \
-       TP_PROTO(struct log *log, struct xlog_recover *trans, \
+       TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
                struct xlog_recover_item *item, int pass), \
        TP_ARGS(log, trans, item, pass))
 
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
 DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
-       TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
+       TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
        TP_ARGS(log, buf_f),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
 
 #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
-       TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
+       TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
        TP_ARGS(log, buf_f))
 
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
 DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
 
 DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
-       TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
+       TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
        TP_ARGS(log, in_f),
        TP_STRUCT__entry(
                __field(dev_t, dev)
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
 )
 #define DEFINE_LOG_RECOVER_INO_ITEM(name) \
 DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
-       TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
+       TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
        TP_ARGS(log, in_f))
 
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
index 92d6e1d701ff13466a5c8c5361b4beed017ba953..19503449814f587696a38e5c9078a50f8ae56aef 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -52,6 +52,7 @@
 #define AE_CODE_ACPI_TABLES             0x2000
 #define AE_CODE_AML                     0x3000
 #define AE_CODE_CONTROL                 0x4000
+#define AE_CODE_MAX                     0x4000
 #define AE_CODE_MASK                    0xF000
 
 #define ACPI_SUCCESS(a)                 (!(a))
 
 /* Exception strings for acpi_format_exception */
 
-#ifdef DEFINE_ACPI_GLOBALS
+#ifdef ACPI_DEFINE_EXCEPTION_TABLE
 
 /*
  * String versions of the exception codes above
@@ -295,6 +296,6 @@ char const *acpi_gbl_exception_names_ctrl[] = {
        "AE_CTRL_PARSE_PENDING"
 };
 
-#endif                         /* ACPI GLOBALS */
+#endif                         /* EXCEPTION_TABLE */
 
 #endif                         /* __ACEXCEP_H__ */
index 38f508816e4a7a2e9f3c5133de7a662a649141a1..ef24d82c4a415c8ae68f38fcf15b2a48d5fa0a9a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d7bd661bfae73ed39cb15f17a39c4c06b78b1d63..2457ac8496552d632b8075b6c4284e30008f22ae 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define ACPI_WARNING(plist)             acpi_warning plist
 #define ACPI_EXCEPTION(plist)           acpi_exception plist
 #define ACPI_ERROR(plist)               acpi_error plist
+#define ACPI_BIOS_WARNING(plist)        acpi_bios_warning plist
+#define ACPI_BIOS_ERROR(plist)          acpi_bios_error plist
 #define ACPI_DEBUG_OBJECT(obj,l,i)      acpi_ex_do_debug_object(obj,l,i)
 
 #else
 #define ACPI_WARNING(plist)
 #define ACPI_EXCEPTION(plist)
 #define ACPI_ERROR(plist)
+#define ACPI_BIOS_WARNING(plist)
+#define ACPI_BIOS_ERROR(plist)
 #define ACPI_DEBUG_OBJECT(obj,l,i)
 
 #endif                         /* ACPI_NO_ERROR_MESSAGES */
index de39915f6b7fb389e8cf2c8b7be73184e1fa8db5..c433d5e27679803b061dff7c2715220e47422b92 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 62eb514f8e3ae27373058e08fc8734daa0f5c862..b22b77444b5c5d4e673a2a9f346da5fa1907eace 100644 (file)
@@ -450,8 +450,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 
 #else  /* CONFIG_ACPI */
 
-static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; }
-static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; }
+static inline int register_acpi_bus_type(void *bus) { return 0; }
+static inline int unregister_acpi_bus_type(void *bus) { return 0; }
 
 #endif                         /* CONFIG_ACPI */
 
index 21a5548c66865cb2b3a79d18807b79ce771f6d9f..0650f5fa7ce99245fe1d00d2e83048c0256c3054 100644 (file)
@@ -8,7 +8,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -205,7 +205,7 @@ acpi_os_execute(acpi_execute_type type,
 acpi_status
 acpi_os_hotplug_execute(acpi_osd_exec_callback function, void *context);
 
-void acpi_os_wait_events_complete(void *context);
+void acpi_os_wait_events_complete(void);
 
 void acpi_os_sleep(u64 milliseconds);
 
index 9821101346726a27b54eec467a65012de45de466..2c744c7a5b3dcdd452d1c531884faa0ed0a3e29b 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20120320
+#define ACPI_CA_VERSION                 0x20120711
 
 #include "acconfig.h"
 #include "actypes.h"
@@ -154,15 +154,20 @@ void *acpi_callocate(u32 size);
 void acpi_free(void *address);
 
 /*
- * ACPI table manipulation interfaces
+ * ACPI table load/unload interfaces
  */
-acpi_status acpi_reallocate_root_table(void);
+acpi_status acpi_load_table(struct acpi_table_header *table);
 
-acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
+acpi_status acpi_unload_parent_table(acpi_handle object);
 
 acpi_status acpi_load_tables(void);
 
-acpi_status acpi_load_table(struct acpi_table_header *table_ptr);
+/*
+ * ACPI table manipulation interfaces
+ */
+acpi_status acpi_reallocate_root_table(void);
+
+acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
 
 acpi_status acpi_unload_table_id(acpi_owner_id id);
 
@@ -529,6 +534,14 @@ void ACPI_INTERNAL_VAR_XFACE
 acpi_info(const char *module_name,
          u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
 
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_error(const char *module_name,
+               u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_warning(const char *module_name,
+                 u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
 /*
  * Debug output
  */
index 3506e39a66b15962a5d6de4ab54a8117ff0267bb..40349ae654640e249f161c3df888577f19eecdc1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
  * Definitions for Resource Attributes
  */
 typedef u16 acpi_rs_length;    /* Resource Length field is fixed at 16 bits */
-typedef u32 acpi_rsdesc_size;  /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
+typedef u32 acpi_rsdesc_size;  /* Max Resource Descriptor size is (Length+3) = (64K-1)+3 */
 
 /*
  * Memory Attributes
@@ -332,7 +332,7 @@ struct acpi_resource_address64 {
 };
 
 struct acpi_resource_extended_address64 {
-       ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+       ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
        u64 granularity;
        u64 minimum;
        u64 maximum;
index 8dea54665dcf35b4c0bfdcd63b6920881d32e699..59a73e1b2845fbde763818159cc6b9791c34586d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -212,7 +212,7 @@ struct acpi_table_fadt {
        u32 smi_command;        /* 32-bit Port address of SMI command port */
        u8 acpi_enable;         /* Value to write to smi_cmd to enable ACPI */
        u8 acpi_disable;        /* Value to write to smi_cmd to disable ACPI */
-       u8 S4bios_request;      /* Value to write to SMI CMD to enter S4BIOS state */
+       u8 s4_bios_request;     /* Value to write to SMI CMD to enter S4BIOS state */
        u8 pstate_control;      /* Processor performance state control */
        u32 pm1a_event_block;   /* 32-bit Port address of Power Mgt 1a Event Reg Blk */
        u32 pm1b_event_block;   /* 32-bit Port address of Power Mgt 1b Event Reg Blk */
@@ -230,8 +230,8 @@ struct acpi_table_fadt {
        u8 gpe1_block_length;   /* Byte Length of ports at gpe1_block */
        u8 gpe1_base;           /* Offset in GPE number space where GPE1 events start */
        u8 cst_control;         /* Support for the _CST object and C States change notification */
-       u16 C2latency;          /* Worst case HW latency to enter/exit C2 state */
-       u16 C3latency;          /* Worst case HW latency to enter/exit C3 state */
+       u16 c2_latency;         /* Worst case HW latency to enter/exit C2 state */
+       u16 c3_latency;         /* Worst case HW latency to enter/exit C3 state */
        u16 flush_size;         /* Processor's memory cache line width, in bytes */
        u16 flush_stride;       /* Number of flush strides that need to be read */
        u8 duty_offset;         /* Processor duty cycle index in processor's P_CNT reg */
@@ -291,7 +291,7 @@ struct acpi_table_fadt {
 #define ACPI_FADT_S4_RTC_VALID      (1<<16)    /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
 #define ACPI_FADT_REMOTE_POWER_ON   (1<<17)    /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
 #define ACPI_FADT_APIC_CLUSTER      (1<<18)    /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
-#define ACPI_FADT_APIC_PHYSICAL     (1<<19)    /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
+#define ACPI_FADT_APIC_PHYSICAL     (1<<19)    /* 19: [V4] All local xAPICs must use physical dest mode (ACPI 3.0) */
 #define ACPI_FADT_HW_REDUCED        (1<<20)    /* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */
 #define ACPI_FADT_LOW_POWER_S0      (1<<21)    /* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */
 
index 71e747beac8f3e016fa3719f4b75e7706a3b64fb..300d14e7c5d5e76ae24b3e4d77d279769f5a48aa 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -676,7 +676,7 @@ struct acpi_madt_local_apic {
 struct acpi_madt_io_apic {
        struct acpi_subtable_header header;
        u8 id;                  /* I/O APIC ID */
-       u8 reserved;            /* Reserved - must be zero */
+       u8 reserved;            /* reserved - must be zero */
        u32 address;            /* APIC physical address */
        u32 global_irq_base;    /* Global system interrupt where INTI lines start */
 };
@@ -794,11 +794,11 @@ struct acpi_madt_generic_interrupt {
 
 struct acpi_madt_generic_distributor {
        struct acpi_subtable_header header;
-       u16 reserved;           /* Reserved - must be zero */
+       u16 reserved;           /* reserved - must be zero */
        u32 gic_id;
        u64 base_address;
        u32 global_irq_base;
-       u32 reserved2;          /* Reserved - must be zero */
+       u32 reserved2;          /* reserved - must be zero */
 };
 
 /*
@@ -841,7 +841,7 @@ struct acpi_table_msct {
        u64 max_address;        /* Max physical address in system */
 };
 
-/* Subtable - Maximum Proximity Domain Information. Version 1 */
+/* subtable - Maximum Proximity Domain Information. Version 1 */
 
 struct acpi_msct_proximity {
        u8 revision;
index 58bdd0545c5afba6432c45ef8218b616e09f59ae..d9ceb3d316299275fdb2100c2843368be7541317 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@
 #define ACPI_SIG_DBGP           "DBGP" /* Debug Port table */
 #define ACPI_SIG_DMAR           "DMAR" /* DMA Remapping table */
 #define ACPI_SIG_HPET           "HPET" /* High Precision Event Timer table */
-#define ACPI_SIG_IBFT           "IBFT" /* i_sCSI Boot Firmware Table */
+#define ACPI_SIG_IBFT           "IBFT" /* iSCSI Boot Firmware Table */
 #define ACPI_SIG_IVRS           "IVRS" /* I/O Virtualization Reporting Structure */
 #define ACPI_SIG_MCFG           "MCFG" /* PCI Memory Mapped Configuration table */
 #define ACPI_SIG_MCHI           "MCHI" /* Management Controller Host Interface table */
@@ -334,8 +334,8 @@ struct acpi_dmar_reserved_memory {
        struct acpi_dmar_header header;
        u16 reserved;
        u16 segment;
-       u64 base_address;       /* 4_k aligned base address */
-       u64 end_address;        /* 4_k aligned limit address */
+       u64 base_address;       /* 4K aligned base address */
+       u64 end_address;        /* 4K aligned limit address */
 };
 
 /* Masks for Flags field above */
@@ -565,7 +565,7 @@ struct acpi_ivrs_hardware {
 /* Masks for Info field above */
 
 #define ACPI_IVHD_MSI_NUMBER_MASK   0x001F     /* 5 bits, MSI message number */
-#define ACPI_IVHD_UNIT_ID_MASK      0x1F00     /* 5 bits, unit_iD */
+#define ACPI_IVHD_UNIT_ID_MASK      0x1F00     /* 5 bits, unit_ID */
 
 /*
  * Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure.
index c22ce80e9535446b1ef02f6e14cfe58885babdbc..f65a0ed869ebc327bceee545bbe7d905726b05b4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e8bcc4742e0e9a1f9563a27907871ab6ce727817..3af87de6a68cd1a9afbcf21aaa4a47151158416e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -173,7 +173,7 @@ typedef u64 acpi_physical_address;
  * to indicate that special precautions must be taken to avoid alignment faults.
  * (IA64 or ia64 is currently used by existing compilers to indicate IPF.)
  *
- * Note: Em64_t and other X86-64 processors support misaligned transfers,
+ * Note: EM64T and other X86-64 processors support misaligned transfers,
  * so there is no need to define this flag.
  */
 #if defined (__IA64__) || defined (__ia64__)
@@ -636,7 +636,7 @@ typedef u32 acpi_event_type;
 #define ACPI_NUM_FIXED_EVENTS           ACPI_EVENT_MAX + 1
 
 /*
- * Event Status - Per event
+ * Event status - Per event
  * -------------
  * The encoding of acpi_event_status is illustrated below.
  * Note that a set bit (1) indicates the property is TRUE
@@ -706,10 +706,14 @@ typedef u32 acpi_event_status;
 #define ACPI_DEVICE_NOTIFY              0x2
 #define ACPI_ALL_NOTIFY                 (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
 #define ACPI_MAX_NOTIFY_HANDLER_TYPE    0x3
+#define ACPI_NUM_NOTIFY_TYPES           2
 
 #define ACPI_MAX_SYS_NOTIFY             0x7F
 #define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF
 
+#define ACPI_SYSTEM_HANDLER_LIST        0      /* Used as index, must be SYSTEM_NOTIFY -1 */
+#define ACPI_DEVICE_HANDLER_LIST        1      /* Used as index, must be DEVICE_NOTIFY -1 */
+
 /* Address Space (Operation Region) Types */
 
 typedef u8 acpi_adr_space_type;
@@ -724,8 +728,9 @@ typedef u8 acpi_adr_space_type;
 #define ACPI_ADR_SPACE_IPMI             (acpi_adr_space_type) 7
 #define ACPI_ADR_SPACE_GPIO             (acpi_adr_space_type) 8
 #define ACPI_ADR_SPACE_GSBUS            (acpi_adr_space_type) 9
+#define ACPI_ADR_SPACE_PLATFORM_COMM    (acpi_adr_space_type) 10
 
-#define ACPI_NUM_PREDEFINED_REGIONS     10
+#define ACPI_NUM_PREDEFINED_REGIONS     11
 
 /*
  * Special Address Spaces
index 5af3ed52ef980d9c09e9ceff7221341108cfa2db..560a9f272f34c56d361f156383e970a06a37632a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e228893591a95c09905a0728b49c8088496110a4..72553b0c9f33e297a5e0037ac010f08d11a05720 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6fbc4cab583436399ef7f9f61da1cc6845e5dc79..7509be30ca01b903816009469ad1952af6741342 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2520a6e241dc0967ef7f8e096436a19a6042e36a..7d10f962aa137776389daf226b42b8be9702e1bd 100644 (file)
@@ -3,10 +3,18 @@
 
 #include <linux/compiler.h>
 
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING                (1 << 0)
+#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
 #ifdef CONFIG_BUG
 
 #ifdef CONFIG_GENERIC_BUG
-#ifndef __ASSEMBLY__
 struct bug_entry {
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        unsigned long   bug_addr;
@@ -23,12 +31,6 @@ struct bug_entry {
 #endif
        unsigned short  flags;
 };
-#endif         /* __ASSEMBLY__ */
-
-#define BUGFLAG_WARNING                (1 << 0)
-#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
-#define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
-
 #endif /* CONFIG_GENERIC_BUG */
 
 /*
@@ -60,7 +62,6 @@ struct bug_entry {
  * to provide better diagnostics.
  */
 #ifndef __WARN_TAINT
-#ifndef __ASSEMBLY__
 extern __printf(3, 4)
 void warn_slowpath_fmt(const char *file, const int line,
                       const char *fmt, ...);
@@ -69,7 +70,6 @@ void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
                             const char *fmt, ...);
 extern void warn_slowpath_null(const char *file, const int line);
 #define WANT_WARN_ON_SLOWPATH
-#endif
 #define __WARN()               warn_slowpath_null(__FILE__, __LINE__)
 #define __WARN_printf(arg...)  warn_slowpath_fmt(__FILE__, __LINE__, arg)
 #define __WARN_printf_taint(taint, arg...)                             \
@@ -202,4 +202,6 @@ extern void warn_slowpath_null(const char *file, const int line);
 # define WARN_ON_SMP(x)                        ({0;})
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 #endif
index c544356b374b62285b325c2df1ab77008c2422d1..294b1e755ab26e7bb47d9f1800ae132fc6667950 100644 (file)
@@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
 {
        if (dev)
                dev->cma_area = cma;
-       if (!dev || !dma_contiguous_default_area)
+       if (!dev && !dma_contiguous_default_area)
                dma_contiguous_default_area = cma;
 }
 
index 6f2b45a9b6bc425b7df6231f474516e1bd4c1344..ff4947b7a9762b6ea414aa8b0159af8abb90e9dc 100644 (file)
@@ -484,6 +484,16 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
        /*
         * The barrier will stabilize the pmdval in a register or on
         * the stack so that it will stop changing under the code.
+        *
+        * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+        * pmd_read_atomic is allowed to return a not atomic pmdval
+        * (for example pointing to an hugepage that has never been
+        * mapped in the pmd). The below checks will only care about
+        * the low part of the pmd with 32bit PAE x86 anyway, with the
+        * exception of pmd_none(). So the important thing is that if
+        * the low part of the pmd is found null, the high part will
+        * be also null or the pmd_none() check below would be
+        * confused.
         */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
index 73e45600f95def601badd64689d75a117ddcc579..bac55c2151131c9689e284a4b4a8b438ed223039 100644 (file)
@@ -54,7 +54,7 @@ struct drm_mode_object {
        struct drm_object_properties *properties;
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 16
+#define DRM_OBJECT_MAX_PROPERTY 24
 struct drm_object_properties {
        int count;
        uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
index 58d0bdab68dd081ae8daa6a06c19eef5bb47e4fe..a7aec391b7b7d68d59876f9f5bcde4888d257e87 100644 (file)
@@ -1,7 +1,3 @@
-/*
-   This file is auto-generated from the drm_pciids.txt in the DRM CVS
-   Please contact dri-devel@lists.sf.net to add new cards to this list
-*/
 #define radeon_PCI_IDS \
        {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index b6d7ce92eadd67a67db12b19bb0d39208b870511..68733587e700bb00b776114e4a19530425194143 100644 (file)
@@ -64,6 +64,7 @@ struct drm_exynos_gem_map_off {
  * A structure for mapping buffer.
  *
  * @handle: a handle to gem object created.
+ * @pad: just padding to be 64-bit aligned.
  * @size: memory size to be mapped.
  * @mapped: having user virtual address mmaped.
  *     - this variable would be filled by exynos gem module
@@ -72,7 +73,8 @@ struct drm_exynos_gem_map_off {
  */
 struct drm_exynos_gem_mmap {
        unsigned int handle;
-       unsigned int size;
+       unsigned int pad;
+       uint64_t size;
        uint64_t mapped;
 };
 
index 2314ad8b3c9cced6a4679441d7c6b25afe500348..b1a520ec8b59dd0975bc4e738ce2398ca19dc363 100644 (file)
@@ -140,6 +140,7 @@ struct kiocb {
                (x)->ki_dtor = NULL;                    \
                (x)->ki_obj.tsk = tsk;                  \
                (x)->ki_user_data = 0;                  \
+               (x)->private = NULL;                    \
        } while (0)
 
 #define AIO_RING_MAGIC                 0xa10a10a1
index ba43f408baa38907a8f63a64314e07bb622a6e7f..07954b05b86cc3ac9c4f72aa97584b551914631e 100644 (file)
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
index 324fe08ea3b140b7b8b92f7129ad334df2e260d5..6d6795d46a7509f01e4a12993e9c7de34b02d975 100644 (file)
@@ -91,6 +91,11 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                  unsigned long size,
                                  unsigned long align,
                                  unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+                                 unsigned long size,
+                                 unsigned long align,
+                                 unsigned long goal,
+                                 unsigned long limit);
 extern void *__alloc_bootmem_low(unsigned long size,
                                 unsigned long align,
                                 unsigned long goal);
index 68d56effc32860ac29a26487316fcf75b4642261..d10b7ed595b15c07c58f30e08ca841dafb2aa0a1 100644 (file)
@@ -360,11 +360,11 @@ struct cpu_vfs_cap_data {
 
 #define CAP_WAKE_ALARM            35
 
-/* Allow preventing system suspends while epoll events are pending */
+/* Allow preventing system suspends */
 
-#define CAP_EPOLLWAKEUP      36
+#define CAP_BLOCK_SUSPEND    36
 
-#define CAP_LAST_CAP         CAP_EPOLLWAKEUP
+#define CAP_LAST_CAP         CAP_BLOCK_SUSPEND
 
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
 
index 2521a95fa6d98597d1fafe4d4cff23ce9dc0f069..44c87e731e9d4c5fff13cbf851694cd41356fb04 100644 (file)
@@ -163,16 +163,8 @@ struct ceph_connection {
 
        /* connection negotiation temps */
        char in_banner[CEPH_BANNER_MAX_LEN];
-       union {
-               struct {  /* outgoing connection */
-                       struct ceph_msg_connect out_connect;
-                       struct ceph_msg_connect_reply in_reply;
-               };
-               struct {  /* incoming */
-                       struct ceph_msg_connect in_connect;
-                       struct ceph_msg_connect_reply out_reply;
-               };
-       };
+       struct ceph_msg_connect out_connect;
+       struct ceph_msg_connect_reply in_reply;
        struct ceph_entity_addr actual_peer_addr;
 
        /* message out temps */
index 81e803e90aa43ac42a48b678869333fa8c0e4f4e..acba894374a1537b4b961eee222e9c28b53845cf 100644 (file)
@@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch,
                               struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 
+extern void clockevents_config(struct clock_event_device *dev, u32 freq);
 extern void clockevents_config_and_register(struct clock_event_device *dev,
                                            u32 freq, unsigned long min_delta,
                                            unsigned long max_delta);
index e988037abd2a1afa25b9e22607295c0e915f1541..51a90b7f2d606a3dcb9bb582cd209add0c429351 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _LINUX_COMPACTION_H
 #define _LINUX_COMPACTION_H
 
-#include <linux/node.h>
-
 /* Return values for compact_zone() and try_to_compact_pages() */
 /* compaction didn't start as it was not possible or direct reclaim was more suitable */
 #define COMPACT_SKIPPED                0
 /* The full zone was compacted */
 #define COMPACT_COMPLETE       3
 
-/*
- * compaction supports three modes
- *
- * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
- *    MIGRATE_MOVABLE pageblocks as migration sources and targets.
- * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
- *    MIGRATE_MOVABLE pageblocks as migration sources.
- *    MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
- *    targets and convers them to MIGRATE_MOVABLE if possible
- * COMPACT_SYNC uses synchronous migration and scans all pageblocks
- */
-enum compact_mode {
-       COMPACT_ASYNC_MOVABLE,
-       COMPACT_ASYNC_UNMOVABLE,
-       COMPACT_SYNC,
-};
-
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
index e5834aa24b9ec2a287026d58caf097bf438df055..6a6d7aefe12d6e61cae9e44b13aea1ed74ebedda 100644 (file)
@@ -47,9 +47,9 @@
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline                inline          __attribute__((always_inline))
-# define __inline__    __inline__      __attribute__((always_inline))
-# define __inline      __inline        __attribute__((always_inline))
+# define inline                inline          __attribute__((always_inline)) notrace
+# define __inline__    __inline__      __attribute__((always_inline)) notrace
+# define __inline      __inline        __attribute__((always_inline)) notrace
 #else
 /* A lot of inline functions can cause havoc with function tracing */
 # define inline                inline          notrace
index 6c26a3da0e03173b29ce1535c9833537f85a6786..5ab7183313ce2f361c8946cefc4442e6a6c15542 100644 (file)
@@ -57,6 +57,7 @@ struct cpuidle_state {
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID        (0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_COUPLED   (0x02) /* state applies to multiple cpus */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
@@ -100,6 +101,12 @@ struct cpuidle_device {
        struct list_head        device_list;
        struct kobject          kobj;
        struct completion       kobj_unregister;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+       int                     safe_state_index;
+       cpumask_t               coupled_cpus;
+       struct cpuidle_coupled  *coupled;
+#endif
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -176,6 +183,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }
 
 #endif
 
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#endif
+
 /******************************
  * CPUIDLE GOVERNOR INTERFACE *
  ******************************/
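
The cpuidle additions above introduce coupled idle states: a state flagged CPUIDLE_FLAG_COUPLED is only entered once every CPU in the device's coupled_cpus mask is ready, and safe_state_index names the state CPUs wait in until then. Below is a sketch of how a platform driver might describe such a state, assuming CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is enabled; the driver name, latency numbers, and enter callbacks are hypothetical.

    /* Hypothetical platform driver using the coupled-state fields above;
     * assumes CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED=y. */
    #include <linux/cpuidle.h>
    #include <linux/cpumask.h>

    int my_enter_wfi(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv, int index);
    int my_enter_cluster_off(struct cpuidle_device *dev,
                             struct cpuidle_driver *drv, int index);

    static struct cpuidle_driver my_idle_driver = {
            .name   = "my_coupled_idle",
            .states = {
                    [0] = { /* per-cpu WFI, also the safe spin state */
                            .enter            = my_enter_wfi,
                            .exit_latency     = 1,
                            .target_residency = 1,
                            .flags            = CPUIDLE_FLAG_TIME_VALID,
                    },
                    [1] = { /* cluster off: all coupled cpus must agree */
                            .enter            = my_enter_cluster_off,
                            .exit_latency     = 500,
                            .target_residency = 1000,
                            .flags            = CPUIDLE_FLAG_TIME_VALID |
                                                CPUIDLE_FLAG_COUPLED,
                    },
            },
            .state_count = 2,
    };

    static int my_init_cpu(struct cpuidle_device *dev, int cpu)
    {
            dev->cpu              = cpu;
            dev->safe_state_index = 0;      /* state cpus wait in */
            cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
            return cpuidle_register_device(dev);
    }
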
index 161d96241b1b4da3b9a0909749709a33bcca33c5..6de94151ff6f7e8646ab16d7f093cea3c079f1fa 100644 (file)
@@ -865,8 +865,6 @@ extern int (*platform_notify_remove)(struct device *dev);
 extern struct device *get_device(struct device *dev);
 extern void put_device(struct device *dev);
 
-extern void wait_for_device_probe(void);
-
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
 extern int devtmpfs_delete_node(struct device *dev);
index 6f8be328770abb3c11166755d9c954194cfac1ae..f4bb378ccf6a355bbe49e79f56019f9ef386d1d4 100644 (file)
@@ -34,7 +34,7 @@
  * re-allowed until epoll_wait is called again after consuming the wakeup
  * event(s).
  *
- * Requires CAP_EPOLLWAKEUP
+ * Requires CAP_BLOCK_SUSPEND
  */
 #define EPOLLWAKEUP (1 << 29)
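
With the capability renamed to CAP_BLOCK_SUSPEND, EPOLLWAKEUP keeps its meaning: while an event armed with it is pending or being processed, the system is prevented from suspending. A small user-space sketch follows; the fallback define simply mirrors the value above for libc headers that do not yet know the flag.

    /* User-space sketch: arm fd as a wakeup source on an existing epoll
     * instance.  Requires CAP_BLOCK_SUSPEND; without it the kernel ignores
     * or rejects the flag, depending on kernel version. */
    #include <sys/epoll.h>

    #ifndef EPOLLWAKEUP
    #define EPOLLWAKEUP (1u << 29)          /* value from the header above */
    #endif

    static int add_wakeup_source(int epfd, int fd)
    {
            struct epoll_event ev = {
                    .events  = EPOLLIN | EPOLLWAKEUP,
                    .data.fd = fd,
            };

            return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
    }
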
 
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
new file mode 100644 (file)
index 0000000..0e4e2ee
--- /dev/null
@@ -0,0 +1,127 @@
+#ifndef _LINUX_FRONTSWAP_H
+#define _LINUX_FRONTSWAP_H
+
+#include <linux/swap.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+struct frontswap_ops {
+       void (*init)(unsigned);
+       int (*store)(unsigned, pgoff_t, struct page *);
+       int (*load)(unsigned, pgoff_t, struct page *);
+       void (*invalidate_page)(unsigned, pgoff_t);
+       void (*invalidate_area)(unsigned);
+};
+
+extern bool frontswap_enabled;
+extern struct frontswap_ops
+       frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_shrink(unsigned long);
+extern unsigned long frontswap_curr_pages(void);
+extern void frontswap_writethrough(bool);
+
+extern void __frontswap_init(unsigned type);
+extern int __frontswap_store(struct page *page);
+extern int __frontswap_load(struct page *page);
+extern void __frontswap_invalidate_page(unsigned, pgoff_t);
+extern void __frontswap_invalidate_area(unsigned);
+
+#ifdef CONFIG_FRONTSWAP
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+       bool ret = false;
+
+       if (frontswap_enabled && sis->frontswap_map)
+               ret = test_bit(offset, sis->frontswap_map);
+       return ret;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+       if (frontswap_enabled && sis->frontswap_map)
+               set_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+       if (frontswap_enabled && sis->frontswap_map)
+               clear_bit(offset, sis->frontswap_map);
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+                                    unsigned long *map)
+{
+       p->frontswap_map = map;
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+       return p->frontswap_map;
+}
+#else
+/* all inline routines become no-ops and all externs are ignored */
+
+#define frontswap_enabled (0)
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+       return false;
+}
+
+static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+                                    unsigned long *map)
+{
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+       return NULL;
+}
+#endif
+
+static inline int frontswap_store(struct page *page)
+{
+       int ret = -1;
+
+       if (frontswap_enabled)
+               ret = __frontswap_store(page);
+       return ret;
+}
+
+static inline int frontswap_load(struct page *page)
+{
+       int ret = -1;
+
+       if (frontswap_enabled)
+               ret = __frontswap_load(page);
+       return ret;
+}
+
+static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       if (frontswap_enabled)
+               __frontswap_invalidate_page(type, offset);
+}
+
+static inline void frontswap_invalidate_area(unsigned type)
+{
+       if (frontswap_enabled)
+               __frontswap_invalidate_area(type);
+}
+
+static inline void frontswap_init(unsigned type)
+{
+       if (frontswap_enabled)
+               __frontswap_init(type);
+}
+
+#endif /* _LINUX_FRONTSWAP_H */
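
The new frontswap.h defines the hook layer: the inline wrappers bail out cheaply when no backend is enabled, and a backend plugs in by handing a filled-in struct frontswap_ops to frontswap_register_ops(), which returns whatever ops were registered before it. A sketch of a hypothetical backend follows; only the registration call and the callback signatures come from the header, the rest is illustrative.

    /* Hypothetical frontswap backend; callback signatures and the
     * registration call are taken from the header above. */
    #include <linux/frontswap.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    static void my_fs_init(unsigned type)
    {
            /* set up per-swap-device state for 'type' here */
    }

    static int my_fs_store(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* failure: core falls back to the real swap device */
    }

    static int my_fs_load(unsigned type, pgoff_t offset, struct page *page)
    {
            return -1;      /* page not held by this backend */
    }

    static void my_fs_invalidate_page(unsigned type, pgoff_t offset) { }
    static void my_fs_invalidate_area(unsigned type) { }

    static struct frontswap_ops my_fs_ops = {
            .init            = my_fs_init,
            .store           = my_fs_store,
            .load            = my_fs_load,
            .invalidate_page = my_fs_invalidate_page,
            .invalidate_area = my_fs_invalidate_area,
    };

    static int __init my_fs_init_module(void)
    {
            struct frontswap_ops old = frontswap_register_ops(&my_fs_ops);

            if (old.init)
                    pr_warn("frontswap: replacing an already registered backend\n");
            return 0;
    }
    module_init(my_fs_init_module);
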
index 51978ed43e973ccf70996c19c33820fd39edeabe..17fd887c798f3fff64aaf27bf82a81a2d237ee31 100644 (file)
@@ -802,13 +802,14 @@ struct inode {
                unsigned int __i_nlink;
        };
        dev_t                   i_rdev;
+       loff_t                  i_size;
        struct timespec         i_atime;
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        unsigned short          i_bytes;
+       unsigned int            i_blkbits;
        blkcnt_t                i_blocks;
-       loff_t                  i_size;
 
 #ifdef __NEED_I_SIZE_ORDERED
        seqcount_t              i_size_seqcount;
@@ -828,9 +829,8 @@ struct inode {
                struct list_head        i_dentry;
                struct rcu_head         i_rcu;
        };
-       atomic_t                i_count;
-       unsigned int            i_blkbits;
        u64                     i_version;
+       atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
        const struct file_operations    *i_fop; /* former ->i_op->default_file_ops */
index 8f2ab8fef929f42b81f3d9a75c564b42455dc06e..9303348965fbdbc8a8d6d706949b543d54c5609b 100644 (file)
@@ -54,6 +54,9 @@
  * 7.18
  *  - add FUSE_IOCTL_DIR flag
  *  - add FUSE_NOTIFY_DELETE
+ *
+ * 7.19
+ *  - add FUSE_FALLOCATE
  */
 
 #ifndef _LINUX_FUSE_H
@@ -85,7 +88,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 18
+#define FUSE_KERNEL_MINOR_VERSION 19
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -278,6 +281,7 @@ enum fuse_opcode {
        FUSE_POLL          = 40,
        FUSE_NOTIFY_REPLY  = 41,
        FUSE_BATCH_FORGET  = 42,
+       FUSE_FALLOCATE     = 43,
 
        /* CUSE specific operations */
        CUSE_INIT          = 4096,
@@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out {
        __u64   kh;
 };
 
+struct fuse_fallocate_in {
+       __u64   fh;
+       __u64   offset;
+       __u64   length;
+       __u32   mode;
+       __u32   padding;
+};
+
 struct fuse_in_header {
        __u32   len;
        __u32   opcode;
index f07fc2d081598ba7c2432a2306081473d3e0bbe1..2e31e8b3a190bb652bb597104139d2f2b440d347 100644 (file)
@@ -22,8 +22,8 @@
 /* Gpio pin is open source */
 #define GPIOF_OPEN_SOURCE      (1 << 3)
 
-#define GPIOF_EXPORT           (1 << 2)
-#define GPIOF_EXPORT_CHANGEABLE        (1 << 3)
+#define GPIOF_EXPORT           (1 << 4)
+#define GPIOF_EXPORT_CHANGEABLE        (1 << 5)
 #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
 #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
 
index fd0dc30c9f154af94155b8c8c47e0a228fbd2573..cc07d2777bbe6b11a632840c5f0a867436bbeac5 100644 (file)
@@ -165,6 +165,7 @@ enum  hrtimer_base_type {
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
  * @active_bases:      Bitfield to mark bases with active timers
+ * @clock_was_set:     Indicates that clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
  * @hres_active:       State of high resolution mode
@@ -177,7 +178,8 @@ enum  hrtimer_base_type {
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
-       unsigned long                   active_bases;
+       unsigned int                    active_bases;
+       unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC    HIGH_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_HIGH_RES
 
+extern void clock_was_set_delayed(void);
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return 0;
 }
+
+static inline void clock_was_set_delayed(void) { }
+
 #endif
 
 extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 extern ktime_t ktime_get_boottime(void);
 extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
new file mode 100644 (file)
index 0000000..a65c864
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * i2c-mux-pinctrl platform data
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_I2C_MUX_PINCTRL_H
+#define _LINUX_I2C_MUX_PINCTRL_H
+
+/**
+ * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
+ * @parent_bus_num: Parent I2C bus number
+ * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
+ * @bus_count: Number of child busses. Also the number of elements in
+ *     @pinctrl_states
+ * @pinctrl_states: The names of the pinctrl state to select for each child bus
+ * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
+ *     accessed. If NULL, the most recently used pinctrl state will be left
+ *     selected.
+ */
+struct i2c_mux_pinctrl_platform_data {
+       int parent_bus_num;
+       int base_bus_num;
+       int bus_count;
+       const char **pinctrl_states;
+       const char *pinctrl_state_idle;
+};
+
+#endif
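
The header above only carries platform data, so a non-DT board describes its mux by filling in this structure and attaching it to a platform device. A sketch follows; the bus numbers, pinctrl state names, and the "i2c-mux-pinctrl" device name are assumptions for illustration, only the field layout comes from the header.

    /* Illustrative board-file usage of the platform data above. */
    #include <linux/i2c-mux-pinctrl.h>
    #include <linux/platform_device.h>
    #include <linux/kernel.h>

    static const char *board_i2c_mux_states[] = { "i2c-sda0", "i2c-sda1" };

    static struct i2c_mux_pinctrl_platform_data board_i2c_mux_pdata = {
            .parent_bus_num     = 0,        /* mux sits behind i2c-0 */
            .base_bus_num       = 0,        /* let the core pick child bus numbers */
            .bus_count          = ARRAY_SIZE(board_i2c_mux_states),
            .pinctrl_states     = board_i2c_mux_states,
            .pinctrl_state_idle = NULL,     /* keep last selection when idle */
    };

    static struct platform_device board_i2c_mux_device = {
            .name = "i2c-mux-pinctrl",
            .id   = -1,
            .dev  = {
                    .platform_data = &board_i2c_mux_pdata,
            },
    };
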
index e4baff5f7ff403722f54b5e6dcb7f67b2926fa98..9e65eff6af3bdc5dd6e865296233c8f9a1cd2a10 100644 (file)
@@ -149,6 +149,7 @@ extern struct cred init_cred;
        .normal_prio    = MAX_PRIO-20,                                  \
        .policy         = SCHED_NORMAL,                                 \
        .cpus_allowed   = CPU_MASK_ALL,                                 \
+       .nr_cpus_allowed= NR_CPUS,                                      \
        .mm             = NULL,                                         \
        .active_mm      = &init_mm,                                     \
        .se             = {                                             \
@@ -157,7 +158,6 @@ extern struct cred init_cred;
        .rt             = {                                             \
                .run_list       = LIST_HEAD_INIT(tsk.rt.run_list),      \
                .time_slice     = RR_TIMESLICE,                         \
-               .nr_cpus_allowed = NR_CPUS,                             \
        },                                                              \
        .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
        INIT_PUSHABLE_TASKS(tsk)                                        \
index a81671453575d800ec3d102e1aa6a8bf517073ce..2740d080ec6b7607fa1daba2bb2f72395fe34a7f 100644 (file)
@@ -116,6 +116,7 @@ struct input_keymap_entry {
 
 /**
  * EVIOCGMTSLOTS(len) - get MT slot values
+ * @len: size of the data buffer in bytes
  *
  * The ioctl buffer argument should be binary equivalent to
  *
index 61f5cec031e0345bebb8e506f9727b57069dc706..a5261e3d2e3c26f3cf645af875e4093d5adcaa77 100644 (file)
@@ -301,8 +301,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_pm_shutdown:   function called from core code on shutdown once per chip
  * @irq_print_chip:    optional to print special chip info in show_interrupts
  * @flags:             chip specific flags
- *
- * @release:           release function solely used by UML
  */
 struct irq_chip {
        const char      *name;
index e07f5e0c5df4400eb34ac0d89150003e512381b0..604382143bcfccd6772260ed56e16589800af83c 100644 (file)
@@ -377,7 +377,6 @@ extern enum system_states {
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
-       SYSTEM_SUSPEND_DISK,
 } system_state;
 
 #define TAINT_PROPRIETARY_MODULE       0
index 35f7237ec972bef4ce1ba24cd617571a193ce806..2e7a1e032c71a91e6a9dff98a0b637d621932a25 100644 (file)
@@ -21,6 +21,7 @@
  * is passed to the kernel.
  */
 enum kmsg_dump_reason {
+       KMSG_DUMP_UNDEF,
        KMSG_DUMP_PANIC,
        KMSG_DUMP_OOPS,
        KMSG_DUMP_EMERG,
@@ -31,23 +32,42 @@ enum kmsg_dump_reason {
 
 /**
  * struct kmsg_dumper - kernel crash message dumper structure
- * @dump:      The callback which gets called on crashes. The buffer is passed
- *             as two sections, where s1 (length l1) contains the older
- *             messages and s2 (length l2) contains the newer.
  * @list:      Entry in the dumper list (private)
+ * @dump:      Call into dumping code which will retrieve the data
+ *             through the record iterator
+ * @max_reason:        filter for highest reason number that should be dumped
  * @registered:        Flag that specifies if this is already registered
  */
 struct kmsg_dumper {
-       void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
-                       const char *s1, unsigned long l1,
-                       const char *s2, unsigned long l2);
        struct list_head list;
-       int registered;
+       void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+       enum kmsg_dump_reason max_reason;
+       bool active;
+       bool registered;
+
+       /* private state of the kmsg iterator */
+       u32 cur_idx;
+       u32 next_idx;
+       u64 cur_seq;
+       u64 next_seq;
 };
 
 #ifdef CONFIG_PRINTK
 void kmsg_dump(enum kmsg_dump_reason reason);
 
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+                              char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                       char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                         char *buf, size_t size, size_t *len);
+
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+
+void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+
 int kmsg_dump_register(struct kmsg_dumper *dumper);
 
 int kmsg_dump_unregister(struct kmsg_dumper *dumper);
@@ -56,6 +76,33 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
 {
 }
 
+static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
+                                            bool syslog, const char *line,
+                                            size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                               const char *line, size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                                       char *buf, size_t size, size_t *len)
+{
+       return false;
+}
+
+static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+}
+
+static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+}
+
 static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
 {
        return -EINVAL;
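
A sketch of a dumper written against the iterator API declared above; the callback no longer receives two raw buffer sections, it pulls formatted lines itself. The "demo" names are illustrative, and the explicit rewind before pulling lines is an assumption rather than something this hunk requires.

#include <linux/module.h>
#include <linux/kmsg_dump.h>

static void demo_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	char line[256];
	size_t len;

	kmsg_dump_rewind(dumper);
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
		/* hand 'len' bytes of 'line' to the backing store here */
	}
}

static struct kmsg_dumper demo_dumper = {
	.dump		= demo_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* highest reason to dump, per the doc above */
};

static int __init demo_dumper_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}

static void __exit demo_dumper_exit(void)
{
	kmsg_dump_unregister(&demo_dumper);
}

module_init(demo_dumper_init);
module_exit(demo_dumper_exit);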
index c4464356b35b0af21eaafe6cbd1d2d7b4f549814..96c158a37d3e5ead53765e4bfa2280a82c79be5e 100644 (file)
@@ -815,7 +815,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +824,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
 
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
        return -EINVAL;
 }
index a6bb102351486a15a8e1bce8a2da4fbfea4037c6..19dc455b4f3dd072d0e58abdfed2de6f8ae5727c 100644 (file)
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
-int memblock_free_reserved_regions(void);
-int memblock_reserve_reserved_regions(void);
-
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
index dad95bdd06d798545cea969d9cd4b9091e8a3089..704a626d94a08adc03b32b756d2d0a1c9a1f40cf 100644 (file)
@@ -57,8 +57,18 @@ struct page {
                };
 
                union {
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+       defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
+#else
+                       /*
+                        * Keep _count separate from slub cmpxchg_double data,
+                        * as the rest of the double word is protected by
+                        * slab_lock but _count is not.
+                        */
+                       unsigned counters;
+#endif
 
                        struct {
 
index 5cdc96da9dd53bbfbd13ca198367ecd4dd45b46f..e78c0e236e9dce163fc8df904698f43c455fcf85 100644 (file)
@@ -4,7 +4,7 @@
  * SDHCI declarations specific to ST SPEAr platform
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index c9fe66c58f8fc718e68b2147733a0acab750d5aa..17446d3c36027ccac99f70c097af2948390800ec 100644 (file)
@@ -98,7 +98,9 @@
 
 #define SDIO_CCCR_IF           0x07    /* bus interface controls */
 
+#define  SDIO_BUS_WIDTH_MASK   0x03    /* data bus width setting */
 #define  SDIO_BUS_WIDTH_1BIT   0x00
+#define  SDIO_BUS_WIDTH_RESERVED 0x01
 #define  SDIO_BUS_WIDTH_4BIT   0x02
 #define  SDIO_BUS_ECSI         0x20    /* Enable continuous SPI interrupt */
 #define  SDIO_BUS_SCSI         0x40    /* Support continuous SPI interrupt */
index 2427706f78b4d7043b5476b310d5a203631dbf90..68c569fcbb66ff751d689c57e19f196d33b2b0f8 100644 (file)
@@ -694,7 +694,7 @@ typedef struct pglist_data {
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
-       struct task_struct *kswapd;
+       struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
 } pg_data_t;
index 1b14d25162cb7cc56ff08960553ad89b32521ce7..d6a58065c09cacf4637ac9381623e6d0293c6b5d 100644 (file)
@@ -128,7 +128,7 @@ struct kparam_array
  * The ops can have NULL set or get functions.
  */
 #define module_param_cb(name, ops, arg, perm)                                \
-       __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0)
+       __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
 
 /**
  * <level>_param_cb - general callback for a module/cmdline parameter
@@ -192,7 +192,7 @@ struct kparam_array
                 { (void *)set, (void *)get };                          \
        __module_param_call(MODULE_PARAM_PREFIX,                        \
                            name, &__param_ops_##name, arg,             \
-                           (perm) + sizeof(__check_old_set_param(set))*0, 0)
+                           (perm) + sizeof(__check_old_set_param(set))*0, -1)
 
 /* We don't get oldget: it's often a new-style param_get_uint, etc. */
 static inline int
@@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void)
  */
 #define core_param(name, var, type, perm)                              \
        param_check_##type(name, &(var));                               \
-       __module_param_call("", name, &param_ops_##type, &var, perm, 0)
+       __module_param_call("", name, &param_ops_##type, &var, perm, -1)
 #endif /* !MODULE */
 
 /**
@@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void)
                = { len, string };                                      \
        __module_param_call(MODULE_PARAM_PREFIX, name,                  \
                            &param_ops_string,                          \
-                           .str = &__param_string_##name, perm, 0);    \
+                           .str = &__param_string_##name, perm, -1);   \
        __MODULE_PARM_TYPE(name, "string")
 
 /**
@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
        __module_param_call(MODULE_PARAM_PREFIX, name,                  \
                            &param_array_ops,                           \
                            .arr = &__param_arr_##name,                 \
-                           perm, 0);                                   \
+                           perm, -1);                                  \
        __MODULE_PARM_TYPE(name, "array of " #type)
 
 extern struct kernel_param_ops param_array_ops;
index abb1650940d21f11babe11aaf3730e434c5f0e38..826fc580757778dd1bfa6345edc680e0406bc6db 100644 (file)
@@ -27,7 +27,12 @@ union hmark_ports {
                __u16   src;
                __u16   dst;
        } p16;
+       struct {
+               __be16  src;
+               __be16  dst;
+       } b16;
        __u32   v32;
+       __be32  b32;
 };
 
 struct xt_hmark_info {
index fbb78fb09bd25c925d65207643bf61da614167d8..f58325a1d8fbe290fb8a7eb6e4ddc060ef553f91 100644 (file)
@@ -25,6 +25,7 @@ struct nfs41_impl_id;
  */
 struct nfs_client {
        atomic_t                cl_count;
+       atomic_t                cl_mds_count;
        int                     cl_cons_state;  /* current construction state (-ve: init error) */
 #define NFS_CS_READY           0               /* ready to be used */
 #define NFS_CS_INITING         1               /* busy initialising */
index d1a7bf51c326dc7f103aae60874a667f3307b373..8aadd90b808a67b466ceb66cbc0de0b57883e96b 100644 (file)
@@ -348,6 +348,7 @@ struct nfs_openargs {
        const struct qstr *     name;
        const struct nfs_server *server;         /* Needed for ID mapping */
        const u32 *             bitmask;
+       const u32 *             open_bitmap;
        __u32                   claim;
        struct nfs4_sequence_args       seq_args;
 };
@@ -1236,6 +1237,7 @@ struct nfs_pgio_header {
        struct list_head        rpc_list;
        atomic_t                refcnt;
        struct nfs_page         *req;
+       struct nfs_writeverf    *verf;
        struct pnfs_layout_segment *lseg;
        loff_t                  io_start;
        const struct rpc_call_ops *mds_ops;
@@ -1273,6 +1275,7 @@ struct nfs_write_data {
 struct nfs_write_header {
        struct nfs_pgio_header  header;
        struct nfs_write_data   rpc_data;
+       struct nfs_writeverf    verf;
 };
 
 struct nfs_mds_commit_info {
index a6ee9aa898bb7122e682ade64172fc568d98abd6..a7b4fc386e634964b520d84709b82f2e59b7a065 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller platform data header file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f32578634d9d1a9c195c8075c80bc981607d3452..45db49f64bb492ddf34d2b451fffa810ea45b965 100644 (file)
@@ -555,6 +555,8 @@ enum perf_event_type {
        PERF_RECORD_MAX,                        /* non-ABI */
 };
 
+#define PERF_MAX_STACK_DEPTH           127
+
 enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
@@ -609,8 +611,6 @@ struct perf_guest_info_callbacks {
 #include <linux/sysfs.h>
 #include <asm/local.h>
 
-#define PERF_MAX_STACK_DEPTH           255
-
 struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
diff --git a/include/linux/platform_data/spear_thermal.h b/include/linux/platform_data/spear_thermal.h
deleted file mode 100644 (file)
index 724f2e1..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPEAr thermal driver platform data.
- *
- * Copyright (C) 2011-2012 ST Microelectronics
- * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-#ifndef SPEAR_THERMAL_H
-#define SPEAR_THERMAL_H
-
-/* SPEAr Thermal Sensor Platform Data */
-struct spear_thermal_pdata {
-       /* flags used to enable thermal sensor */
-       unsigned int thermal_flags;
-};
-
-#endif /* SPEAR_THERMAL_H */
index 711e0a30aaccc84b3a1bf5d15794b139433b5309..289760f424aaa3247d2e4f66841334c67295c9ef 100644 (file)
 #define PR_SET_PTRACER 0x59616d61
 # define PR_SET_PTRACER_ANY ((unsigned long)-1)
 
-#define PR_SET_CHILD_SUBREAPER 36
-#define PR_GET_CHILD_SUBREAPER 37
+#define PR_SET_CHILD_SUBREAPER 36
+#define PR_GET_CHILD_SUBREAPER 37
 
 /*
  * If no_new_privs is set, then operations that grant new privileges (i.e.
  * Changing LSM security domain is considered a new privilege.  So, for example,
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
  */
-#define PR_SET_NO_NEW_PRIVS 38
-#define PR_GET_NO_NEW_PRIVS 39
+#define PR_SET_NO_NEW_PRIVS    38
+#define PR_GET_NO_NEW_PRIVS    39
+
+#define PR_GET_TID_ADDRESS     40
 
 #endif /* _LINUX_PRCTL_H */
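
A small userspace sketch exercising the no_new_privs prctl pair documented above; the fallback defines simply mirror the values added in this hunk in case an older libc header lacks them.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS	38
#define PR_GET_NO_NEW_PRIVS	39
#endif

int main(void)
{
	/* One-way switch: execve() can no longer grant privileges. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("PR_SET_NO_NEW_PRIVS");

	printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
	return 0;
}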
index 7ed7fd4dba49629fe6bd3ed2bef29b244bd7c6d9..3b823d49a85a7fa87f5d8cae9dbde5c4067bb103 100644 (file)
@@ -69,12 +69,14 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
                                                       size_t size,
                                                       bool ecc);
 void persistent_ram_free(struct persistent_ram_zone *prz);
+void persistent_ram_zap(struct persistent_ram_zone *prz);
 struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
                bool ecc);
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
        unsigned int count);
 
+void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
 void *persistent_ram_old(struct persistent_ram_zone *prz);
 void persistent_ram_free_old(struct persistent_ram_zone *prz);
index 44835fb39793b3263eb79b17e21575fccb12c921..f36632061c668d0b9e7fc8def05d34650a5a55c8 100644 (file)
@@ -160,7 +160,9 @@ enum pxa_ssp_type {
        PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
        PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
        PXA27x_SSP,
+       PXA3xx_SSP,
        PXA168_SSP,
+       PXA910_SSP,
        CE4100_SSP,
 };
 
index 0d04cd69ab9b8c483895fbf7bab7cd691508e77a..ffc444c38b0ab64ab999da3d670dde338e669265 100644 (file)
@@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
                        iter->index++;
                        if (likely(*slot))
                                return slot;
-                       if (flags & RADIX_TREE_ITER_CONTIG)
+                       if (flags & RADIX_TREE_ITER_CONTIG) {
+                               /* forbid switching to the next chunk */
+                               iter->next_index = 0;
                                break;
+                       }
                }
        }
        return NULL;
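
For context, a sketch of the kind of caller this fix matters to: radix_tree_for_each_contig() passes RADIX_TREE_ITER_CONTIG, so with the new iter->next_index = 0 the walk stops at the first hole instead of switching to the next chunk. The helper below is illustrative.

#include <linux/radix-tree.h>

/* Count how many consecutive slots are populated starting at 'start'. */
static unsigned long count_contig_run(struct radix_tree_root *root,
				      unsigned long start)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long n = 0;

	radix_tree_for_each_contig(slot, root, &iter, start)
		n++;

	return n;
}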
index 26d1a47591f1534306160811967248ed9ecfcc12..9cac722b169c9a1f5dbcae489d7b4c43a4fbe1a2 100644 (file)
@@ -184,7 +184,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
-extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
index adb5e5a38cae96cfcd66c98e489146e5f3ca8be2..4e56a9c69a356ca73fcd06cf775f337276f77f2e 100644 (file)
@@ -87,17 +87,24 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
-static inline int rcu_needs_cpu(int cpu)
+static inline void rcu_preempt_note_context_switch(void)
 {
+}
+
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+       *delta_jiffies = ULONG_MAX;
        return 0;
 }
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
+void rcu_preempt_note_context_switch(void);
 int rcu_preempt_needs_cpu(void);
 
-static inline int rcu_needs_cpu(int cpu)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+       *delta_jiffies = ULONG_MAX;
        return rcu_preempt_needs_cpu();
 }
 
@@ -106,6 +113,7 @@ static inline int rcu_needs_cpu(int cpu)
 static inline void rcu_note_context_switch(int cpu)
 {
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch();
 }
 
 /*
index 3c6083cde4fc2d33914012483929215fcf2ce23b..952b793393045d63b5f0f78c9f459ba4ad81a2f4 100644 (file)
@@ -32,7 +32,7 @@
 
 extern void rcu_init(void);
 extern void rcu_note_context_switch(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
 extern void rcu_cpu_stall_reset(void);
 
 /*
index a8e50e44203c868d1957ba6170e9074569f0235e..82a673905edb12ee0ad6e466423439a31b407c49 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
 
 /* The feature bitmap for virtio rpmsg */
 #define VIRTIO_RPMSG_F_NS      0 /* RP supports name service notifications */
@@ -120,7 +122,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
 /**
  * struct rpmsg_endpoint - binds a local rpmsg address to its user
  * @rpdev: rpmsg channel device
+ * @refcount: when this drops to zero, the ept is deallocated
  * @cb: rx callback handler
+ * @cb_lock: must be taken before accessing/changing @cb
  * @addr: local rpmsg address
  * @priv: private data for the driver's use
  *
@@ -140,7 +144,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
  */
 struct rpmsg_endpoint {
        struct rpmsg_channel *rpdev;
+       struct kref refcount;
        rpmsg_rx_cb_t cb;
+       struct mutex cb_lock;
        u32 addr;
        void *priv;
 };
index f34437e835a7069dfdc4660dbed593ebb371d0e9..4a1f493e0feff18485fc7ccff11da4e7eb12dde7 100644 (file)
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -438,6 +439,7 @@ extern int get_dumpable(struct mm_struct *mm);
                                        /* leave room for more dump flags */
 #define MMF_VM_MERGEABLE       16      /* KSM may merge identical pages */
 #define MMF_VM_HUGEPAGE                17      /* set when VM_HUGEPAGE is set on vma */
+#define MMF_EXE_FILE_CHANGED   18      /* see prctl_set_mm_exe_file() */
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -875,6 +877,8 @@ struct sched_group_power {
         * Number of busy cpus in this group.
         */
        atomic_t nr_busy_cpus;
+
+       unsigned long cpumask[0]; /* iteration mask */
 };
 
 struct sched_group {
@@ -899,6 +903,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
        return to_cpumask(sg->cpumask);
 }
 
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+       return to_cpumask(sg->sgp->cpumask);
+}
+
 /**
  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
  * @group: The group whose first cpu is to be returned.
@@ -1187,7 +1200,6 @@ struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned int time_slice;
-       int nr_cpus_allowed;
 
        struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1252,6 +1264,7 @@ struct task_struct {
 #endif
 
        unsigned int policy;
+       int nr_cpus_allowed;
        cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -1858,22 +1871,12 @@ static inline void rcu_copy_process(struct task_struct *p)
        INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-       if (prev->rcu_read_lock_nesting != 0)
-               rcu_preempt_note_context_switch();
-}
-
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-}
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -1896,6 +1899,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
index b534a1be540a0e254e39adf7443e2fdf3151b7f6..642cb7355df3ac82fff918f9ac02feff33bec0f2 100644 (file)
@@ -225,14 +225,11 @@ enum {
        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,
 
-       /* ensure the originating sk reference is available on driver level */
-       SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
-
        /* device driver supports TX zero-copy buffers */
-       SKBTX_DEV_ZEROCOPY = 1 << 4,
+       SKBTX_DEV_ZEROCOPY = 1 << 3,
 
        /* generate wifi status information (where possible) */
-       SKBTX_WIFI_STATUS = 1 << 5,
+       SKBTX_WIFI_STATUS = 1 << 4,
 };
 
 /*
index d3e1075f7b6031b3d1e56e045bb91157e14bb5b0..c73d1445c77ecfc13a4867f60444f6c3a1adc183 100644 (file)
@@ -43,7 +43,7 @@ struct pxa2xx_spi_chip {
        void (*cs_control)(u32 command);
 };
 
-#ifdef CONFIG_ARCH_PXA
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 
 #include <linux/clk.h>
 #include <mach/dma.h>
index 26e5b613deda9e63816b58c59f53f06d7dccd50c..09a545a7dfa39bcd7f736f446358d4c3b112aae4 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
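
A sketch of a splice actor adapted to the reworked prototypes above: the array capacity now travels in ->nr_pages_max, and splice_shrink_spd() no longer takes the pipe. The function name and the choice of page_cache_pipe_buf_ops are illustrative.

#include <linux/splice.h>
#include <linux/pipe_fs_i.h>

static ssize_t demo_fill_pipe(struct pipe_inode_info *pipe)
{
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.nr_pages_max	= PIPE_DEF_BUFFERS,	/* capacity, not fill level */
		.flags		= 0,
		.ops		= &page_cache_pipe_buf_ops,
		.spd_release	= spd_release_page,
	};
	ssize_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* ... fill spd.pages[]/spd.partial[] and set spd.nr_pages here ... */

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);	/* pipe argument gone */
	return ret;
}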
index b6661933e252643956cf3e8d389267f535d49653..c84ec68eaec957b2e16a059b6feccde505f7f241 100644 (file)
@@ -197,6 +197,10 @@ struct swap_info_struct {
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
+#ifdef CONFIG_FRONTSWAP
+       unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
+       atomic_t frontswap_pages;       /* frontswap pages in-use counter */
+#endif
 };
 
 struct swap_list_t {
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
new file mode 100644 (file)
index 0000000..e282624
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _LINUX_SWAPFILE_H
+#define _LINUX_SWAPFILE_H
+
+/*
+ * these were static in swapfile.c but frontswap.c needs them and we don't
+ * want to expose them to the dozens of source files that include swap.h
+ */
+extern spinlock_t swap_lock;
+extern struct swap_list_t swap_list;
+extern struct swap_info_struct *swap_info[];
+extern int try_to_unuse(unsigned int, bool, unsigned long);
+
+#endif /* _LINUX_SWAPFILE_H */
index 792d16d9cbc74ff903c9d89e21726e1b5e3f54f7..47ead515c81197fc897bc7ef37fead3d9ab4b39f 100644 (file)
@@ -9,13 +9,15 @@
  * get good packing density in that tree, so the index should be dense in
  * the low-order bits.
  *
- * We arrange the `type' and `offset' fields so that `type' is at the five
+ * We arrange the `type' and `offset' fields so that `type' is at the seven
  * high-order bits of the swp_entry_t and `offset' is right-aligned in the
- * remaining bits.
+ * remaining bits.  Although `type' itself needs only five bits, we allow for
+ * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
  *
  * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
  */
-#define SWP_TYPE_SHIFT(e)      (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT)
+#define SWP_TYPE_SHIFT(e)      ((sizeof(e.val) * 8) - \
+                       (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
 #define SWP_OFFSET_MASK(e)     ((1UL << SWP_TYPE_SHIFT(e)) - 1)
 
 /*
index 4c5b63283377449ff94252f1ff64e2a8b16fddcf..5f359dbfcdce5bbf40b9fe5cada0d59d60981e5f 100644 (file)
@@ -69,16 +69,16 @@ union tcp_word_hdr {
 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 
 
 enum { 
-       TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
-       TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
-       TCP_FLAG_URG = __cpu_to_be32(0x00200000),
-       TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
-       TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
-       TCP_FLAG_RST = __cpu_to_be32(0x00040000),
-       TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
-       TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
-       TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
-       TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
+       TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
+       TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
+       TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
+       TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
+       TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
+       TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
+       TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
+       TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
+       TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
+       TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
 }; 
 
 /*
index 796f1ff0388c979138b81b7094bb6feda5bfd212..cfc8d908892e849a3bb8a3336a8225bae554d0eb 100644 (file)
@@ -58,6 +58,12 @@ struct thermal_zone_device_ops {
                enum thermal_trip_type *);
        int (*get_trip_temp) (struct thermal_zone_device *, int,
                              unsigned long *);
+       int (*set_trip_temp) (struct thermal_zone_device *, int,
+                             unsigned long);
+       int (*get_trip_hyst) (struct thermal_zone_device *, int,
+                             unsigned long *);
+       int (*set_trip_hyst) (struct thermal_zone_device *, int,
+                             unsigned long);
        int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
        int (*notify) (struct thermal_zone_device *, int,
                       enum thermal_trip_type);
@@ -85,10 +91,18 @@ struct thermal_cooling_device {
                                ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
 #define CELSIUS_TO_KELVIN(t)   ((t)*10+2732)
 
+struct thermal_attr {
+       struct device_attribute attr;
+       char name[THERMAL_NAME_LENGTH];
+};
+
 struct thermal_zone_device {
        int id;
        char type[THERMAL_NAME_LENGTH];
        struct device device;
+       struct thermal_attr *trip_temp_attrs;
+       struct thermal_attr *trip_type_attrs;
+       struct thermal_attr *trip_hyst_attrs;
        void *devdata;
        int trips;
        int tc1;
@@ -137,9 +151,9 @@ enum {
 };
 #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
 
-struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
-               const struct thermal_zone_device_ops *, int tc1, int tc2,
-               int passive_freq, int polling_freq);
+struct thermal_zone_device *thermal_zone_device_register(char *, int, int,
+               void *, const struct thermal_zone_device_ops *, int tc1,
+               int tc2, int passive_freq, int polling_freq);
 void thermal_zone_device_unregister(struct thermal_zone_device *);
 
 int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
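
A sketch of a sensor driver against the extended API above. The callbacks are stubs, the temperatures are made up, and the meaning of the new third register argument (taken here to be a bitmask of trips writable through the new set_trip_temp hook) is an assumption, as is polling in milliseconds.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/thermal.h>

static int demo_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
	*temp = 45000;			/* millicelsius, made up */
	return 0;
}

static int demo_get_trip_type(struct thermal_zone_device *tz, int trip,
			      enum thermal_trip_type *type)
{
	*type = THERMAL_TRIP_PASSIVE;
	return 0;
}

static int demo_get_trip_temp(struct thermal_zone_device *tz, int trip,
			      unsigned long *temp)
{
	*temp = 80000;
	return 0;
}

static int demo_set_trip_temp(struct thermal_zone_device *tz, int trip,
			      unsigned long temp)
{
	/* program the hardware threshold here */
	return 0;
}

static const struct thermal_zone_device_ops demo_ops = {
	.get_temp	= demo_get_temp,
	.get_trip_type	= demo_get_trip_type,
	.get_trip_temp	= demo_get_trip_temp,
	.set_trip_temp	= demo_set_trip_temp,
};

static struct thermal_zone_device *demo_tz;

static int __init demo_thermal_init(void)
{
	/* one trip point, writable (bit 0 of the assumed mask argument) */
	demo_tz = thermal_zone_device_register("demo_sensor", 1, 0x1, NULL,
					       &demo_ops, 0, 0, 0, 1000);
	return IS_ERR(demo_tz) ? PTR_ERR(demo_tz) : 0;
}

static void __exit demo_thermal_exit(void)
{
	thermal_zone_device_unregister(demo_tz);
}

module_init(demo_thermal_init);
module_exit(demo_thermal_exit);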
index 7f855d50cdf5567b66b31f2bfb073ac025357c00..49b3ac29726adf3040d47384b51948401ac44887 100644 (file)
@@ -126,8 +126,6 @@ struct usb_hcd {
        unsigned                wireless:1;     /* Wireless USB HCD */
        unsigned                authorized_default:1;
        unsigned                has_tt:1;       /* Integrated TT in root hub */
-       unsigned                broken_pci_sleep:1;     /* Don't put the
-                       controller in PCI-D3 for system sleep */
 
        unsigned int            irq;            /* irq allocated */
        void __iomem            *regs;          /* device memory/io */
index b455c7c212eb6de26716a44ec3674eb2ebcfc270..ddb419cf4530339f37d7e239d134cc4222d3d900 100644 (file)
@@ -7,11 +7,19 @@
  * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
  */
 
+#ifndef _LINUX_VGA_SWITCHEROO_H_
+#define _LINUX_VGA_SWITCHEROO_H_
+
 #include <linux/fb.h>
 
+struct pci_dev;
+
 enum vga_switcheroo_state {
        VGA_SWITCHEROO_OFF,
        VGA_SWITCHEROO_ON,
+       /* the states below are only returned by vga_switcheroo_get_client_state() */
+       VGA_SWITCHEROO_INIT,
+       VGA_SWITCHEROO_NOT_FOUND,
 };
 
 enum vga_switcheroo_client_id {
@@ -50,6 +58,8 @@ void vga_switcheroo_unregister_handler(void);
 
 int vga_switcheroo_process_delayed_switch(void);
 
+int vga_switcheroo_get_client_state(struct pci_dev *dev);
+
 #else
 
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -62,5 +72,8 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
        int id, bool active) { return 0; }
 static inline void vga_switcheroo_unregister_handler(void) {}
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+
 
 #endif
+#endif /* _LINUX_VGA_SWITCHEROO_H_ */
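
A sketch of how a driver might consult the new query helper; the helper and the VGA_SWITCHEROO_ON fallback for builds without vga_switcheroo come straight from the hunk above, while the wrapper name is illustrative.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* Non-zero if the switcheroo reports this GPU as currently switched off. */
static int demo_gpu_is_switched_off(struct pci_dev *pdev)
{
	return vga_switcheroo_get_client_state(pdev) == VGA_SWITCHEROO_OFF;
}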
index 370d11106c1116811b8bf668892bf4fadf6a8b0d..2039c5d3292e801c0abb8c770c99cdfe287ce9bf 100644 (file)
@@ -2640,9 +2640,9 @@ struct v4l2_create_buffers {
 
 /* Experimental, these three ioctls may change over the next couple of kernel
    versions. */
-#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 96, struct v4l2_enum_dv_timings)
-#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 97, struct v4l2_dv_timings)
-#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 98, struct v4l2_dv_timings_cap)
+#define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 98, struct v4l2_enum_dv_timings)
+#define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 99, struct v4l2_dv_timings)
+#define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 100, struct v4l2_dv_timings_cap)
 
 /* Reminder: when adding new ioctls please add support for them to
    drivers/media/video/v4l2-compat-ioctl32.c as well! */
index 66a7b579e31c81912f635beb272cebf82a05ac57..3def64ba77fa0100bd62f0072cc6989e0f403f3f 100644 (file)
@@ -1144,6 +1144,12 @@ struct extended_inquiry_info {
        __u8     data[240];
 } __packed;
 
+#define HCI_EV_KEY_REFRESH_COMPLETE    0x30
+struct hci_ev_key_refresh_complete {
+       __u8    status;
+       __le16  handle;
+} __packed;
+
 #define HCI_EV_IO_CAPA_REQUEST         0x31
 struct hci_ev_io_capa_request {
        bdaddr_t bdaddr;
index b94765e38e8074aa5a877c90b395f0bc8cb6bb4a..2040bff945d4562e0c0129d078a26a0a1be34672 100644 (file)
@@ -40,7 +40,10 @@ struct inet_peer {
        u32                     pmtu_orig;
        u32                     pmtu_learned;
        struct inetpeer_addr_base redirect_learned;
-       struct list_head        gc_list;
+       union {
+               struct list_head        gc_list;
+               struct rcu_head     gc_rcu;
+       };
        /*
         * Once inet_peer is queued for deletion (refcnt == -1), following fields
         * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
index d6146b4811c212601a365eee7c9c28789b76481a..95374d1696a163a75f8aa006bf99fe958df195aa 100644 (file)
@@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
        if (!ct || !nf_ct_is_untracked(ct)) {
-               nf_reset(skb);
+               nf_conntrack_put(skb->nfct);
                skb->nfct = &nf_ct_untracked_get()->ct_general;
                skb->nfctinfo = IP_CT_NEW;
                nf_conntrack_get(skb->nfct);
index 1937c7d98304fc0ecfd6d7315c9e3cbb9e93a557..95e39b6a02ec924ab1229e728ddb4bd45ff3d682 100644 (file)
@@ -1940,6 +1940,11 @@ enum ieee80211_rate_control_changed {
  *     to also unregister the device. If it returns 1, then mac80211
  *     will also go through the regular complete restart on resume.
  *
+ * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
+ *     modified. The reason is that device_set_wakeup_enable() is
+ *     supposed to be called when the configuration changes, not only
+ *     in suspend().
+ *
  * @add_interface: Called when a netdevice attached to the hardware is
  *     enabled. Because it is not called for monitor mode devices, @start
  *     and @stop must be implemented.
@@ -2966,6 +2971,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
  * ieee80211_generic_frame_duration - Calculate the duration field for a frame
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @band: the band to calculate the frame duration on
  * @frame_len: the length of the frame.
  * @rate: the rate at which the frame is going to be transmitted.
  *
index a88fb6939387f228ac5826949f68151e0fceaf16..e1ce1048fe5fa1142196cb88e08829b7bb1d60f5 100644 (file)
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
-       if (net->ct.nf_conntrack_event_cb == NULL)
+       if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
                return;
 
        e = nf_ct_ecache_find(ct);
index 928daf595bebc04b840ddca77d649688f05eb2d1..bcd525e39a0ba89549f83c381b936d24db59bcf2 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index ed2b78e2375d0de3ad537f4b901630d0ffa7b69d..98705468ac0329c884bfd0e2663422b73de565b8 100644 (file)
@@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
 {
        struct flowi4 fl4 = {
                .flowi4_oif = oif,
+               .flowi4_tos = tos,
                .daddr = daddr,
                .saddr = saddr,
-               .flowi4_tos = tos,
        };
        return ip_route_output_key(net, &fl4);
 }
index 55ce96b53b092e3ca04db6eaa7d6888fb9f478bc..9d7d54a00e63f28feb80942bcaf7afb7ee68cd5c 100644 (file)
@@ -220,13 +220,16 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       unsigned char           data[24];
+       u16                     bond_queue_mapping;
+       u16                     _pad;
+       unsigned char           data[20];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
        struct qdisc_skb_cb *qcb;
-       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+
+       BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
index e4652fe5895863d956a52e436d10f1fc3724415b..fecdf31816f2fc6a834a060cfdd39158dcb30bc2 100644 (file)
@@ -912,6 +912,9 @@ struct sctp_transport {
                /* Is this structure kfree()able? */
                malloced:1;
 
+       /* Has this transport moved the ctsn since we last sacked */
+       __u32 sack_generation;
+
        struct flowi fl;
 
        /* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
                 */
                __u8    sack_needed;     /* Do we need to sack the peer? */
                __u32   sack_cnt;
+               __u32   sack_generation;
 
                /* These are capabilities which our peer advertised.  */
                __u8    ecn_capable:1,      /* Can peer do ECN? */
index e7728bc14ccfde5bd85c3b08f990487af66ab072..2c5d2b4d5d1eb51542df26094700250ab6191070 100644 (file)
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+                    struct sctp_transport *trans);
 
 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
index f4f1c96dca726ff2f00211994dcf7381ef55b5b0..10ce74f589c5fd57622f630a17fbbc7e736d24e4 100644 (file)
@@ -163,6 +163,8 @@ enum ata_command_set {
         ATAPI_COMMAND_SET = 1,
 };
 
+#define ATA_RESP_FIS_SIZE 24
+
 struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
@@ -171,7 +173,7 @@ struct sata_device {
 
        struct ata_port *ap;
        struct ata_host ata_host;
-       struct ata_taskfile tf;
+       u8     fis[ATA_RESP_FIS_SIZE];
 };
 
 enum {
@@ -537,7 +539,7 @@ enum exec_status {
  */
 struct ata_task_resp {
        u16  frame_len;
-       u8   ending_fis[24];      /* dev to host or data-in */
+       u8   ending_fis[ATA_RESP_FIS_SIZE];       /* dev to host or data-in */
 };
 
 #define SAS_STATUS_BUF_SIZE 96
index 1e1198546c725d43a7ff6baa657c25ef364d73ee..ac06cc595890ef87a8e01632f5872ab11e4057f2 100644 (file)
@@ -134,10 +134,16 @@ struct scsi_cmnd {
 
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
+       struct scsi_driver **sdp;
+
        if (!cmd->request->rq_disk)
                return NULL;
 
-       return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+       sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
+       if (!sdp)
+               return NULL;
+
+       return *sdp;
 }
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
index 6efb2e1416e073924a94442762459d89e118f520..ba969885232101e9eeed555e11c2385887b9cb85 100644 (file)
@@ -151,6 +151,7 @@ struct scsi_device {
                                           SD_LAST_BUGGY_SECTORS */
        unsigned no_read_disc_info:1;   /* Avoid READ_DISC_INFO cmds */
        unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
+       unsigned try_rc_10_first:1;     /* Try READ_CAPACITY_10 first */
        unsigned is_visible:1;  /* is the device visible in sysfs */
 
        DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
index ec3f910aa40b139bdf61eec45c55376225f8647b..0c3c2fb0f9395be596699830a62939cc83dd1609 100644 (file)
@@ -44,6 +44,7 @@ struct snd_tea575x_ops {
 
 struct snd_tea575x {
        struct v4l2_device *v4l2_dev;
+       struct v4l2_file_operations fops;
        struct video_device vd;         /* video device */
        int radio_nr;                   /* radio_nr */
        bool tea5759;                   /* 5759 chip is present */
@@ -62,7 +63,7 @@ struct snd_tea575x {
        int (*ext_init)(struct snd_tea575x *tea);
 };
 
-int snd_tea575x_init(struct snd_tea575x *tea);
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
 void snd_tea575x_exit(struct snd_tea575x *tea);
 
 #endif /* __SOUND_TEA575X_TUNER_H */
index 116959933f46442b27075ba7d40c79c746308049..c78a23333c4fb801c72e86a55b05fd7e7bf66f67 100644 (file)
@@ -47,6 +47,7 @@ struct target_core_fabric_ops {
         */
        int (*check_stop_free)(struct se_cmd *);
        void (*release_cmd)(struct se_cmd *);
+       void (*put_session)(struct se_session *);
        /*
         * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
         */
index 1480900c511ce134443e7bf2f41d8e5ba0f97117..d274734b2aa42fee56d7ce7ab2b7898d39521e7e 100644 (file)
@@ -289,6 +289,7 @@ TRACE_EVENT(rcu_dyntick,
  *     "In holdoff": Nothing to do, holding off after unsuccessful attempt.
  *     "Begin holdoff": Attempt failed, don't retry until next jiffy.
  *     "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ *     "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
  *     "More callbacks": Still more callbacks, try again to clear them out.
  *     "Callbacks drained": All callbacks processed, off to dyntick idle!
  *     "Timer": Timer fired to cause CPU to continue processing callbacks.
index 1ca6b32c482875cbfd90fb315831c4c110d28414..b5cc0a7c4708f167925aa1f974eb356f99de1ebc 100644 (file)
@@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void)
        parse_early_param();
        parse_args("Booting kernel", static_command_line, __start___param,
                   __stop___param - __start___param,
-                  0, 0, &unknown_bootoption);
+                  -1, -1, &unknown_bootoption);
 
        jump_label_init();
 
@@ -755,13 +755,8 @@ static void __init do_initcalls(void)
 {
        int level;
 
-       for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
-               pr_info("initlevel:%d=%s, %d registered initcalls\n",
-                       level, initcall_level_names[level],
-                       (int) (initcall_levels[level+1]
-                               - initcall_levels[level]));
+       for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
                do_initcall_level(level);
-       }
 }
 
 /*
index 5e2cbfdab6fc0d6b96a19c321a9208dda8cd130d..41c1285d697a6f19a3c9b945691346c9b45ade47 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -393,6 +393,16 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
 }
 
+static long shm_fallocate(struct file *file, int mode, loff_t offset,
+                         loff_t len)
+{
+       struct shm_file_data *sfd = shm_file_data(file);
+
+       if (!sfd->file->f_op->fallocate)
+               return -EOPNOTSUPP;
+       return sfd->file->f_op->fallocate(file, mode, offset, len);
+}
+
 static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
@@ -410,6 +420,7 @@ static const struct file_operations shm_file_operations = {
        .get_unmapped_area      = shm_get_unmapped_area,
 #endif
        .llseek         = noop_llseek,
+       .fallocate      = shm_fallocate,
 };
 
 static const struct file_operations shm_file_operations_huge = {
@@ -418,6 +429,7 @@ static const struct file_operations shm_file_operations_huge = {
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
+       .fallocate      = shm_fallocate,
 };
 
 int is_file_shm_hugepages(struct file *file)
index 0f3527d6184a1597fb81f00a878df15eeab0ecf8..b303dfc7dce0703299c3e8840e91d101497495ec 100644 (file)
@@ -255,12 +255,17 @@ int cgroup_lock_is_held(void)
 
 EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
 
+static int css_unbias_refcnt(int refcnt)
+{
+       return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
+}
+
 /* the current nr of refs, always >= 0 whether @css is deactivated or not */
 static int css_refcnt(struct cgroup_subsys_state *css)
 {
        int v = atomic_read(&css->refcnt);
 
-       return v >= 0 ? v : v - CSS_DEACT_BIAS;
+       return css_unbias_refcnt(v);
 }
 
 /* convenient tests for these bits */
@@ -3878,8 +3883,12 @@ static void css_dput_fn(struct work_struct *work)
 {
        struct cgroup_subsys_state *css =
                container_of(work, struct cgroup_subsys_state, dput_work);
+       struct dentry *dentry = css->cgroup->dentry;
+       struct super_block *sb = dentry->d_sb;
 
-       dput(css->cgroup->dentry);
+       atomic_inc(&sb->s_active);
+       dput(dentry);
+       deactivate_super(sb);
 }
 
 static void init_cgroup_css(struct cgroup_subsys_state *css,
@@ -4971,10 +4980,12 @@ EXPORT_SYMBOL_GPL(__css_tryget);
 void __css_put(struct cgroup_subsys_state *css)
 {
        struct cgroup *cgrp = css->cgroup;
+       int v;
 
        rcu_read_lock();
-       atomic_dec(&css->refcnt);
-       switch (css_refcnt(css)) {
+       v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
+
+       switch (v) {
        case 1:
                if (notify_on_release(cgrp)) {
                        set_bit(CGRP_RELEASABLE, &cgrp->flags);
index 67b847dfa2bb64b58d30db51460f22243142ee2d..1f91413edb87d0c9b77efc4e84c9b2f9729e9b8c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kmsg_dump.h>
 #include <linux/reboot.h>
 #include <linux/sched.h>
 #include <linux/sysrq.h>
@@ -2040,8 +2041,15 @@ static int kdb_env(int argc, const char **argv)
  */
 static int kdb_dmesg(int argc, const char **argv)
 {
-       char *syslog_data[4], *start, *end, c = '\0', *p;
-       int diag, logging, logsize, lines = 0, adjust = 0, n;
+       int diag;
+       int logging;
+       int lines = 0;
+       int adjust = 0;
+       int n = 0;
+       int skip = 0;
+       struct kmsg_dumper dumper = { .active = 1 };
+       size_t len;
+       char buf[201];
 
        if (argc > 2)
                return KDB_ARGCOUNT;
@@ -2064,22 +2072,10 @@ static int kdb_dmesg(int argc, const char **argv)
                kdb_set(2, setargs);
        }
 
-       /* syslog_data[0,1] physical start, end+1.  syslog_data[2,3]
-        * logical start, end+1. */
-       kdb_syslog_data(syslog_data);
-       if (syslog_data[2] == syslog_data[3])
-               return 0;
-       logsize = syslog_data[1] - syslog_data[0];
-       start = syslog_data[2];
-       end = syslog_data[3];
-#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
-       for (n = 0, p = start; p < end; ++p) {
-               c = *KDB_WRAP(p);
-               if (c == '\n')
-                       ++n;
-       }
-       if (c != '\n')
-               ++n;
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
+               n++;
+
        if (lines < 0) {
                if (adjust >= n)
                        kdb_printf("buffer only contains %d lines, nothing "
@@ -2087,21 +2083,11 @@ static int kdb_dmesg(int argc, const char **argv)
                else if (adjust - lines >= n)
                        kdb_printf("buffer only contains %d lines, last %d "
                                   "lines printed\n", n, n - adjust);
-               if (adjust) {
-                       for (; start < end && adjust; ++start) {
-                               if (*KDB_WRAP(start) == '\n')
-                                       --adjust;
-                       }
-                       if (start < end)
-                               ++start;
-               }
-               for (p = start; p < end && lines; ++p) {
-                       if (*KDB_WRAP(p) == '\n')
-                               ++lines;
-               }
-               end = p;
+               skip = adjust;
+               lines = abs(lines);
        } else if (lines > 0) {
-               int skip = n - (adjust + lines);
+               skip = n - lines - adjust;
+               lines = abs(lines);
                if (adjust >= n) {
                        kdb_printf("buffer only contains %d lines, "
                                   "nothing printed\n", n);
@@ -2112,35 +2098,24 @@ static int kdb_dmesg(int argc, const char **argv)
                        kdb_printf("buffer only contains %d lines, first "
                                   "%d lines printed\n", n, lines);
                }
-               for (; start < end && skip; ++start) {
-                       if (*KDB_WRAP(start) == '\n')
-                               --skip;
-               }
-               for (p = start; p < end && lines; ++p) {
-                       if (*KDB_WRAP(p) == '\n')
-                               --lines;
-               }
-               end = p;
+       } else {
+               lines = n;
        }
-       /* Do a line at a time (max 200 chars) to reduce protocol overhead */
-       c = '\n';
-       while (start != end) {
-               char buf[201];
-               p = buf;
-               if (KDB_FLAG(CMD_INTERRUPT))
-                       return 0;
-               while (start < end && (c = *KDB_WRAP(start)) &&
-                      (p - buf) < sizeof(buf)-1) {
-                       ++start;
-                       *p++ = c;
-                       if (c == '\n')
-                               break;
+
+       if (skip >= n || skip < 0)
+               return 0;
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
+               if (skip) {
+                       skip--;
+                       continue;
                }
-               *p = '\0';
-               kdb_printf("%s", buf);
+               if (!lines--)
+                       break;
+
+               kdb_printf("%.*s\n", (int)len - 1, buf);
        }
-       if (c != '\n')
-               kdb_printf("\n");
 
        return 0;
 }
index 47c4e56e513ba72ec485afdaa2ad0e30bac6e30f..392ec6a25844c48f1242a649c0c58a1d22c0c783 100644 (file)
@@ -205,7 +205,6 @@ extern char kdb_grep_string[];
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
-extern void kdb_syslog_data(char *syslog_data[]);
 extern unsigned long kdb_task_state_string(const char *);
 extern char kdb_task_state_char (const struct task_struct *);
 extern unsigned long kdb_task_state(const struct task_struct *p,
index 5b06cbbf6931da0ab7c4e9f1b27a76f5418d64d8..d7d71d6ec97278cf60fa5b13e79a80bff27b6af2 100644 (file)
@@ -253,9 +253,9 @@ perf_cgroup_match(struct perf_event *event)
        return !event->cgrp || event->cgrp == cpuctx->cgrp;
 }
 
-static inline void perf_get_cgroup(struct perf_event *event)
+static inline bool perf_tryget_cgroup(struct perf_event *event)
 {
-       css_get(&event->cgrp->css);
+       return css_tryget(&event->cgrp->css);
 }
 
 static inline void perf_put_cgroup(struct perf_event *event)
@@ -484,7 +484,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
        event->cgrp = cgrp;
 
        /* must be done before we fput() the file */
-       perf_get_cgroup(event);
+       if (!perf_tryget_cgroup(event)) {
+               event->cgrp = NULL;
+               ret = -ENOENT;
+               goto out;
+       }
 
        /*
         * all events in a group must monitor
@@ -3181,7 +3185,6 @@ static void perf_event_for_each(struct perf_event *event,
        event = event->group_leader;
 
        perf_event_for_each_child(event, func);
-       func(event);
        list_for_each_entry(sibling, &event->sibling_list, group_entry)
                perf_event_for_each_child(sibling, func);
        mutex_unlock(&ctx->mutex);
index 34867cc5b42a77f325c204bb2fd09e1fabe38955..2f59cc334516a9b9e831bb54aa0375c999ae69b2 100644 (file)
@@ -72,6 +72,18 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
+               /*
+                * If we are the last child process in a pid namespace to be
+                * reaped, notify the reaper sleeping in zap_pid_ns_processes().
+                */
+               if (IS_ENABLED(CONFIG_PID_NS)) {
+                       struct task_struct *parent = p->real_parent;
+
+                       if ((task_active_pid_ns(parent)->child_reaper == parent) &&
+                           list_empty(&parent->children) &&
+                           (parent->flags & PF_EXITING))
+                               wake_up_process(parent);
+               }
        }
        list_del_rcu(&p->thread_group);
 }
@@ -643,6 +655,7 @@ static void exit_mm(struct task_struct * tsk)
        mm_release(tsk, mm);
        if (!mm)
                return;
+       sync_mm_rss(mm);
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
@@ -719,12 +732,6 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 
                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
-               /*
-                * We can not clear ->child_reaper or leave it alone.
-                * There may by stealth EXIT_DEAD tasks on ->children,
-                * forget_original_parent() must move them somewhere.
-                */
-               pid_ns->child_reaper = init_pid_ns.child_reaper;
        } else if (father->signal->has_child_subreaper) {
                struct task_struct *reaper;
 
index ab5211b9e622cf94d07b7bfb4ccfd9bac85e7b79..f00e319d8376289f0273fc1d301f4c1ebb132c86 100644 (file)
@@ -304,12 +304,17 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        }
 
        err = arch_dup_task_struct(tsk, orig);
-       if (err)
-               goto out;
 
+       /*
+        * We defer looking at err, because we will need this setup
+        * for the clean up path to work correctly.
+        */
        tsk->stack = ti;
-
        setup_thread_stack(tsk, orig);
+
+       if (err)
+               goto out;
+
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
        stackend = end_of_stack(tsk);
index ae34bf51682b4a204de93f62943055350cd5c4d0..6db7a5ed52b58727d33293853053c0fee1d049fe 100644 (file)
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
        return 0;
 }
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+       return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-       struct timespec realtime_offset, xtim, wtm, sleep;
 
        if (!hrtimer_hres_active())
                return;
 
-       /* Optimized out for !HIGH_RES */
-       get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-       set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-       /* Adjust CLOCK_REALTIME offset */
        raw_spin_lock(&base->lock);
-       base->clock_base[HRTIMER_BASE_REALTIME].offset =
-               timespec_to_ktime(realtime_offset);
-       base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-               timespec_to_ktime(sleep);
-
+       hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
 }
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
                base->clock_base[i].resolution = KTIME_HIGH_RES;
 
        tick_setup_sched_timer();
-
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        return 1;
 }
 
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt
+ * device. If called from the timer interrupt context we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       cpu_base->clock_was_set = 1;
+       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
-       entry_time = now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
 retry:
        expires_next.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
         * We need to prevent that we loop forever in the hrtimer
         * interrupt routine. We give it 3 attempts to avoid
         * overreacting on some spurious event.
+        *
+        * Acquire base lock for updating the offsets and retrieving
+        * the current time.
         */
-       now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
        cpu_base->nr_retries++;
        if (++retries < 3)
                goto retry;
@@ -1343,6 +1356,7 @@ retry:
         */
        cpu_base->nr_hangs++;
        cpu_base->hang_detected = 1;
+       raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
        if (delta.tv64 > cpu_base->max_hang_time.tv64)
                cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       if (cpu_base->clock_was_set) {
+               cpu_base->clock_was_set = 0;
+               clock_was_set();
+       }
+
        hrtimer_peek_ahead_timers();
 }
 
index fc275e4f629b941ef0905a77bc4b88b8659b3183..eebd6d5cfb44ce626f669029750112a0cad15525 100644 (file)
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
        kstat_incr_irqs_this_cpu(irq, desc);
 
        action = desc->action;
-       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
                goto out_unlock;
+       }
 
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+               desc->istate |= IRQS_PENDING;
                goto out_unlock;
+       }
 
        handle_irq_event(desc);
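
The two hunks above make handle_nested_irq() and handle_simple_irq() record IRQS_PENDING when an interrupt arrives while the line is disabled or has no action, instead of dropping it; the pending flag lets the core replay the interrupt later. From a driver's point of view the usual disable/enable bracket then behaves as expected for such flows (sketch; the helper name is invented):

#include <linux/interrupt.h>

/*
 * Hypothetical driver helper: reconfigure hardware with its IRQ masked.
 * With the IRQS_PENDING change above, an edge interrupt that fires while
 * the line is disabled is flagged and replayed after enable_irq(),
 * instead of being silently lost by handle_simple_irq()/handle_nested_irq().
 */
static void reconfigure_with_irq_masked(unsigned int irq)
{
        disable_irq(irq);               /* waits for in-flight handlers */
        /* ... touch device registers ... */
        enable_irq(irq);                /* pending interrupt is resent here */
}
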
 
index 8e5c56b3b7d9c5f70c49d48f7082636b1b3bdf89..001fa5bab4902dcf403384ded269841c09d3ca6a 100644 (file)
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+                              const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
index ea0c6c2ae6f747d0bd8dff198e5e1f6b6934f050..8c548232ba39daf0f7320d64f19d85775000201a 100644 (file)
@@ -142,6 +142,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                       bool force)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+       int ret;
+
+       ret = chip->irq_set_affinity(data, mask, false);
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               cpumask_copy(data->affinity, mask);
+       case IRQ_SET_MASK_OK_NOCOPY:
+               irq_set_thread_affinity(desc);
+               ret = 0;
+       }
+
+       return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = chip->irq_set_affinity(data, mask, false);
-               switch (ret) {
-               case IRQ_SET_MASK_OK:
-                       cpumask_copy(data->affinity, mask);
-               case IRQ_SET_MASK_OK_NOCOPY:
-                       irq_set_thread_affinity(desc);
-                       ret = 0;
-               }
+               ret = irq_do_set_affinity(data, mask, false);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-       struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
-       int ret, node = desc->irq_data.node;
+       int node = desc->irq_data.node;
 
        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
@@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
                if (cpumask_intersects(mask, nodemask))
                        cpumask_and(mask, mask, nodemask);
        }
-       ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-       switch (ret) {
-       case IRQ_SET_MASK_OK:
-               cpumask_copy(desc->irq_data.affinity, mask);
-       case IRQ_SET_MASK_OK_NOCOPY:
-               irq_set_thread_affinity(desc);
-       }
+       irq_do_set_affinity(&desc->irq_data, mask, false);
        return 0;
 }
 #else
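
irq_do_set_affinity() above consolidates the switch over the chip callback's return value that was previously duplicated in __irq_set_affinity_locked(), setup_affinity() and irq_move_masked_irq(); note the intentional fall-through from IRQ_SET_MASK_OK (the core copies the mask into data->affinity) to IRQ_SET_MASK_OK_NOCOPY (the chip has already updated data->affinity itself). A sketch of a hypothetical irq_chip callback using the two conventions (the hardware routing call is omitted):

#include <linux/irq.h>

/* Hypothetical chip callback; the register write is hardware specific. */
static int mychip_irq_set_affinity(struct irq_data *d,
                                   const struct cpumask *mask, bool force)
{
        unsigned int cpu = cpumask_first(mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* route_irq_to_cpu(d->hwirq, cpu);  -- hardware specific, omitted */

        /*
         * IRQ_SET_MASK_OK lets irq_do_set_affinity() copy @mask into
         * d->affinity and update the threaded-handler affinity.
         *
         * A chip that narrows the mask itself would instead do:
         *   cpumask_copy(d->affinity, cpumask_of(cpu));
         *   return IRQ_SET_MASK_OK_NOCOPY;
         * so that only irq_set_thread_affinity() runs in the core.
         */
        return IRQ_SET_MASK_OK;
}
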
index c3c89751b327c9cf257c870d046973107be810da..ca3f4aaff707db1d2aa28bee7f388be67a3317b5 100644 (file)
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-                  < nr_cpu_ids)) {
-               int ret = chip->irq_set_affinity(&desc->irq_data,
-                                                desc->pending_mask, false);
-               switch (ret) {
-               case IRQ_SET_MASK_OK:
-                       cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-               case IRQ_SET_MASK_OK_NOCOPY:
-                       irq_set_thread_affinity(desc);
-               }
-       }
+       if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+               irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
        cpumask_clear(desc->pending_mask);
 }
index 8ed89a175d79376600dfbfd777f06ed3c24665a5..d2a5f4ecc6ddd2ebc2da68764da646c7b96d9fc9 100644 (file)
@@ -27,7 +27,7 @@
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
 
-int panic_on_oops;
+int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...)
         */
        crash_kexec(NULL);
 
-       kmsg_dump(KMSG_DUMP_PANIC);
-
        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...)
         */
        smp_send_stop();
 
+       kmsg_dump(KMSG_DUMP_PANIC);
+
        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
        bust_spinlocks(0);
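
Two behavioural notes on the panic() hunks: panic_on_oops now takes its default from the new CONFIG_PANIC_ON_OOPS_VALUE setting rather than always starting at 0, and kmsg_dump(KMSG_DUMP_PANIC) is issued only after smp_send_stop(), so the panic notifier chain fires after the dump. For reference, a notifier on that chain is still registered the usual way (sketch; the names are illustrative):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int sample_panic_notify(struct notifier_block *nb,
                               unsigned long event, void *buf)
{
        /* called from panic(), after smp_send_stop() and kmsg_dump() */
        return NOTIFY_DONE;
}

static struct notifier_block sample_panic_nb = {
        .notifier_call = sample_panic_notify,
};

static int __init sample_panic_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &sample_panic_nb);
        return 0;
}
core_initcall(sample_panic_init);
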
index 16b20e38c4a1e26e64db477f76bbbfcf1787b75b..b3c7fd5542500ab13940814211692bbe3b7ef8d3 100644 (file)
@@ -184,11 +184,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        }
        read_unlock(&tasklist_lock);
 
+       /* Firstly reap the EXIT_ZOMBIE children we may have. */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);
 
+       /*
+        * sys_wait4() above can't reap the TASK_DEAD children.
+        * Make sure they all go away, see __unhash_process().
+        */
+       for (;;) {
+               bool need_wait = false;
+
+               read_lock(&tasklist_lock);
+               if (!list_empty(&current->children)) {
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       need_wait = true;
+               }
+               read_unlock(&tasklist_lock);
+
+               if (!need_wait)
+                       break;
+               schedule();
+       }
+
        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;
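
zap_pid_ns_processes() now proceeds in two steps: the sys_wait4() loop reaps the EXIT_ZOMBIE children, and the new read_lock()/schedule() loop waits until even the remaining EXIT_DEAD children have been unhashed, as the comment explains. The first step is the in-kernel sibling of the familiar userspace reap-everything loop (illustrative analogue only):

#include <errno.h>
#include <sys/wait.h>

/* Reap every child we have; returns once wait reports ECHILD. */
static void reap_all_children(void)
{
        for (;;) {
                pid_t pid = waitpid(-1, NULL, 0);

                if (pid > 0)
                        continue;               /* reaped one child */
                if (pid < 0 && errno == EINTR)
                        continue;               /* interrupted, retry */
                break;                          /* ECHILD: nothing left */
        }
}
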
 
index 8b53db38a279e50a1fe3a8719752fe245201e7a0..238025f5472e42d745752f0c29d3534ab7b66285 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
-#include <scsi/scsi_scan.h>
 
 #include "power.h"
 
@@ -748,13 +747,6 @@ static int software_resume(void)
                        async_synchronize_full();
                }
 
-               /*
-                * We can't depend on SCSI devices being available after loading
-                * one of their modules until scsi_complete_async_scans() is
-                * called and the resume device usually is a SCSI one.
-                */
-               scsi_complete_async_scans();
-
                swsusp_resume_device = name_to_dev_t(resume_file);
                if (!swsusp_resume_device) {
                        error = -ENODEV;
index 91b0fd021a95d08eef106b3e0b86a41e3a135055..4ed81e74f86fbb2a9d7b0ab8e7d105d0944763c0 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
 
@@ -84,7 +83,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                 * appear.
                 */
                wait_for_device_probe();
-               scsi_complete_async_scans();
 
                data->swap = -1;
                data->mode = O_WRONLY;
index 32462d2b364ae68280b7a8c773e2f25c119e5091..ac4bc9e79465ed6cf416d9694f8ddc463e2e77ab 100644 (file)
@@ -193,12 +193,21 @@ static int console_may_schedule;
  * separated by ',', and find the message after the ';' character.
  */
 
+enum log_flags {
+       LOG_NOCONS      = 1,    /* already flushed, do not print to console */
+       LOG_NEWLINE     = 2,    /* text ended with a newline */
+       LOG_PREFIX      = 4,    /* text started with a prefix */
+       LOG_CONT        = 8,    /* text is a fragment of a continuation line */
+};
+
 struct log {
        u64 ts_nsec;            /* timestamp in nanoseconds */
        u16 len;                /* length of entire record */
        u16 text_len;           /* length of text buffer */
        u16 dict_len;           /* length of dictionary buffer */
-       u16 level;              /* syslog level + facility */
+       u8 facility;            /* syslog facility */
+       u8 flags:5;             /* internal record flags */
+       u8 level:3;             /* syslog level */
 };
 
 /*
@@ -210,6 +219,8 @@ static DEFINE_RAW_SPINLOCK(logbuf_lock);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
+static enum log_flags syslog_prev;
+static size_t syslog_partial;
 
 /* index and sequence number of the first record stored in the buffer */
 static u64 log_first_seq;
@@ -227,10 +238,10 @@ static u32 clear_idx;
 #define LOG_LINE_MAX 1024
 
 /* record buffer */
-#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define LOG_ALIGN 4
 #else
-#define LOG_ALIGN 8
+#define LOG_ALIGN __alignof__(struct log)
 #endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -286,6 +297,7 @@ static u32 log_next(u32 idx)
 
 /* insert record into the buffer, discard old ones, update heads */
 static void log_store(int facility, int level,
+                     enum log_flags flags, u64 ts_nsec,
                      const char *dict, u16 dict_len,
                      const char *text, u16 text_len)
 {
@@ -329,8 +341,13 @@ static void log_store(int facility, int level,
        msg->text_len = text_len;
        memcpy(log_dict(msg), dict, dict_len);
        msg->dict_len = dict_len;
-       msg->level = (facility << 3) | (level & 7);
-       msg->ts_nsec = local_clock();
+       msg->facility = facility;
+       msg->level = level & 7;
+       msg->flags = flags & 0x1f;
+       if (ts_nsec > 0)
+               msg->ts_nsec = ts_nsec;
+       else
+               msg->ts_nsec = local_clock();
        memset(log_dict(msg) + dict_len, 0, pad_len);
        msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
 
@@ -414,21 +431,23 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        if (!user)
                return -EBADF;
 
-       mutex_lock(&user->lock);
-       raw_spin_lock(&logbuf_lock);
+       ret = mutex_lock_interruptible(&user->lock);
+       if (ret)
+               return ret;
+       raw_spin_lock_irq(&logbuf_lock);
        while (user->seq == log_next_seq) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
-                       raw_spin_unlock(&logbuf_lock);
+                       raw_spin_unlock_irq(&logbuf_lock);
                        goto out;
                }
 
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                ret = wait_event_interruptible(log_wait,
                                               user->seq != log_next_seq);
                if (ret)
                        goto out;
-               raw_spin_lock(&logbuf_lock);
+               raw_spin_lock_irq(&logbuf_lock);
        }
 
        if (user->seq < log_first_seq) {
@@ -436,7 +455,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                user->idx = log_first_idx;
                user->seq = log_first_seq;
                ret = -EPIPE;
-               raw_spin_unlock(&logbuf_lock);
+               raw_spin_unlock_irq(&logbuf_lock);
                goto out;
        }
 
@@ -444,13 +463,13 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
        ts_usec = msg->ts_nsec;
        do_div(ts_usec, 1000);
        len = sprintf(user->buf, "%u,%llu,%llu;",
-                     msg->level, user->seq, ts_usec);
+                     (msg->facility << 3) | msg->level, user->seq, ts_usec);
 
        /* escape non-printable characters */
        for (i = 0; i < msg->text_len; i++) {
                unsigned char c = log_text(msg)[i];
 
-               if (c < ' ' || c >= 128)
+               if (c < ' ' || c >= 127 || c == '\\')
                        len += sprintf(user->buf + len, "\\x%02x", c);
                else
                        user->buf[len++] = c;
@@ -474,7 +493,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
                                continue;
                        }
 
-                       if (c < ' ' || c >= 128) {
+                       if (c < ' ' || c >= 127 || c == '\\') {
                                len += sprintf(user->buf + len, "\\x%02x", c);
                                continue;
                        }
@@ -486,7 +505,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 
        user->idx = log_next(user->idx);
        user->seq++;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        if (len > count) {
                ret = -EINVAL;
@@ -513,7 +532,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        if (offset)
                return -ESPIPE;
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        switch (whence) {
        case SEEK_SET:
                /* the first record */
@@ -537,7 +556,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
        default:
                ret = -EINVAL;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
        return ret;
 }
 
@@ -551,14 +570,14 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &log_wait, wait);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        if (user->seq < log_next_seq) {
                /* return error when data has vanished underneath us */
                if (user->seq < log_first_seq)
                        ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
                ret = POLLIN|POLLRDNORM;
        }
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        return ret;
 }
@@ -582,10 +601,10 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 
        mutex_init(&user->lock);
 
-       raw_spin_lock(&logbuf_lock);
+       raw_spin_lock_irq(&logbuf_lock);
        user->idx = log_first_idx;
        user->seq = log_first_seq;
-       raw_spin_unlock(&logbuf_lock);
+       raw_spin_unlock_irq(&logbuf_lock);
 
        file->private_data = user;
        return 0;
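
With facility and level now stored separately, devkmsg_read() formats each record as "<prefix>,<seq>,<ts_usec>;<text>", where prefix is (facility << 3) | level, and it \x-escapes backslashes and non-printable bytes; the reader side also switches to the _irq lock variants and an interruptible mutex. A minimal userspace follower of /dev/kmsg built on that format might look like this (sketch; unescaping and most error handling are trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char rec[8192];
        int fd = open("/dev/kmsg", O_RDONLY);   /* may need root/CAP_SYSLOG */

        if (fd < 0)
                return 1;

        for (;;) {
                ssize_t n = read(fd, rec, sizeof(rec) - 1);   /* one record per read() */
                unsigned int prefix;
                unsigned long long seq, ts_usec;
                char *text;

                if (n <= 0)
                        break;
                rec[n] = '\0';
                text = strchr(rec, ';');
                if (!text || sscanf(rec, "%u,%llu,%llu;", &prefix, &seq, &ts_usec) != 3)
                        continue;
                /* \xNN escapes are left as-is in this sketch */
                printf("seq=%llu fac=%u lvl=%u %s",
                       seq, prefix >> 3, prefix & 7, text + 1);
        }
        close(fd);
        return 0;
}
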
@@ -785,44 +804,64 @@ static bool printk_time;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static size_t print_time(u64 ts, char *buf)
+{
+       unsigned long rem_nsec;
+
+       if (!printk_time)
+               return 0;
+
+       if (!buf)
+               return 15;
+
+       rem_nsec = do_div(ts, 1000000000);
+       return sprintf(buf, "[%5lu.%06lu] ",
+                      (unsigned long)ts, rem_nsec / 1000);
+}
+
 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 {
        size_t len = 0;
+       unsigned int prefix = (msg->facility << 3) | msg->level;
 
        if (syslog) {
                if (buf) {
-                       len += sprintf(buf, "<%u>", msg->level);
+                       len += sprintf(buf, "<%u>", prefix);
                } else {
                        len += 3;
-                       if (msg->level > 9)
-                               len++;
-                       if (msg->level > 99)
+                       if (prefix > 999)
+                               len += 3;
+                       else if (prefix > 99)
+                               len += 2;
+                       else if (prefix > 9)
                                len++;
                }
        }
 
-       if (printk_time) {
-               if (buf) {
-                       unsigned long long ts = msg->ts_nsec;
-                       unsigned long rem_nsec = do_div(ts, 1000000000);
-
-                       len += sprintf(buf + len, "[%5lu.%06lu] ",
-                                        (unsigned long) ts, rem_nsec / 1000);
-               } else {
-                       len += 15;
-               }
-       }
-
+       len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
        return len;
 }
 
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size)
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size)
 {
        const char *text = log_text(msg);
        size_t text_size = msg->text_len;
+       bool prefix = true;
+       bool newline = true;
        size_t len = 0;
 
+       if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
+               prefix = false;
+
+       if (msg->flags & LOG_CONT) {
+               if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
+                       prefix = false;
+
+               if (!(msg->flags & LOG_NEWLINE))
+                       newline = false;
+       }
+
        do {
                const char *next = memchr(text, '\n', text_size);
                size_t text_len;
@@ -840,16 +879,22 @@ static size_t msg_print_text(const struct log *msg, bool syslog,
                            text_len + 1>= size - len)
                                break;
 
-                       len += print_prefix(msg, syslog, buf + len);
+                       if (prefix)
+                               len += print_prefix(msg, syslog, buf + len);
                        memcpy(buf + len, text, text_len);
                        len += text_len;
-                       buf[len++] = '\n';
+                       if (next || newline)
+                               buf[len++] = '\n';
                } else {
                        /* SYSLOG_ACTION_* buffer size only calculation */
-                       len += print_prefix(msg, syslog, NULL);
-                       len += text_len + 1;
+                       if (prefix)
+                               len += print_prefix(msg, syslog, NULL);
+                       len += text_len;
+                       if (next || newline)
+                               len++;
                }
 
+               prefix = true;
                text = next;
        } while (text);
 
@@ -860,26 +905,60 @@ static int syslog_print(char __user *buf, int size)
 {
        char *text;
        struct log *msg;
-       int len;
+       int len = 0;
 
        text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
        if (!text)
                return -ENOMEM;
 
-       raw_spin_lock_irq(&logbuf_lock);
-       if (syslog_seq < log_first_seq) {
-               /* messages are gone, move to first one */
-               syslog_seq = log_first_seq;
-               syslog_idx = log_first_idx;
-       }
-       msg = log_from_idx(syslog_idx);
-       len = msg_print_text(msg, true, text, LOG_LINE_MAX);
-       syslog_idx = log_next(syslog_idx);
-       syslog_seq++;
-       raw_spin_unlock_irq(&logbuf_lock);
+       while (size > 0) {
+               size_t n;
+               size_t skip;
 
-       if (len > 0 && copy_to_user(buf, text, len))
-               len = -EFAULT;
+               raw_spin_lock_irq(&logbuf_lock);
+               if (syslog_seq < log_first_seq) {
+                       /* messages are gone, move to first one */
+                       syslog_seq = log_first_seq;
+                       syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
+               }
+               if (syslog_seq == log_next_seq) {
+                       raw_spin_unlock_irq(&logbuf_lock);
+                       break;
+               }
+
+               skip = syslog_partial;
+               msg = log_from_idx(syslog_idx);
+               n = msg_print_text(msg, syslog_prev, true, text, LOG_LINE_MAX);
+               if (n - syslog_partial <= size) {
+                       /* message fits into buffer, move forward */
+                       syslog_idx = log_next(syslog_idx);
+                       syslog_seq++;
+                       syslog_prev = msg->flags;
+                       n -= syslog_partial;
+                       syslog_partial = 0;
+               } else if (!len) {
+                       /* partial read(), remember position */
+                       n = size;
+                       syslog_partial += n;
+               } else
+                       n = 0;
+               raw_spin_unlock_irq(&logbuf_lock);
+
+               if (!n)
+                       break;
+
+               if (copy_to_user(buf, text + skip, n)) {
+                       if (!len)
+                               len = -EFAULT;
+                       break;
+               }
+
+               len += n;
+               size -= n;
+               buf += n;
+       }
 
        kfree(text);
        return len;
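
syslog_print() now loops over records and remembers its position in syslog_partial, so a record that does not fit into the caller's buffer is handed out piecewise across successive syslog(2) reads rather than being mishandled. A userspace sketch exercising that path through glibc's klogctl() (the numeric 2 stands for SYSLOG_ACTION_READ, which typically requires CAP_SYSLOG):

#include <stdio.h>
#include <sys/klog.h>

#define SYSLOG_ACTION_READ 2            /* blocking read of unread records */

int main(void)
{
        char buf[64];                   /* deliberately small: forces partial reads */

        for (;;) {
                int n = klogctl(SYSLOG_ACTION_READ, buf, sizeof(buf));

                if (n <= 0)
                        break;
                fwrite(buf, 1, n, stdout);
        }
        return 0;
}
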
@@ -899,6 +978,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                u64 next_seq;
                u64 seq;
                u32 idx;
+               enum log_flags prev;
 
                if (clear_seq < log_first_seq) {
                        /* messages are gone, move to first available one */
@@ -909,41 +989,47 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                /*
                 * Find first record that fits, including all following records,
                 * into the user-provided buffer for this dump.
-               */
+                */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len += msg_print_text(msg, true, NULL, 0);
+                       len += msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
+
+               /* move first record forward until length fits into the buffer */
                seq = clear_seq;
                idx = clear_idx;
+               prev = 0;
                while (len > size && seq < log_next_seq) {
                        struct log *msg = log_from_idx(idx);
 
-                       len -= msg_print_text(msg, true, NULL, 0);
+                       len -= msg_print_text(msg, prev, true, NULL, 0);
                        idx = log_next(idx);
                        seq++;
                }
 
-               /* last message in this dump */
+               /* last message fitting into this dump */
                next_seq = log_next_seq;
 
                len = 0;
+               prev = 0;
                while (len >= 0 && seq < next_seq) {
                        struct log *msg = log_from_idx(idx);
                        int textlen;
 
-                       textlen = msg_print_text(msg, true, text, LOG_LINE_MAX);
+                       textlen = msg_print_text(msg, prev, true, text, LOG_LINE_MAX);
                        if (textlen < 0) {
                                len = textlen;
                                break;
                        }
                        idx = log_next(idx);
                        seq++;
+                       prev = msg->flags;
 
                        raw_spin_unlock_irq(&logbuf_lock);
                        if (copy_to_user(buf + len, text, textlen))
@@ -956,6 +1042,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
                                /* messages are gone, move to next one */
                                seq = log_first_seq;
                                idx = log_first_idx;
+                               prev = 0;
                        }
                }
        }
@@ -1027,6 +1114,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
        /* Clear ring buffer */
        case SYSLOG_ACTION_CLEAR:
                syslog_print_all(NULL, 0, true);
+               break;
        /* Disable logging to console */
        case SYSLOG_ACTION_CONSOLE_OFF:
                if (saved_console_loglevel == -1)
@@ -1059,6 +1147,8 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                        /* messages are gone, move to first one */
                        syslog_seq = log_first_seq;
                        syslog_idx = log_first_idx;
+                       syslog_prev = 0;
+                       syslog_partial = 0;
                }
                if (from_file) {
                        /*
@@ -1068,19 +1158,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
                         */
                        error = log_next_idx - syslog_idx;
                } else {
-                       u64 seq;
-                       u32 idx;
+                       u64 seq = syslog_seq;
+                       u32 idx = syslog_idx;
+                       enum log_flags prev = syslog_prev;
 
                        error = 0;
-                       seq = syslog_seq;
-                       idx = syslog_idx;
                        while (seq < log_next_seq) {
                                struct log *msg = log_from_idx(idx);
 
-                               error += msg_print_text(msg, true, NULL, 0);
+                               error += msg_print_text(msg, prev, true, NULL, 0);
                                idx = log_next(idx);
                                seq++;
+                               prev = msg->flags;
                        }
+                       error -= syslog_partial;
                }
                raw_spin_unlock_irq(&logbuf_lock);
                break;
@@ -1101,21 +1192,6 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
        return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
 }
 
-#ifdef CONFIG_KGDB_KDB
-/* kdb dmesg command needs access to the syslog buffer.  do_syslog()
- * uses locks so it cannot be used during debugging.  Just tell kdb
- * where the start and end of the physical and logical logs are.  This
- * is equivalent to do_syslog(3).
- */
-void kdb_syslog_data(char *syslog_data[4])
-{
-       syslog_data[0] = log_buf;
-       syslog_data[1] = log_buf + log_buf_len;
-       syslog_data[2] = log_buf + log_first_idx;
-       syslog_data[3] = log_buf + log_next_idx;
-}
-#endif /* CONFIG_KGDB_KDB */
-
 static bool __read_mostly ignore_loglevel;
 
 static int __init ignore_loglevel_setup(char *str)
@@ -1259,22 +1335,98 @@ static inline void printk_delay(void)
        }
 }
 
+/*
+ * Continuation lines are buffered, and not committed to the record buffer
+ * until the line is complete, or a race forces it. The line fragments
+ * though, are printed immediately to the consoles to ensure everything has
+ * reached the console in case of a kernel crash.
+ */
+static struct cont {
+       char buf[LOG_LINE_MAX];
+       size_t len;                     /* length == 0 means unused buffer */
+       size_t cons;                    /* bytes written to console */
+       struct task_struct *owner;      /* task of first print */
+       u64 ts_nsec;                    /* time of first print */
+       u8 level;                       /* log level of first message */
+       u8 facility;                    /* log facility of first message */
+       bool flushed:1;                 /* buffer sealed and committed */
+} cont;
+
+static void cont_flush(void)
+{
+       if (cont.flushed)
+               return;
+       if (cont.len == 0)
+               return;
+
+       log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+                 NULL, 0, cont.buf, cont.len);
+
+       cont.flushed = true;
+}
+
+static bool cont_add(int facility, int level, const char *text, size_t len)
+{
+       if (cont.len && cont.flushed)
+               return false;
+
+       if (cont.len + len > sizeof(cont.buf)) {
+               cont_flush();
+               return false;
+       }
+
+       if (!cont.len) {
+               cont.facility = facility;
+               cont.level = level;
+               cont.owner = current;
+               cont.ts_nsec = local_clock();
+               cont.cons = 0;
+               cont.flushed = false;
+       }
+
+       memcpy(cont.buf + cont.len, text, len);
+       cont.len += len;
+       return true;
+}
+
+static size_t cont_print_text(char *text, size_t size)
+{
+       size_t textlen = 0;
+       size_t len;
+
+       if (cont.cons == 0) {
+               textlen += print_time(cont.ts_nsec, text);
+               size -= textlen;
+       }
+
+       len = cont.len - cont.cons;
+       if (len > 0) {
+               if (len+1 > size)
+                       len = size-1;
+               memcpy(text + textlen, cont.buf + cont.cons, len);
+               textlen += len;
+               cont.cons = cont.len;
+       }
+
+       if (cont.flushed) {
+               text[textlen++] = '\n';
+               /* got everything, release buffer */
+               cont.len = 0;
+       }
+       return textlen;
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
                            const char *dict, size_t dictlen,
                            const char *fmt, va_list args)
 {
        static int recursion_bug;
-       static char cont_buf[LOG_LINE_MAX];
-       static size_t cont_len;
-       static int cont_level;
-       static struct task_struct *cont_task;
        static char textbuf[LOG_LINE_MAX];
        char *text = textbuf;
        size_t text_len;
+       enum log_flags lflags = 0;
        unsigned long flags;
        int this_cpu;
-       bool newline = false;
-       bool prefix = false;
        int printed_len = 0;
 
        boot_delay_msec();
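
The cont buffer introduced above is what backs printk() continuation lines: fragments printed without a trailing newline by the same task are merged into a single record (and flushed to the consoles early), instead of each fragment becoming its own record. Driver-side usage is unchanged, e.g. (sketch):

#include <linux/printk.h>

static void report_lanes(int lanes, bool ok)
{
        pr_info("link trained: %d lanes", lanes);       /* no newline yet */
        if (!ok)
                pr_cont(" (degraded)");
        pr_cont("\n");                                  /* completes one record */
}
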
@@ -1313,7 +1465,8 @@ asmlinkage int vprintk_emit(int facility, int level,
                recursion_bug = 0;
                printed_len += strlen(recursion_msg);
                /* emit KERN_CRIT message */
-               log_store(0, 2, NULL, 0, recursion_msg, printed_len);
+               log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
+                         NULL, 0, recursion_msg, printed_len);
        }
 
        /*
@@ -1325,7 +1478,7 @@ asmlinkage int vprintk_emit(int facility, int level,
        /* mark and strip a trailing newline */
        if (text_len && text[text_len-1] == '\n') {
                text_len--;
-               newline = true;
+               lflags |= LOG_NEWLINE;
        }
 
        /* strip syslog prefix and extract log level or control flags */
@@ -1335,7 +1488,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                        if (level == -1)
                                level = text[1] - '0';
                case 'd':       /* KERN_DEFAULT */
-                       prefix = true;
+                       lflags |= LOG_PREFIX;
                case 'c':       /* KERN_CONT */
                        text += 3;
                        text_len -= 3;
@@ -1345,61 +1498,41 @@ asmlinkage int vprintk_emit(int facility, int level,
        if (level == -1)
                level = default_message_loglevel;
 
-       if (dict) {
-               prefix = true;
-               newline = true;
-       }
-
-       if (!newline) {
-               if (cont_len && (prefix || cont_task != current)) {
-                       /*
-                        * Flush earlier buffer, which is either from a
-                        * different thread, or when we got a new prefix.
-                        */
-                       log_store(facility, cont_level, NULL, 0, cont_buf, cont_len);
-                       cont_len = 0;
-               }
+       if (dict)
+               lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-               if (!cont_len) {
-                       cont_level = level;
-                       cont_task = current;
-               }
+       if (!(lflags & LOG_NEWLINE)) {
+               /*
+                * Flush the conflicting buffer. An earlier newline was missing,
+                * or another task also prints continuation lines.
+                */
+               if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
+                       cont_flush();
 
-               /* buffer or append to earlier buffer from the same thread */
-               if (cont_len + text_len > sizeof(cont_buf))
-                       text_len = sizeof(cont_buf) - cont_len;
-               memcpy(cont_buf + cont_len, text, text_len);
-               cont_len += text_len;
+               /* buffer line if possible, otherwise store it right away */
+               if (!cont_add(facility, level, text, text_len))
+                       log_store(facility, level, lflags | LOG_CONT, 0,
+                                 dict, dictlen, text, text_len);
        } else {
-               if (cont_len && cont_task == current) {
-                       if (prefix) {
-                               /*
-                                * New prefix from the same thread; flush. We
-                                * either got no earlier newline, or we race
-                                * with an interrupt.
-                                */
-                               log_store(facility, cont_level,
-                                         NULL, 0, cont_buf, cont_len);
-                               cont_len = 0;
-                       }
+               bool stored = false;
 
-                       /* append to the earlier buffer and flush */
-                       if (cont_len + text_len > sizeof(cont_buf))
-                               text_len = sizeof(cont_buf) - cont_len;
-                       memcpy(cont_buf + cont_len, text, text_len);
-                       cont_len += text_len;
-                       log_store(facility, cont_level,
-                                 NULL, 0, cont_buf, cont_len);
-                       cont_len = 0;
-                       cont_task = NULL;
-                       printed_len = cont_len;
-               } else {
-                       /* ordinary single and terminated line */
-                       log_store(facility, level,
-                                 dict, dictlen, text, text_len);
-                       printed_len = text_len;
+               /*
+                * If an earlier newline was missing and it was the same task,
+                * either merge it with the current buffer and flush, or if
+                * there was a race with interrupts (prefix == true) then just
+                * flush it out and store this line separately.
+                */
+               if (cont.len && cont.owner == current) {
+                       if (!(lflags & LOG_PREFIX))
+                               stored = cont_add(facility, level, text, text_len);
+                       cont_flush();
                }
+
+               if (!stored)
+                       log_store(facility, level, lflags, 0,
+                                 dict, dictlen, text, text_len);
        }
+       printed_len += text_len;
 
        /*
         * Try to acquire and then immediately release the console semaphore.
@@ -1486,11 +1619,18 @@ EXPORT_SYMBOL(printk);
 #else
 
 #define LOG_LINE_MAX 0
+static struct cont {
+       size_t len;
+       size_t cons;
+       u8 level;
+       bool flushed:1;
+} cont;
 static struct log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, bool syslog,
-                            char *buf, size_t size) { return 0; }
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+                            bool syslog, char *buf, size_t size) { return 0; }
+static size_t cont_print_text(char *text, size_t size) { return 0; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -1765,6 +1905,7 @@ void wake_up_klogd(void)
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u32 console_idx;
+static enum log_flags console_prev;
 
 /**
  * console_unlock - unlock the console system
@@ -1782,6 +1923,7 @@ static u32 console_idx;
  */
 void console_unlock(void)
 {
+       static char text[LOG_LINE_MAX];
        static u64 seen_seq;
        unsigned long flags;
        bool wake_klogd = false;
@@ -1794,10 +1936,23 @@ void console_unlock(void)
 
        console_may_schedule = 0;
 
+       /* flush buffered message fragment immediately to console */
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       if (cont.len && (cont.cons < cont.len || cont.flushed)) {
+               size_t len;
+
+               len = cont_print_text(text, sizeof(text));
+               raw_spin_unlock(&logbuf_lock);
+               stop_critical_timings();
+               call_console_drivers(cont.level, text, len);
+               start_critical_timings();
+               local_irq_restore(flags);
+       } else
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 again:
        for (;;) {
                struct log *msg;
-               static char text[LOG_LINE_MAX];
                size_t len;
                int level;
 
@@ -1811,18 +1966,35 @@ again:
                        /* messages are gone, move to first one */
                        console_seq = log_first_seq;
                        console_idx = log_first_idx;
+                       console_prev = 0;
                }
-
+skip:
                if (console_seq == log_next_seq)
                        break;
 
                msg = log_from_idx(console_idx);
-               level = msg->level & 7;
-
-               len = msg_print_text(msg, false, text, sizeof(text));
+               if (msg->flags & LOG_NOCONS) {
+                       /*
+                        * Skip record we have buffered and already printed
+                        * directly to the console when we received it.
+                        */
+                       console_idx = log_next(console_idx);
+                       console_seq++;
+                       /*
+                        * We will get here again when we register a new
+                        * CON_PRINTBUFFER console. Clear the flag so we
+                        * will properly dump everything later.
+                        */
+                       msg->flags &= ~LOG_NOCONS;
+                       goto skip;
+               }
 
+               level = msg->level;
+               len = msg_print_text(msg, console_prev, false,
+                                    text, sizeof(text));
                console_idx = log_next(console_idx);
                console_seq++;
+               console_prev = msg->flags;
                raw_spin_unlock(&logbuf_lock);
 
                stop_critical_timings();        /* don't trace print latency */
@@ -2085,6 +2257,7 @@ void register_console(struct console *newcon)
                raw_spin_lock_irqsave(&logbuf_lock, flags);
                console_seq = syslog_seq;
                console_idx = syslog_idx;
+               console_prev = syslog_prev;
                raw_spin_unlock_irqrestore(&logbuf_lock, flags);
                /*
                 * We're about to replay the log buffer.  Only do this to the
@@ -2300,48 +2473,256 @@ module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
  *
- * Iterate through each of the dump devices and call the oops/panic
- * callbacks with the log buffer.
+ * Call each of the registered dumper's dump() callback, which can
+ * retrieve the kmsg records with kmsg_dump_get_line() or
+ * kmsg_dump_get_buffer().
  */
 void kmsg_dump(enum kmsg_dump_reason reason)
 {
-       u64 idx;
        struct kmsg_dumper *dumper;
-       const char *s1, *s2;
-       unsigned long l1, l2;
        unsigned long flags;
 
        if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
                return;
 
-       /* Theoretically, the log could move on after we do this, but
-          there's not a lot we can do about that. The new messages
-          will overwrite the start of what we dump. */
+       rcu_read_lock();
+       list_for_each_entry_rcu(dumper, &dump_list, list) {
+               if (dumper->max_reason && reason > dumper->max_reason)
+                       continue;
+
+               /* initialize iterator with data about the stored records */
+               dumper->active = true;
+
+               raw_spin_lock_irqsave(&logbuf_lock, flags);
+               dumper->cur_seq = clear_seq;
+               dumper->cur_idx = clear_idx;
+               dumper->next_seq = log_next_seq;
+               dumper->next_idx = log_next_idx;
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+               /* invoke dumper which will iterate over records */
+               dumper->dump(dumper, reason);
+
+               /* reset iterator */
+               dumper->active = false;
+       }
+       rcu_read_unlock();
+}
+
+/**
+ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ *
+ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
+ */
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+                              char *line, size_t size, size_t *len)
+{
+       struct log *msg;
+       size_t l = 0;
+       bool ret = false;
+
+       if (!dumper->active)
+               goto out;
+
+       if (dumper->cur_seq < log_first_seq) {
+               /* messages are gone, move to first available one */
+               dumper->cur_seq = log_first_seq;
+               dumper->cur_idx = log_first_idx;
+       }
+
+       /* last entry */
+       if (dumper->cur_seq >= log_next_seq)
+               goto out;
+
+       msg = log_from_idx(dumper->cur_idx);
+       l = msg_print_text(msg, 0, syslog, line, size);
+
+       dumper->cur_idx = log_next(dumper->cur_idx);
+       dumper->cur_seq++;
+       ret = true;
+out:
+       if (len)
+               *len = l;
+       return ret;
+}
+
+/**
+ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+                       char *line, size_t size, size_t *len)
+{
+       unsigned long flags;
+       bool ret;
+
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+
+/**
+ * kmsg_dump_get_buffer - copy kmsg log lines
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @buf: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the end of the kmsg buffer and fill the provided buffer
+ * with as many of the *youngest* kmsg records as will fit into it.
+ * If the buffer is large enough, all available kmsg records will be
+ * copied with a single call.
+ *
+ * Consecutive calls will fill the buffer with the next block of
+ * available older records, not including the earlier retrieved ones.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+                         char *buf, size_t size, size_t *len)
+{
+       unsigned long flags;
+       u64 seq;
+       u32 idx;
+       u64 next_seq;
+       u32 next_idx;
+       enum log_flags prev;
+       size_t l = 0;
+       bool ret = false;
+
+       if (!dumper->active)
+               goto out;
 
        raw_spin_lock_irqsave(&logbuf_lock, flags);
-       if (syslog_seq < log_first_seq)
-               idx = syslog_idx;
-       else
-               idx = log_first_idx;
+       if (dumper->cur_seq < log_first_seq) {
+               /* messages are gone, move to first available one */
+               dumper->cur_seq = log_first_seq;
+               dumper->cur_idx = log_first_idx;
+       }
+
+       /* last entry */
+       if (dumper->cur_seq >= dumper->next_seq) {
+               raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+               goto out;
+       }
 
-       if (idx > log_next_idx) {
-               s1 = log_buf;
-               l1 = log_next_idx;
+       /* calculate length of entire buffer */
+       seq = dumper->cur_seq;
+       idx = dumper->cur_idx;
+       prev = 0;
+       while (seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
+
+               l += msg_print_text(msg, prev, true, NULL, 0);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
+       }
 
-               s2 = log_buf + idx;
-               l2 = log_buf_len - idx;
-       } else {
-               s1 = "";
-               l1 = 0;
+       /* move first record forward until length fits into the buffer */
+       seq = dumper->cur_seq;
+       idx = dumper->cur_idx;
+       prev = 0;
+       while (l > size && seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
+
+               l -= msg_print_text(msg, prev, true, NULL, 0);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
+       }
+
+       /* last message in next iteration */
+       next_seq = seq;
+       next_idx = idx;
+
+       l = 0;
+       prev = 0;
+       while (seq < dumper->next_seq) {
+               struct log *msg = log_from_idx(idx);
 
-               s2 = log_buf + idx;
-               l2 = log_next_idx - idx;
+               l += msg_print_text(msg, prev, syslog, buf + l, size - l);
+               idx = log_next(idx);
+               seq++;
+               prev = msg->flags;
        }
+
+       dumper->next_seq = next_seq;
+       dumper->next_idx = next_idx;
+       ret = true;
        raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+out:
+       if (len)
+               *len = l;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(dumper, &dump_list, list)
-               dumper->dump(dumper, reason, s1, l1, s2, l2);
-       rcu_read_unlock();
+/**
+ * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ *
+ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+ */
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+       dumper->cur_seq = clear_seq;
+       dumper->cur_idx = clear_idx;
+       dumper->next_seq = log_next_seq;
+       dumper->next_idx = log_next_idx;
+}
+
+/**
+ * kmsg_dump_rewind - reset the iterator
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ */
+void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&logbuf_lock, flags);
+       kmsg_dump_rewind_nolock(dumper);
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 }
+EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 #endif
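
Taken together, kmsg_dump(), kmsg_dump_get_line()/kmsg_dump_get_buffer() and kmsg_dump_rewind() replace the old dump(dumper, reason, s1, l1, s2, l2) callback: a dumper now walks the records itself from its two-argument dump() callback. A minimal dumper module against the new interface might look like this (sketch; it assumes the matching include/linux/kmsg_dump.h update from this series, and persist_line() stands in for real storage):

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static void persist_line(const char *line, size_t len)
{
        /* write to flash/EEPROM/etc. -- platform specific, omitted */
}

static void sample_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
        static char line[256];
        size_t len;

        /* copy one record per call, oldest first, until none are left */
        while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
                persist_line(line, len);
}

static struct kmsg_dumper sample_dumper = {
        .dump = sample_dump,
};

static int __init sample_dumper_init(void)
{
        return kmsg_dump_register(&sample_dumper);
}

static void __exit sample_dumper_exit(void)
{
        kmsg_dump_unregister(&sample_dumper);
}

module_init(sample_dumper_init);
module_exit(sample_dumper_exit);
MODULE_LICENSE("GPL");
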
index 0da7b88d92d0a599242e8dcf70ab566d3e23b687..4b97bba7396e6194db4bf5b644ef8b5a1997c974 100644 (file)
@@ -201,6 +201,7 @@ void rcu_note_context_switch(int cpu)
 {
        trace_rcu_utilization("Start context switch");
        rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch(cpu);
        trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -1397,6 +1398,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
        rdp->qlen_lazy += rsp->qlen_lazy;
        rdp->qlen += rsp->qlen;
        rdp->n_cbs_adopted += rsp->qlen;
+       if (rsp->qlen_lazy != rsp->qlen)
+               rcu_idle_count_callbacks_posted();
        rsp->qlen_lazy = 0;
        rsp->qlen = 0;
 
@@ -1528,7 +1531,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        unsigned long flags;
        struct rcu_head *next, *list, **tail;
-       int bl, count, count_lazy;
+       int bl, count, count_lazy, i;
 
        /* If no callbacks are ready, just return.*/
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1551,9 +1554,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
        *rdp->nxttail[RCU_DONE_TAIL] = NULL;
        tail = rdp->nxttail[RCU_DONE_TAIL];
-       for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
-               if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
-                       rdp->nxttail[count] = &rdp->nxtlist;
+       for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+               if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+                       rdp->nxttail[i] = &rdp->nxtlist;
        local_irq_restore(flags);
 
        /* Invoke callbacks. */
@@ -1581,9 +1584,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        if (list != NULL) {
                *tail = rdp->nxtlist;
                rdp->nxtlist = list;
-               for (count = 0; count < RCU_NEXT_SIZE; count++)
-                       if (&rdp->nxtlist == rdp->nxttail[count])
-                               rdp->nxttail[count] = tail;
+               for (i = 0; i < RCU_NEXT_SIZE; i++)
+                       if (&rdp->nxtlist == rdp->nxttail[i])
+                               rdp->nxttail[i] = tail;
                        else
                                break;
        }
index 7f5d138dedf55c94c7f9a8f6ba9953b90f6b146b..19b61ac1079f825702a7f17ae7d8dec88756c141 100644 (file)
@@ -84,6 +84,20 @@ struct rcu_dynticks {
                                    /* Process level is worth LLONG_MAX/2. */
        int dynticks_nmi_nesting;   /* Track NMI nesting level. */
        atomic_t dynticks;          /* Even value for idle, else odd. */
+#ifdef CONFIG_RCU_FAST_NO_HZ
+       int dyntick_drain;          /* Prepare-for-idle state variable. */
+       unsigned long dyntick_holdoff;
+                                   /* No retries for the jiffy of failure. */
+       struct timer_list idle_gp_timer;
+                                   /* Wake up CPU sleeping with callbacks. */
+       unsigned long idle_gp_timer_expires;
+                                   /* When to wake up CPU (for repost). */
+       bool idle_first_pass;       /* First pass of attempt to go idle? */
+       unsigned long nonlazy_posted;
+                                   /* # times non-lazy CBs posted to CPU. */
+       unsigned long nonlazy_posted_snap;
+                                   /* idle-period nonlazy_posted snapshot. */
+#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
 
 /* RCU's kthread states for tracing. */
@@ -430,6 +444,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
index 2411000d98690aacd76d20acc039964402e83388..3e4899459f3d9d7177ee67d41b8ea1121ddb9540 100644 (file)
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
        struct task_struct *t = current;
        unsigned long flags;
@@ -164,7 +164,7 @@ void rcu_preempt_note_context_switch(void)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ void rcu_preempt_note_context_switch(void)
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
-       rcu_preempt_qs(smp_processor_id());
+       rcu_preempt_qs(cpu);
        local_irq_restore(flags);
 }
 
@@ -1001,6 +1001,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
@@ -1886,8 +1894,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+       *delta_jiffies = ULONG_MAX;
        return rcu_cpu_has_callbacks(cpu);
 }
 
@@ -1962,41 +1971,6 @@ static void rcu_idle_count_callbacks_posted(void)
 #define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
-/* Loop counter for rcu_prepare_for_idle(). */
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
-static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
-/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
-static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
-/* Enable special processing on first attempt to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
-/* Running count of non-lazy callbacks posted, never decremented. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
-/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-       /* Flag a new idle sojourn to the idle-entry state machine. */
-       per_cpu(rcu_idle_first_pass, cpu) = 1;
-       /* If no callbacks, RCU doesn't need the CPU. */
-       if (!rcu_cpu_has_callbacks(cpu))
-               return 0;
-       /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-       return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2039,6 +2013,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
 }
 
+/*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       /* Flag a new idle sojourn to the idle-entry state machine. */
+       rdtp->idle_first_pass = 1;
+       /* If no callbacks, RCU doesn't need the CPU. */
+       if (!rcu_cpu_has_callbacks(cpu)) {
+               *delta_jiffies = ULONG_MAX;
+               return 0;
+       }
+       if (rdtp->dyntick_holdoff == jiffies) {
+               /* RCU recently tried and failed, so don't try again. */
+               *delta_jiffies = 1;
+               return 1;
+       }
+       /* Set up for the possibility that RCU will post a timer. */
+       if (rcu_cpu_has_nonlazy_callbacks(cpu))
+               *delta_jiffies = RCU_IDLE_GP_DELAY;
+       else
+               *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+       return 0;
+}
+
 /*
  * Handler for smp_call_function_single().  The only point of this
  * handler is to wake the CPU up, so the handler does only tracing.
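/*
 * Illustrative aside: a hedged, standalone sketch of the contract documented
 * for rcu_needs_cpu() above.  The idle-entry path asks whether RCU needs the
 * CPU and, if not, bounds its sleep by the reported delta so that any timer
 * RCU posts later in rcu_prepare_for_idle() is not pushed past the wakeup
 * the tick code already programmed.  demo_rcu_needs_cpu() is a stub with the
 * same shape as rcu_needs_cpu(cpu, &delta); the real tick-stop code differs.
 */
#include <stdio.h>
#include <limits.h>

static int demo_rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
        (void)cpu;
        *delta_jiffies = 6;     /* pretend non-lazy callbacks are pending */
        return 0;               /* 0: RCU does not need the CPU right now */
}

static unsigned long pick_idle_sleep(int cpu, unsigned long next_timer_delta)
{
        unsigned long rcu_delta;

        if (demo_rcu_needs_cpu(cpu, &rcu_delta))
                return 0;       /* keep the scheduling-clock tick running */

        /* Sleep no longer than the next timer or RCU's own deadline. */
        return next_timer_delta < rcu_delta ? next_timer_delta : rcu_delta;
}

int main(void)
{
        printf("sleep for %lu jiffies\n", pick_idle_sleep(0, ULONG_MAX));
        return 0;
}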
@@ -2075,21 +2090,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-       per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-       setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-                   rcu_idle_gp_timer_func, cpu);
-       per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
-       per_cpu(rcu_idle_first_pass, cpu) = 1;
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       rdtp->dyntick_holdoff = jiffies - 1;
+       setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+       rdtp->idle_gp_timer_expires = jiffies - 1;
+       rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-       del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       del_timer(&rdtp->idle_gp_timer);
        trace_rcu_prep_idle("Cleanup after idle");
 }
 
@@ -2108,42 +2126,41 @@ static void rcu_cleanup_after_idle(int cpu)
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
        struct timer_list *tp;
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
        /*
         * If this is an idle re-entry, for example, due to use of
         * RCU_NONIDLE() or the new idle-loop tracing API within the idle
         * loop, then don't take any state-machine actions, unless the
         * momentary exit from idle queued additional non-lazy callbacks.
-        * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+        * Instead, repost the ->idle_gp_timer if this CPU has callbacks
         * pending.
         */
-       if (!per_cpu(rcu_idle_first_pass, cpu) &&
-           (per_cpu(rcu_nonlazy_posted, cpu) ==
-            per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+       if (!rdtp->idle_first_pass &&
+           (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
                if (rcu_cpu_has_callbacks(cpu)) {
-                       tp = &per_cpu(rcu_idle_gp_timer, cpu);
-                       mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+                       tp = &rdtp->idle_gp_timer;
+                       mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
                }
                return;
        }
-       per_cpu(rcu_idle_first_pass, cpu) = 0;
-       per_cpu(rcu_nonlazy_posted_snap, cpu) =
-               per_cpu(rcu_nonlazy_posted, cpu) - 1;
+       rdtp->idle_first_pass = 0;
+       rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
 
        /*
         * If there are no callbacks on this CPU, enter dyntick-idle mode.
         * Also reset state to avoid prejudicing later attempts.
         */
        if (!rcu_cpu_has_callbacks(cpu)) {
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-               per_cpu(rcu_dyntick_drain, cpu) = 0;
+               rdtp->dyntick_holdoff = jiffies - 1;
+               rdtp->dyntick_drain = 0;
                trace_rcu_prep_idle("No callbacks");
                return;
        }
@@ -2152,36 +2169,37 @@ static void rcu_prepare_for_idle(int cpu)
         * If in holdoff mode, just return.  We will presumably have
         * refrained from disabling the scheduling-clock tick.
         */
-       if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+       if (rdtp->dyntick_holdoff == jiffies) {
                trace_rcu_prep_idle("In holdoff");
                return;
        }
 
-       /* Check and update the rcu_dyntick_drain sequencing. */
-       if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+       /* Check and update the ->dyntick_drain sequencing. */
+       if (rdtp->dyntick_drain <= 0) {
                /* First time through, initialize the counter. */
-               per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-       } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+               rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+       } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
                   !rcu_pending(cpu) &&
                   !local_softirq_pending()) {
                /* Can we go dyntick-idle despite still having callbacks? */
-               trace_rcu_prep_idle("Dyntick with callbacks");
-               per_cpu(rcu_dyntick_drain, cpu) = 0;
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-               if (rcu_cpu_has_nonlazy_callbacks(cpu))
-                       per_cpu(rcu_idle_gp_timer_expires, cpu) =
+               rdtp->dyntick_drain = 0;
+               rdtp->dyntick_holdoff = jiffies;
+               if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+                       trace_rcu_prep_idle("Dyntick with callbacks");
+                       rdtp->idle_gp_timer_expires =
                                           jiffies + RCU_IDLE_GP_DELAY;
-               else
-                       per_cpu(rcu_idle_gp_timer_expires, cpu) =
+               } else {
+                       rdtp->idle_gp_timer_expires =
                                           jiffies + RCU_IDLE_LAZY_GP_DELAY;
-               tp = &per_cpu(rcu_idle_gp_timer, cpu);
-               mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
-               per_cpu(rcu_nonlazy_posted_snap, cpu) =
-                       per_cpu(rcu_nonlazy_posted, cpu);
+                       trace_rcu_prep_idle("Dyntick with lazy callbacks");
+               }
+               tp = &rdtp->idle_gp_timer;
+               mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+               rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
                return; /* Nothing more to do immediately. */
-       } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+       } else if (--(rdtp->dyntick_drain) <= 0) {
                /* We have hit the limit, so time to give up. */
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+               rdtp->dyntick_holdoff = jiffies;
                trace_rcu_prep_idle("Begin holdoff");
                invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
                return;
@@ -2227,7 +2245,7 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_idle_count_callbacks_posted(void)
 {
-       __this_cpu_add(rcu_nonlazy_posted, 1);
+       __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2256,12 @@ static void rcu_idle_count_callbacks_posted(void)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+       struct timer_list *tltp = &rdtp->idle_gp_timer;
 
        sprintf(cp, "drain=%d %c timer=%lu",
-               per_cpu(rcu_dyntick_drain, cpu),
-               per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+               rdtp->dyntick_drain,
+               rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
                timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
index ab56a1764d4db8c6a5136cd3b636dc330648783b..e8cd2027abbd1e2e0ed6d432974816cef2845a42 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
index 39eb6011bc38e3f20188c942cb31a173fce74093..468bdd44c1baeb914cfc93037b86691b839ccc46 100644 (file)
@@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features =
 #define SCHED_FEAT(name, enabled)      \
        #name ,
 
-static __read_mostly char *sched_feat_names[] = {
+static const char * const sched_feat_names[] = {
 #include "features.h"
-       NULL
 };
 
 #undef SCHED_FEAT
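/*
 * Illustrative aside: the SCHED_FEAT()/#include "features.h" pair above is
 * an X-macro.  Each SCHED_FEAT(name, enabled) line expands to the string
 * "name", so a single feature list yields this string table and can yield
 * other tables elsewhere by redefining SCHED_FEAT.  The standalone sketch
 * below inlines two made-up entries instead of including a header.
 */
#include <stdio.h>

#define DEMO_FEAT(name, enabled) #name,
static const char * const demo_feat_names[] = {
        DEMO_FEAT(FOO, 1)
        DEMO_FEAT(BAR, 0)
};
#undef DEMO_FEAT

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(demo_feat_names) / sizeof(demo_feat_names[0]); i++)
                printf("feature %u: %s\n", i, demo_feat_names[i]);
        return 0;
}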
@@ -2082,7 +2081,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
-       rcu_switch_from(prev);
        switch_to(prev, next, prev);
 
        barrier();
@@ -2162,11 +2160,73 @@ unsigned long this_cpu_load(void)
 }
 
 
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns into the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    serious number of cpus, therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out -- true per definition, we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper-bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't lose the delta, just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
+ */
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -2183,6 +2243,9 @@ static long calc_load_fold_active(struct rq *this_rq)
        return delta;
 }
 
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
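/*
 * Illustrative aside: a standalone, fixed-point rendering of the
 * a1 = a0 * e + a * (1 - e) identity noted above.  FSHIFT, FIXED_1 and
 * EXP_1 mirror the constants in include/linux/sched.h; everything else is
 * demo-only and not the kernel's calc_load() byte for byte.
 */
#include <stdio.h>

#define FSHIFT  11                      /* bits of fixed-point precision */
#define FIXED_1 (1 << FSHIFT)           /* 1.0 in fixed point */
#define EXP_1   1884                    /* FIXED_1 / exp(5sec / 1min) */

static unsigned long demo_decay(unsigned long load, unsigned long exp,
                                unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avg = 0;
        int i;

        /* Ten 5-second windows with four runnable tasks: avg climbs toward 4. */
        for (i = 0; i < 10; i++) {
                avg = demo_decay(avg, EXP_1, 4 * FIXED_1);
                printf("%lu.%02lu\n", avg >> FSHIFT,
                       ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
        }
        return 0;
}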
@@ -2194,30 +2257,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumulating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
+
+static inline int calc_load_write_idx(void)
+{
+       int idx = calc_load_idx;
+
+       /*
+        * See calc_global_nohz(), if we observe the new index, we also
+        * need to observe the new update time.
+        */
+       smp_rmb();
+
+       /*
+        * If the folding window started, make sure we start writing in the
+        * next idle-delta.
+        */
+       if (!time_before(jiffies, calc_load_update))
+               idx++;
 
-void calc_load_account_idle(struct rq *this_rq)
+       return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
 {
+       return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+       struct rq *this_rq = this_rq();
        long delta;
 
+       /*
+        * We're going into NOHZ mode, if there's any pending delta, fold it
+        * into the pending idle delta.
+        */
        delta = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks_idle);
+       if (delta) {
+               int idx = calc_load_write_idx();
+               atomic_long_add(delta, &calc_load_idle[idx]);
+       }
 }
 
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
 {
-       long delta = 0;
+       struct rq *this_rq = this_rq();
 
        /*
-        * Its got a race, we don't care...
+        * If we're still before the sample window, we're done.
         */
-       if (atomic_long_read(&calc_load_tasks_idle))
-               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
+
+       /*
+        * We woke inside or after the sample window, this means we're already
+        * accounted through the nohz accounting, so skip the entire deal and
+        * sync up for the next window.
+        */
+       this_rq->calc_load_update = calc_load_update;
+       if (time_before(jiffies, this_rq->calc_load_update + 10))
+               this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+       int idx = calc_load_read_idx();
+       long delta = 0;
+
+       if (atomic_long_read(&calc_load_idle[idx]))
+               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
 
        return delta;
 }
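/*
 * Illustrative aside: a toy model of the two idle-delta slots described in
 * the comment above.  "window_open" stands in for
 * !time_before(jiffies, calc_load_update); nothing here is kernel code.
 */
#include <stdio.h>

static long idle_delta[2];
static int flip;                        /* models calc_load_idx */

static int write_idx(int window_open) { return (flip + window_open) & 1; }
static int read_idx(void)             { return flip & 1; }

int main(void)
{
        /* One CPU goes idle with 3 tasks folded before the window opens... */
        idle_delta[write_idx(0)] += 3;
        /* ...another goes idle with 2 tasks after the window has opened. */
        idle_delta[write_idx(1)] += 2;

        /* The sampler folds only the old slot: it sees 3 now, 2 next time. */
        printf("folded now: %ld, deferred: %ld\n",
               idle_delta[read_idx()], idle_delta[read_idx() ^ 1]);

        flip++;                         /* calc_global_nohz() flips the index */
        return 0;
}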
@@ -2303,66 +2454,39 @@ static void calc_global_nohz(void)
 {
        long delta, active, n;
 
-       /*
-        * If we crossed a calc_load_update boundary, make sure to fold
-        * any pending idle changes, the respective CPUs might have
-        * missed the tick driven calc_load_account_active() update
-        * due to NO_HZ.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       /*
-        * It could be the one fold was all it took, we done!
-        */
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
-
-       /*
-        * Catch-up, fold however many we are behind still
-        */
-       delta = jiffies - calc_load_update - 10;
-       n = 1 + (delta / LOAD_FREQ);
+       if (!time_before(jiffies, calc_load_update + 10)) {
+               /*
+                * Catch-up, fold however many we are behind still
+                */
+               delta = jiffies - calc_load_update - 10;
+               n = 1 + (delta / LOAD_FREQ);
 
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
+               active = atomic_long_read(&calc_load_tasks);
+               active = active > 0 ? active * FIXED_1 : 0;
 
-       avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-       avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-       avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-       calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+               calc_load_update += n * LOAD_FREQ;
+       }
 
-static inline long calc_load_fold_idle(void)
-{
-       return 0;
+       /*
+        * Flip the idle index...
+        *
+        * Make sure we first write the new time then flip the index, so that
+        * calc_load_write_idx() will see the new time when it reads the new
+        * index, this avoids a double flip messing things up.
+        */
+       smp_wmb();
+       calc_load_idx++;
 }
+#else /* !CONFIG_NO_HZ */
 
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
 
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2370,11 +2494,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  */
 void calc_global_load(unsigned long ticks)
 {
-       long active;
+       long active, delta;
 
        if (time_before(jiffies, calc_load_update + 10))
                return;
 
+       /*
+        * Fold the 'old' idle-delta to include all NO_HZ cpus.
+        */
+       delta = calc_load_fold_idle();
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;
 
@@ -2385,12 +2516,7 @@ void calc_global_load(unsigned long ticks)
        calc_load_update += LOAD_FREQ;
 
        /*
-        * Account one period with whatever state we found before
-        * folding in the nohz state and ageing the entire idle period.
-        *
-        * This avoids loosing a sample when we go idle between 
-        * calc_load_account_active() (10 ticks ago) and now and thus
-        * under-accounting.
+        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
         */
        calc_global_nohz();
 }
@@ -2407,13 +2533,16 @@ static void calc_load_account_active(struct rq *this_rq)
                return;
 
        delta  = calc_load_fold_active(this_rq);
-       delta += calc_load_fold_idle();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
        this_rq->calc_load_update += LOAD_FREQ;
 }
 
+/*
+ * End of global load-average stuff
+ */
+
 /*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
@@ -2517,25 +2646,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
        sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_NO_HZ
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we cannot use the delta approach from the regular tick since that
+ * would seriously skew the load calculation. However we'll make do for those
+ * updates happening while idle (nohz_idle_balance) or coming out of idle
+ * (tick_nohz_idle_exit).
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
 /*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
 void update_idle_cpu_load(struct rq *this_rq)
 {
-       unsigned long curr_jiffies = jiffies;
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
        unsigned long load = this_rq->load.weight;
        unsigned long pending_updates;
 
        /*
-        * Bloody broken means of dealing with nohz, but better than nothing..
-        * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
-        * update and see 0 difference the one time and 2 the next, even though
-        * we ticked at roughtly the same rate.
-        *
-        * Hence we only use this from nohz_idle_balance() and skip this
-        * nonsense when called from the scheduler_tick() since that's
-        * guaranteed a stable rate.
+        * bail if there's load or we're actually up-to-date.
         */
        if (load || curr_jiffies == this_rq->last_load_update_tick)
                return;
@@ -2546,13 +2682,39 @@ void update_idle_cpu_load(struct rq *this_rq)
        __update_cpu_load(this_rq, load, pending_updates);
 }
 
+/*
+ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ */
+void update_cpu_load_nohz(void)
+{
+       struct rq *this_rq = this_rq();
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long pending_updates;
+
+       if (curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       raw_spin_lock(&this_rq->lock);
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       if (pending_updates) {
+               this_rq->last_load_update_tick = curr_jiffies;
+               /*
+                * We were idle, this means load 0, the current load might be
+                * !0 due to remote wakeups and the sort.
+                */
+               __update_cpu_load(this_rq, 0, pending_updates);
+       }
+       raw_spin_unlock(&this_rq->lock);
+}
+#endif /* CONFIG_NO_HZ */
+
 /*
  * Called from scheduler_tick()
  */
 static void update_cpu_load_active(struct rq *this_rq)
 {
        /*
-        * See the mess in update_idle_cpu_load().
+        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
         */
        this_rq->last_load_update_tick = jiffies;
        __update_cpu_load(this_rq, this_rq->load.weight, 1);
@@ -4982,7 +5144,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                p->sched_class->set_cpus_allowed(p, new_mask);
 
        cpumask_copy(&p->cpus_allowed, new_mask);
-       p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+       p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -5524,15 +5686,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_domain_debug_enabled;
+static __read_mostly int sched_debug_enabled;
 
-static int __init sched_domain_debug_setup(char *str)
+static int __init sched_debug_setup(char *str)
 {
-       sched_domain_debug_enabled = 1;
+       sched_debug_enabled = 1;
 
        return 0;
 }
-early_param("sched_debug", sched_domain_debug_setup);
+early_param("sched_debug", sched_debug_setup);
+
+static inline bool sched_debug(void)
+{
+       return sched_debug_enabled;
+}
 
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
@@ -5572,7 +5739,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->sgp->power) {
+               /*
+                * Even though we initialize ->power to something semi-sane,
+                * we leave power_orig unset. This allows us to detect if
+                * domain iteration is still funny without causing /0 traps.
+                */
+               if (!group->sgp->power_orig) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -5620,7 +5792,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
        int level = 0;
 
-       if (!sched_domain_debug_enabled)
+       if (!sched_debug_enabled)
                return;
 
        if (!sd) {
@@ -5641,6 +5813,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 }
 #else /* !CONFIG_SCHED_DEBUG */
 # define sched_domain_debug(sd, cpu) do { } while (0)
+static inline bool sched_debug(void)
+{
+       return false;
+}
 #endif /* CONFIG_SCHED_DEBUG */
 
 static int sd_degenerate(struct sched_domain *sd)
@@ -5962,6 +6138,44 @@ struct sched_domain_topology_level {
        struct sd_data      data;
 };
 
+/*
+ * Build an iteration mask that can exclude certain CPUs from the upwards
+ * domain traversal.
+ *
+ * Asymmetric node setups can result in situations where the domain tree is of
+ * unequal depth, make sure to skip domains that already cover the entire
+ * range.
+ *
+ * In that case build_sched_domains() will have terminated the iteration early
+ * and our sibling sd spans will be empty. Domains should always include the
+ * cpu they're built on, so check that.
+ *
+ */
+static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+{
+       const struct cpumask *span = sched_domain_span(sd);
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *sibling;
+       int i;
+
+       for_each_cpu(i, span) {
+               sibling = *per_cpu_ptr(sdd->sd, i);
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+                       continue;
+
+               cpumask_set_cpu(i, sched_group_mask(sg));
+       }
+}
+
+/*
+ * Return the canonical balance cpu for this group, this is the first cpu
+ * of this group that's also in the iteration mask.
+ */
+int group_balance_cpu(struct sched_group *sg)
+{
+       return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+}
+
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
@@ -5980,6 +6194,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (cpumask_test_cpu(i, covered))
                        continue;
 
+               child = *per_cpu_ptr(sdd->sd, i);
+
+               /* See the comment near build_group_mask(). */
+               if (!cpumask_test_cpu(i, sched_domain_span(child)))
+                       continue;
+
                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                GFP_KERNEL, cpu_to_node(cpu));
 
@@ -5987,8 +6207,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                        goto fail;
 
                sg_span = sched_group_cpus(sg);
-
-               child = *per_cpu_ptr(sdd->sd, i);
                if (child->child) {
                        child = child->child;
                        cpumask_copy(sg_span, sched_domain_span(child));
@@ -5997,10 +6215,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
                cpumask_or(covered, covered, sg_span);
 
-               sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
-               atomic_inc(&sg->sgp->ref);
+               sg->sgp = *per_cpu_ptr(sdd->sgp, i);
+               if (atomic_inc_return(&sg->sgp->ref) == 1)
+                       build_group_mask(sd, sg);
 
-               if (cpumask_test_cpu(cpu, sg_span))
+               /*
+                * Initialize sgp->power such that even if we mess up the
+                * domains and no possible iteration will get us here, we won't
+                * die on a /0 trap.
+                */
+               sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+
+               /*
+                * Make sure the first group of this domain contains the
+                * canonical balance cpu. Otherwise the sched_domain iteration
+                * breaks. See update_sg_lb_stats().
+                */
+               if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
+                   group_balance_cpu(sg) == cpu)
                        groups = sg;
 
                if (!first)
@@ -6074,6 +6306,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
                cpumask_clear(sched_group_cpus(sg));
                sg->sgp->power = 0;
+               cpumask_setall(sched_group_mask(sg));
 
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
@@ -6115,7 +6348,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
                sg = sg->next;
        } while (sg != sd->groups);
 
-       if (cpu != group_first_cpu(sg))
+       if (cpu != group_balance_cpu(sg))
                return;
 
        update_group_power(sd, cpu);
@@ -6165,11 +6398,8 @@ int sched_domain_level_max;
 
 static int __init setup_relax_domain_level(char *str)
 {
-       unsigned long val;
-
-       val = simple_strtoul(str, NULL, 0);
-       if (val < sched_domain_level_max)
-               default_relax_domain_level = val;
+       if (kstrtoint(str, 0, &default_relax_domain_level))
+               pr_warn("Unable to set relax_domain_level\n");
 
        return 1;
 }
@@ -6279,14 +6509,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol
 #ifdef CONFIG_NUMA
 
 static int sched_domains_numa_levels;
-static int sched_domains_numa_scale;
 static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
 static inline int sd_local_flags(int level)
 {
-       if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
+       if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
                return 0;
 
        return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
@@ -6344,6 +6573,42 @@ static const struct cpumask *sd_numa_mask(int cpu)
        return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
 }
 
+static void sched_numa_warn(const char *str)
+{
+       static int done = false;
+       int i,j;
+
+       if (done)
+               return;
+
+       done = true;
+
+       printk(KERN_WARNING "ERROR: %s\n\n", str);
+
+       for (i = 0; i < nr_node_ids; i++) {
+               printk(KERN_WARNING "  ");
+               for (j = 0; j < nr_node_ids; j++)
+                       printk(KERN_CONT "%02d ", node_distance(i,j));
+               printk(KERN_CONT "\n");
+       }
+       printk(KERN_WARNING "\n");
+}
+
+static bool find_numa_distance(int distance)
+{
+       int i;
+
+       if (distance == node_distance(0, 0))
+               return true;
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               if (sched_domains_numa_distance[i] == distance)
+                       return true;
+       }
+
+       return false;
+}
+
 static void sched_init_numa(void)
 {
        int next_distance, curr_distance = node_distance(0, 0);
@@ -6351,7 +6616,6 @@ static void sched_init_numa(void)
        int level = 0;
        int i, j, k;
 
-       sched_domains_numa_scale = curr_distance;
        sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
        if (!sched_domains_numa_distance)
                return;
@@ -6362,23 +6626,41 @@ static void sched_init_numa(void)
         *
         * Assumes node_distance(0,j) includes all distances in
         * node_distance(i,j) in order to avoid cubic time.
-        *
-        * XXX: could be optimized to O(n log n) by using sort()
         */
        next_distance = curr_distance;
        for (i = 0; i < nr_node_ids; i++) {
                for (j = 0; j < nr_node_ids; j++) {
-                       int distance = node_distance(0, j);
-                       if (distance > curr_distance &&
-                                       (distance < next_distance ||
-                                        next_distance == curr_distance))
-                               next_distance = distance;
+                       for (k = 0; k < nr_node_ids; k++) {
+                               int distance = node_distance(i, k);
+
+                               if (distance > curr_distance &&
+                                   (distance < next_distance ||
+                                    next_distance == curr_distance))
+                                       next_distance = distance;
+
+                               /*
+                                * While not a strong assumption, it would be nice to know
+                                * about cases where node A is connected to B but B is not
+                                * equally connected to A.
+                                */
+                               if (sched_debug() && node_distance(k, i) != distance)
+                                       sched_numa_warn("Node-distance not symmetric");
+
+                               if (sched_debug() && i && !find_numa_distance(distance))
+                                       sched_numa_warn("Node-0 not representative");
+                       }
+                       if (next_distance != curr_distance) {
+                               sched_domains_numa_distance[level++] = next_distance;
+                               sched_domains_numa_levels = level;
+                               curr_distance = next_distance;
+                       } else break;
                }
-               if (next_distance != curr_distance) {
-                       sched_domains_numa_distance[level++] = next_distance;
-                       sched_domains_numa_levels = level;
-                       curr_distance = next_distance;
-               } else break;
+
+               /*
+                * In case of sched_debug() we verify the above assumption.
+                */
+               if (!sched_debug())
+                       break;
        }
        /*
         * 'level' contains the number of unique distances, excluding the
@@ -6403,7 +6685,7 @@ static void sched_init_numa(void)
                        return;
 
                for (j = 0; j < nr_node_ids; j++) {
-                       struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
+                       struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
                        if (!mask)
                                return;
 
@@ -6490,7 +6772,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
                        *per_cpu_ptr(sdd->sg, j) = sg;
 
-                       sgp = kzalloc_node(sizeof(struct sched_group_power),
+                       sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
                        if (!sgp)
                                return -ENOMEM;
@@ -6543,7 +6825,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
        if (!sd)
                return child;
 
-       set_domain_attribute(sd, attr);
        cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
        if (child) {
                sd->level = child->level + 1;
@@ -6551,6 +6832,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                child->parent = sd;
        }
        sd->child = child;
+       set_domain_attribute(sd, attr);
 
        return sd;
 }
@@ -6691,7 +6973,6 @@ static int init_sched_domains(const struct cpumask *cpu_map)
        if (!doms_cur)
                doms_cur = &fallback_doms;
        cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
-       dattr_cur = NULL;
        err = build_sched_domains(doms_cur[0], NULL);
        register_sched_domain_sysctl();
 
index 940e6d17cf96a333fd7ea0c4543e2578effd4638..c099cc6eebe3a6f0cf19dde597c2df583ce57081 100644 (file)
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
        int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                return prev_cpu;
 
        if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       u64 total, available;
+       u64 total, available, age_stamp, avg;
 
-       total = sched_avg_period() + (rq->clock - rq->age_stamp);
+       /*
+        * Since we're reading these variables without serialization make sure
+        * we read them once before doing sanity checks on them.
+        */
+       age_stamp = ACCESS_ONCE(rq->age_stamp);
+       avg = ACCESS_ONCE(rq->rt_avg);
+
+       total = sched_avg_period() + (rq->clock - age_stamp);
 
-       if (unlikely(total < rq->rt_avg)) {
+       if (unlikely(total < avg)) {
                /* Ensures that power won't end up being negative */
                available = 0;
        } else {
-               available = total - rq->rt_avg;
+               available = total - avg;
        }
 
        if (unlikely((s64)total < SCHED_POWER_SCALE))
@@ -3574,13 +3581,28 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
        power = 0;
 
-       group = child->groups;
-       do {
-               power += group->sgp->power;
-               group = group->next;
-       } while (group != child->groups);
+       if (child->flags & SD_OVERLAP) {
+               /*
+                * SD_OVERLAP domains cannot assume that child groups
+                * span the current group.
+                */
 
-       sdg->sgp->power = power;
+               for_each_cpu(cpu, sched_group_cpus(sdg))
+                       power += power_of(cpu);
+       } else  {
+               /*
+                * !SD_OVERLAP domains can assume that child groups
+                * span the current group.
+                */ 
+
+               group = child->groups;
+               do {
+                       power += group->sgp->power;
+                       group = group->next;
+               } while (group != child->groups);
+       }
+
+       sdg->sgp->power_orig = sdg->sgp->power = power;
 }
 
 /*
@@ -3610,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
- * @sd: The sched_domain whose statistics are to be updated.
+ * @env: The load balancing environment.
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
@@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        int i;
 
        if (local_group)
-               balance_cpu = group_first_cpu(group);
+               balance_cpu = group_balance_cpu(group);
 
        /* Tally up the load of all CPUs in the group */
        max_cpu_load = 0;
@@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
                /* Bias balancing toward cpus of our domain */
                if (local_group) {
-                       if (idle_cpu(i) && !first_idle_cpu) {
+                       if (idle_cpu(i) && !first_idle_cpu &&
+                                       cpumask_test_cpu(i, sched_group_mask(group))) {
                                first_idle_cpu = 1;
                                balance_cpu = i;
                        }
@@ -3719,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 /**
  * update_sd_pick_busiest - return 1 on busiest group
- * @sd: sched_domain whose statistics are to be checked
+ * @env: The load balancing environment.
  * @sds: sched_domain statistics
  * @sg: sched_group candidate to be checked for being the busiest
  * @sgs: sched_group statistics
- * @this_cpu: the current cpu
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
@@ -3761,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
- * @sd: sched_domain whose statistics are to be updated.
- * @this_cpu: Cpu for which load balance is currently performed.
- * @idle: Idle status of this_cpu
+ * @env: The load balancing environment.
  * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
@@ -3852,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * Returns 1 when packing is required and a task should be moved to
  * this CPU.  The amount of the imbalance is returned in *imbalance.
  *
- * @sd: The sched_domain whose packing is to be checked.
+ * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain which is to be packed
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: returns amount of imbalanced due to packing.
  */
 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 {
@@ -3881,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
  * fix_small_imbalance - Calculate the minor imbalance that exists
  *                     amongst the groups of a sched_domain, during
  *                     load balancing.
+ * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: Variable to store the imbalance.
  */
 static inline
 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
@@ -4026,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * Also calculates the amount of weighted load which should be moved
  * to restore balance.
  *
- * @sd: The sched_domain whose busiest group is to be returned.
- * @this_cpu: The cpu for which load balancing is currently being performed.
- * @imbalance: Variable which stores amount of weighted load which should
- *             be moved to restore balance/put a group to idle.
- * @idle: The idle status of this_cpu.
+ * @env: The load balancing environment.
  * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *     is the appropriate cpu to perform load balancing at this_level.
index b44d604b35d1a8b4164aa2ea4d94d00bf9063824..b6baf370cae973707c24389b30b38e43c76bb4d7 100644 (file)
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
        schedstat_inc(rq, sched_goidle);
-       calc_load_account_idle(rq);
        return rq->idle;
 }
 
index c5565c3c515fd2d15dc5ed95f59a970fca087815..573e1ca01102066468e65aee96c57ea30cccd6de 100644 (file)
@@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;
 
+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total++;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;
 
        update_rt_migration(rt_rq);
@@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;
 
+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total--;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;
 
        update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-       if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+       if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 
        inc_nr_running(rq);
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
        cpu = task_cpu(p);
 
-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                goto out;
 
        /* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
-           (curr->rt.nr_cpus_allowed < 2 ||
+           (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
-           (p->rt.nr_cpus_allowed > 1)) {
+           (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);
 
                if (target != -1)
@@ -1276,10 +1282,10 @@ out:
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-       if (rq->curr->rt.nr_cpus_allowed == 1)
+       if (rq->curr->nr_cpus_allowed == 1)
                return;
 
-       if (p->rt.nr_cpus_allowed != 1
+       if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;
 
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
-       if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+       if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
 
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-           (p->rt.nr_cpus_allowed > 1))
+           (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
 }
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task)
        if (unlikely(!lowest_mask))
                return -1;
 
-       if (task->rt.nr_cpus_allowed == 1)
+       if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                                     task_running(rq, task) ||
                                     !task->on_rq)) {
 
-                               raw_spin_unlock(&lowest_rq->lock);
+                               double_unlock_balance(rq, lowest_rq);
                                lowest_rq = NULL;
                                break;
                        }
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
-       BUG_ON(p->rt.nr_cpus_allowed <= 1);
+       BUG_ON(p->nr_cpus_allowed <= 1);
 
        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
-           p->rt.nr_cpus_allowed > 1 &&
+           p->nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
-           (rq->curr->rt.nr_cpus_allowed < 2 ||
+           (rq->curr->nr_cpus_allowed < 2 ||
             rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
 }
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
         * Only update if the process changes its state from whether it
         * can migrate or not.
         */
-       if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+       if ((p->nr_cpus_allowed > 1) == (weight > 1))
                return;
 
        rq = task_rq(p);
@@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
+       struct sched_rt_entity *rt_se = &p->rt;
+
        update_curr_rt(rq);
 
        watchdog(rq, p);
@@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        p->rt.time_slice = RR_TIMESLICE;
 
        /*
-        * Requeue to the end of queue if we are not the only element
-        * on the queue:
+        * Requeue to the end of queue if we (and all of our ancestors) are not
+        * the only element on the queue
         */
-       if (p->rt.run_list.prev != p->rt.run_list.next) {
-               requeue_task_rt(rq, p, 0);
-               set_tsk_need_resched(p);
+       for_each_sched_rt_entity(rt_se) {
+               if (rt_se->run_list.prev != rt_se->run_list.next) {
+                       requeue_task_rt(rq, p, 0);
+                       set_tsk_need_resched(p);
+                       return;
+               }
        }
 }
 
index ba9dccfd24ce95b2b198257fb5e787e21da36d41..55844f24435a0ad5c1c50561a54424ce4b25992d 100644 (file)
@@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_id);
 
+extern int group_balance_cpu(struct sched_group *sg);
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -940,8 +942,6 @@ static inline u64 sched_avg_period(void)
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 }
 
-void calc_load_account_idle(struct rq *this_rq);
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
index e1a797e028a320d62c01e77a47f533e0998adaa3..98f60c5caa1bef91733a4c0bcc646da4fa0b7a24 100644 (file)
@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void)
        per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
+/**
+ * idle_init - Initialize the idle thread for a cpu
+ * @cpu:       The cpu for which the idle thread should be initialized
+ *
+ * Creates the thread if it does not exist.
+ */
 static inline void idle_init(unsigned int cpu)
 {
        struct task_struct *tsk = per_cpu(idle_threads, cpu);
@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu)
 }
 
 /**
- * idle_thread_init - Initialize the idle thread for a cpu
- * @cpu:       The cpu for which the idle thread should be initialized
- *
- * Creates the thread if it does not exist.
+ * idle_threads_init - Initialize idle threads for all cpus
  */
 void __init idle_threads_init(void)
 {
-       unsigned int cpu;
+       unsigned int cpu, boot_cpu;
+
+       boot_cpu = smp_processor_id();
 
        for_each_possible_cpu(cpu) {
-               if (cpu != smp_processor_id())
+               if (cpu != boot_cpu)
                        idle_init(cpu);
        }
 }
index 9ff89cb9657a681d15714447d7d3c64939fbbe50..2d39a84cd8575e6295f666421cee01b41652b3b1 100644 (file)
@@ -1786,27 +1786,12 @@ SYSCALL_DEFINE1(umask, int, mask)
 }
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
-static bool vma_flags_mismatch(struct vm_area_struct *vma,
-                              unsigned long required,
-                              unsigned long banned)
-{
-       return (vma->vm_flags & required) != required ||
-               (vma->vm_flags & banned);
-}
-
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
        struct file *exe_file;
        struct dentry *dentry;
        int err;
 
-       /*
-        * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's
-        * remain. So perform a quick test first.
-        */
-       if (mm->num_exe_file_vmas)
-               return -EBUSY;
-
        exe_file = fget(fd);
        if (!exe_file)
                return -EBADF;
@@ -1827,17 +1812,35 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
        if (err)
                goto exit;
 
+       down_write(&mm->mmap_sem);
+
+       /*
+        * Forbid mm->exe_file change if old file still mapped.
+        */
+       err = -EBUSY;
+       if (mm->exe_file) {
+               struct vm_area_struct *vma;
+
+               for (vma = mm->mmap; vma; vma = vma->vm_next)
+                       if (vma->vm_file &&
+                           path_equal(&vma->vm_file->f_path,
+                                      &mm->exe_file->f_path))
+                               goto exit_unlock;
+       }
+
        /*
         * The symlink can be changed only once, just to disallow arbitrary
         * transitions malicious software might bring in. This means one
         * could make a snapshot over all processes running and monitor
         * /proc/pid/exe changes to notice unusual activity if needed.
         */
-       down_write(&mm->mmap_sem);
-       if (likely(!mm->exe_file))
-               set_mm_exe_file(mm, exe_file);
-       else
-               err = -EBUSY;
+       err = -EPERM;
+       if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
+               goto exit_unlock;
+
+       err = 0;
+       set_mm_exe_file(mm, exe_file);
+exit_unlock:
        up_write(&mm->mmap_sem);
 
 exit:
@@ -1862,7 +1865,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
        if (opt == PR_SET_MM_EXE_FILE)
                return prctl_set_mm_exe_file(mm, (unsigned int)addr);
 
-       if (addr >= TASK_SIZE)
+       if (addr >= TASK_SIZE || addr < mmap_min_addr)
                return -EINVAL;
 
        error = -EINVAL;
@@ -1924,12 +1927,6 @@ static int prctl_set_mm(int opt, unsigned long addr,
                        error = -EFAULT;
                        goto out;
                }
-#ifdef CONFIG_STACK_GROWSUP
-               if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0))
-#else
-               if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0))
-#endif
-                       goto out;
                if (opt == PR_SET_MM_START_STACK)
                        mm->start_stack = addr;
                else if (opt == PR_SET_MM_ARG_START)
@@ -1981,12 +1978,22 @@ out:
        up_read(&mm->mmap_sem);
        return error;
 }
+
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+       return put_user(me->clear_child_tid, tid_addr);
+}
+
 #else /* CONFIG_CHECKPOINT_RESTORE */
 static int prctl_set_mm(int opt, unsigned long addr,
                        unsigned long arg4, unsigned long arg5)
 {
        return -EINVAL;
 }
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+       return -EINVAL;
+}
 #endif
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2141,6 +2148,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                case PR_SET_MM:
                        error = prctl_set_mm(arg2, arg3, arg4, arg5);
                        break;
+               case PR_GET_TID_ADDRESS:
+                       error = prctl_get_tid_address(me, (int __user **)arg2);
+                       break;
                case PR_SET_CHILD_SUBREAPER:
                        me->signal->is_child_subreaper = !!arg2;
                        error = 0;
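
For illustration, a minimal user-space sketch of the PR_GET_TID_ADDRESS operation wired up above; the numeric fallback for PR_GET_TID_ADDRESS is an assumption taken from include/linux/prctl.h, and kernels built without CONFIG_CHECKPOINT_RESTORE return -EINVAL:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_TID_ADDRESS
#define PR_GET_TID_ADDRESS 40	/* assumed value, see include/linux/prctl.h */
#endif

int main(void)
{
	int *tid_addr = NULL;

	/* Ask the kernel for this thread's clear_child_tid pointer. */
	if (prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0) != 0) {
		perror("prctl(PR_GET_TID_ADDRESS)");
		return 1;
	}
	printf("clear_child_tid address: %p\n", (void *)tid_addr);
	return 0;
}
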
index 9cd928f7a7c6a18e8188988a3ed7cfcfec8c808e..7e1ce012a851351c10853fe732e714afa2bb8a5c 100644 (file)
@@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev)
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
-static void clockevents_config(struct clock_event_device *dev,
-                              u32 freq)
+void clockevents_config(struct clock_event_device *dev, u32 freq)
 {
        u64 sec;
 
index 70b33abcc7bb0e92762b05af6c6ceaa1be75cd38..b7fbadc5c973c928e531cf6d701f1520e4809812 100644 (file)
@@ -409,7 +409,9 @@ int second_overflow(unsigned long secs)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
-               if (secs % 86400 == 0) {
+               if (!(time_status & STA_INS))
+                       time_state = TIME_OK;
+               else if (secs % 86400 == 0) {
                        leap = -1;
                        time_state = TIME_OOP;
                        time_tai++;
@@ -418,7 +420,9 @@ int second_overflow(unsigned long secs)
                }
                break;
        case TIME_DEL:
-               if ((secs + 1) % 86400 == 0) {
+               if (!(time_status & STA_DEL))
+                       time_state = TIME_OK;
+               else if ((secs + 1) % 86400 == 0) {
                        leap = 1;
                        time_tai--;
                        time_state = TIME_WAIT;
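
The STA_INS/STA_DEL bits tested above are armed from user space through adjtimex(2); a hedged sketch of that path (requires CAP_SYS_TIME, and actually scheduling a leap second is not something to run casually):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = ADJ_STATUS, .status = STA_INS };
	int state = adjtimex(&tx);	/* returns TIME_OK, TIME_INS, ... */

	if (state < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("leap-second insert armed, clock state is now %d\n", state);
	return 0;
}
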
index 6a3a5b9ff56176c2951256edf7ef9601a0f49106..4a08472c3ca71399edc34ffafa9f5394f25597e7 100644 (file)
@@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
        unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+       unsigned long rcu_delta_jiffies;
        ktime_t last_update, expires, now;
        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
        u64 time_delta;
@@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                time_delta = timekeeping_max_deferment();
        } while (read_seqretry(&xtime_lock, seq));
 
-       if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+       if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
            arch_needs_cpu(cpu)) {
                next_jiffies = last_jiffies + 1;
                delta_jiffies = 1;
@@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                /* Get the next timer wheel timer */
                next_jiffies = get_next_timer_interrupt(last_jiffies);
                delta_jiffies = next_jiffies - last_jiffies;
+               if (rcu_delta_jiffies < delta_jiffies) {
+                       next_jiffies = last_jiffies + rcu_delta_jiffies;
+                       delta_jiffies = rcu_delta_jiffies;
+               }
        }
        /*
         * Do not stop the tick, if we are only one off
@@ -401,6 +406,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                 */
                if (!ts->tick_stopped) {
                        select_nohz_load_balancer(1);
+                       calc_load_enter_idle();
 
                        ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
@@ -576,6 +582,7 @@ void tick_nohz_idle_exit(void)
        /* Update jiffies first */
        select_nohz_load_balancer(0);
        tick_do_update_jiffies64(now);
+       update_cpu_load_nohz();
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
@@ -591,6 +598,7 @@ void tick_nohz_idle_exit(void)
                account_idle_ticks(ticks);
 #endif
 
+       calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
@@ -814,6 +822,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
        return HRTIMER_RESTART;
 }
 
+static int sched_skew_tick;
+
+static int __init skew_tick(char *str)
+{
+       get_option(&str, &sched_skew_tick);
+
+       return 0;
+}
+early_param("skew_tick", skew_tick);
+
 /**
  * tick_setup_sched_timer - setup the tick emulation timer
  */
@@ -831,6 +849,14 @@ void tick_setup_sched_timer(void)
        /* Get the next period (per cpu) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
+       /* Offset the tick to avert xtime_lock contention. */
+       if (sched_skew_tick) {
+               u64 offset = ktime_to_ns(tick_period) >> 1;
+               do_div(offset, num_possible_cpus());
+               offset *= smp_processor_id();
+               hrtimer_add_expires_ns(&ts->sched_timer, offset);
+       }
+
        for (;;) {
                hrtimer_forward(&ts->sched_timer, now, tick_period);
                hrtimer_start_expires(&ts->sched_timer,
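
As a quick illustration of the skew arithmetic above, the same computation in a stand-alone program (HZ and the CPU count are assumed example values, not read from a running system):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t tick_period_ns = 1000000;	/* HZ=1000 assumed */
	const unsigned int nr_cpus = 8;			/* num_possible_cpus() assumed */

	for (unsigned int cpu = 0; cpu < nr_cpus; cpu++) {
		uint64_t offset = (tick_period_ns >> 1) / nr_cpus * cpu;
		printf("cpu%u: periodic tick skewed by %llu ns\n",
		       cpu, (unsigned long long)offset);
	}
	return 0;
}
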
index 6e46cacf5969c8290a7933a548b713a227b2e183..3447cfaf11e7d61b11d30bc063967e47d85e59d4 100644 (file)
@@ -70,6 +70,12 @@ struct timekeeper {
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec raw_time;
 
+       /* Offset clock monotonic -> clock realtime */
+       ktime_t offs_real;
+
+       /* Offset clock monotonic -> clock boottime */
+       ktime_t offs_boot;
+
        /* Seqlock for all timekeeper values */
        seqlock_t lock;
 };
@@ -172,6 +178,14 @@ static inline s64 timekeeping_get_ns_raw(void)
        return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
+static void update_rt_offset(void)
+{
+       struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;
+
+       set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+       timekeeper.offs_real = timespec_to_ktime(tmp);
+}
+
 /* must hold write on timekeeper.lock */
 static void timekeeping_update(bool clearntp)
 {
@@ -179,6 +193,7 @@ static void timekeeping_update(bool clearntp)
                timekeeper.ntp_error = 0;
                ntp_clear();
        }
+       update_rt_offset();
        update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
                         timekeeper.clock, timekeeper.mult);
 }
@@ -604,6 +619,7 @@ void __init timekeeping_init(void)
        }
        set_normalized_timespec(&timekeeper.wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
+       update_rt_offset();
        timekeeper.total_sleep_time.tv_sec = 0;
        timekeeper.total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -612,6 +628,12 @@ void __init timekeeping_init(void)
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
+static void update_sleep_time(struct timespec t)
+{
+       timekeeper.total_sleep_time = t;
+       timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
@@ -630,8 +652,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
        timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *delta);
-       timekeeper.total_sleep_time = timespec_add(
-                                       timekeeper.total_sleep_time, *delta);
+       update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
 }
 
 
@@ -696,6 +717,7 @@ static void timekeeping_resume(void)
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
+       timekeeping_update(false);
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
        touch_softlockup_watchdog();
@@ -962,6 +984,9 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
                timekeeper.xtime.tv_sec++;
                leap = second_overflow(timekeeper.xtime.tv_sec);
                timekeeper.xtime.tv_sec += leap;
+               timekeeper.wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        /* Accumulate raw time */
@@ -1077,6 +1102,9 @@ static void update_wall_time(void)
                timekeeper.xtime.tv_sec++;
                leap = second_overflow(timekeeper.xtime.tv_sec);
                timekeeper.xtime.tv_sec += leap;
+               timekeeper.wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        timekeeping_update(false);
@@ -1244,6 +1272,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
        } while (read_seqretry(&timekeeper.lock, seq));
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets
+ * Called from hrtimer_interrupt() or retrigger_next_event()
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+       ktime_t now;
+       unsigned int seq;
+       u64 secs, nsecs;
+
+       do {
+               seq = read_seqbegin(&timekeeper.lock);
+
+               secs = timekeeper.xtime.tv_sec;
+               nsecs = timekeeper.xtime.tv_nsec;
+               nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
+
+               *offs_real = timekeeper.offs_real;
+               *offs_boot = timekeeper.offs_boot;
+       } while (read_seqretry(&timekeeper.lock, seq));
+
+       now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+       now = ktime_sub(now, *offs_real);
+       return now;
+}
+#endif
+
 /**
  * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
  */
index 1d0f6a8a0e5e83680c0df3b28836a5f6a2103a39..f765465bffe47968752e272308d7fb5b63093a65 100644 (file)
@@ -1075,6 +1075,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
        rb_init_page(bpage->page);
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+       INIT_LIST_HEAD(&cpu_buffer->new_pages);
 
        ret = rb_allocate_pages(cpu_buffer, nr_pages);
        if (ret < 0)
@@ -1346,10 +1347,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
                         * If something was added to this page, it was full
                         * since it is not the tail page. So we deduct the
                         * bytes consumed in ring buffer from here.
-                        * No need to update overruns, since this page is
-                        * deleted from ring buffer and its entries are
-                        * already accounted for.
+                        * Increment overrun to account for the lost events.
                         */
+                       local_add(page_entries, &cpu_buffer->overrun);
                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
                }
 
index 68032c6177dbb84a28d0bc432773f841101c11b4..a7fa0702be1cd5b269ed6f15e9624f4b02968b11 100644 (file)
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 void tracing_off(void)
 {
        if (global_trace.buffer)
-               ring_buffer_record_on(global_trace.buffer);
+               ring_buffer_record_off(global_trace.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
@@ -3609,6 +3609,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3680,7 +3681,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4231,6 +4232,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4318,7 +4320,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
index e5e1d85b8c7c23090ce59b7e5b1e868535c85ef0..4b1dfba70f7cf8ae7397623656a9b695028f702a 100644 (file)
@@ -372,6 +372,13 @@ static int watchdog(void *unused)
 
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
 static int watchdog_nmi_enable(int cpu)
 {
        struct perf_event_attr *wd_attr;
@@ -390,11 +397,21 @@ static int watchdog_nmi_enable(int cpu)
 
        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+       /* save cpu0 error for future comparison */
+       if (cpu == 0 && IS_ERR(event))
+               cpu0_err = PTR_ERR(event);
+
        if (!IS_ERR(event)) {
-               pr_info("enabled, takes one hw-pmu counter.\n");
+               /* only print for cpu0 or when the result differs from cpu0's */
+               if (cpu == 0 || cpu0_err)
+                       pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }
 
+       /* skip displaying the same error again */
+       if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+               return PTR_ERR(event);
 
        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
index a42d3ae39648386b81a649e9844afd4e0b03bca9..ff5bdee4716d5206c7f16f4ce67e911f668c97e7 100644 (file)
@@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
        default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
        default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
 
+config PANIC_ON_OOPS
+       bool "Panic on Oops" if EXPERT
+       default n
+       help
+         Say Y here to enable the kernel to panic when it oopses. This
+         has the same effect as setting oops=panic on the kernel command
+         line.
+
+         This feature is useful to ensure that the kernel does not do
+         anything erroneous after an oops which could result in data
+         corruption or other issues.
+
+         Say N if unsure.
+
+config PANIC_ON_OOPS_VALUE
+       int
+       range 0 1
+       default 0 if !PANIC_ON_OOPS
+       default 1 if PANIC_ON_OOPS
+
 config DETECT_HUNG_TASK
        bool "Detect Hung Tasks"
        depends on DEBUG_KERNEL
index e5ec1e9c1aa52cc08c710a4dcc4c1815cad9c67b..f9a484676cb6a8e4782333ce164bf0662b583b78 100644 (file)
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 
        if (head->height == 0)
                return NULL;
-retry:
        longcpy(key, __key, geo->keylen);
+retry:
        dec_key(geo, key);
 
        node = head->node;
@@ -351,7 +351,7 @@ retry:
        }
 miss:
        if (retry_key) {
-               __key = retry_key;
+               longcpy(key, retry_key, geo->keylen);
                retry_key = NULL;
                goto retry;
        }
@@ -509,6 +509,7 @@ retry:
 int btree_insert(struct btree_head *head, struct btree_geo *geo,
                unsigned long *key, void *val, gfp_t gfp)
 {
+       BUG_ON(!val);
        return btree_insert_level(head, geo, key, val, 1, gfp);
 }
 EXPORT_SYMBOL_GPL(btree_insert);
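
A hedged sketch of the lib/btree API touched above, written as a trivial module; the btree_init/btree_lookup/btree_remove signatures are taken from lib/btree.c as of this merge, and the BUG_ON added above means a NULL value must never be inserted:

#include <linux/module.h>
#include <linux/btree.h>
#include <linux/slab.h>

static struct btree_head demo_head;

static int __init btree_demo_init(void)
{
	unsigned long key = 42;
	char *val = kstrdup("hello", GFP_KERNEL);
	int err;

	if (!val)
		return -ENOMEM;
	err = btree_init(&demo_head);
	if (err)
		goto out_free;
	/* NULL values now trip the BUG_ON() in btree_insert(). */
	err = btree_insert(&demo_head, &btree_geo64, &key, val, GFP_KERNEL);
	if (err)
		goto out_destroy;
	pr_info("btree lookup(42) = %s\n",
		(char *)btree_lookup(&demo_head, &btree_geo64, &key));
	return 0;

out_destroy:
	btree_destroy(&demo_head);
out_free:
	kfree(val);
	return err;
}

static void __exit btree_demo_exit(void)
{
	unsigned long key = 42;

	kfree(btree_remove(&demo_head, &btree_geo64, &key));
	btree_destroy(&demo_head);
}

module_init(btree_demo_init);
module_exit(btree_demo_exit);
MODULE_LICENSE("GPL");
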
index 518aea714d21d9dea1c7d52b0cf6878482ca5d82..66ce414891330964aacb48dee8079138034cad3d 100644 (file)
@@ -78,7 +78,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
 
 /* Global error count */
 static u32 error_count;
@@ -657,7 +657,7 @@ static int dma_debug_fs_init(void)
 
        global_disable_dent = debugfs_create_bool("disabled", 0444,
                        dma_debug_dent,
-                       (u32 *)&global_disable);
+                       &global_disable);
        if (!global_disable_dent)
                goto out_err;
 
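
The hunk above works because, in this kernel, debugfs_create_bool() takes a u32 * rather than a bool *; a hedged module-sized sketch of the same pattern (the "demo" directory and file names are invented for the example):

#include <linux/module.h>
#include <linux/debugfs.h>

static u32 demo_disable;	/* must be u32 on this debugfs API */
static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;

	/* exposed as /sys/kernel/debug/demo/disabled */
	if (!debugfs_create_bool("disabled", 0644, demo_dir, &demo_disable)) {
		debugfs_remove_recursive(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
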
index 6805453c18e78a9d51592cb8ff3313fdbd3acc9a..f7210ad6cffd44c2486f8fef542dc5fa5affdae3 100644 (file)
@@ -101,6 +101,10 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
 
 bool should_fail(struct fault_attr *attr, ssize_t size)
 {
+       /* No need to check any other properties if the probability is 0 */
+       if (attr->probability == 0)
+               return false;
+
        if (attr->task_filter && !fail_task(attr, current))
                return false;
 
index d7c878cc006cf62ac2b0b0abb1a3a69fa62c64e2..e7964296fd50551020872d430009e890d0e97b5d 100644 (file)
@@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
         * during iterating; it can be zero only at the beginning.
         * And we cannot overflow iter->next_index in a single step,
         * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
+        *
+        * This condition is also used by radix_tree_next_slot() to stop
+        * contiguous iterating, and forbid switching to the next chunk.
         */
        index = iter->next_index;
        if (!index && iter->index)
index 1805a5cc5daaac68a08cd3f3cdd3e7d4cc5b5bad..a95bccb8497d8f923d5641e9c085e460605d261d 100644 (file)
@@ -22,8 +22,8 @@
 #include <linux/raid/pq.h>
 
 /* Recover two failed data blocks. */
-void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb,
-                      void **ptrs)
+static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
+               int failb, void **ptrs)
 {
        u8 *p, *q, *dp, *dq;
        u8 px, qx, db;
@@ -66,7 +66,8 @@ void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb,
 }
 
 /* Recover failure of one data block plus the P block */
-void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs)
+static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
+               void **ptrs)
 {
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */
index 37ae61930559c0b4c77ce1080d1cb56d6dcdac95..ecb710c0b4d92110558b810045172e7046c20ec0 100644 (file)
@@ -19,8 +19,8 @@ static int raid6_has_ssse3(void)
                boot_cpu_has(X86_FEATURE_SSSE3);
 }
 
-void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb,
-                      void **ptrs)
+static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
+               int failb, void **ptrs)
 {
        u8 *p, *q, *dp, *dq;
        const u8 *pbmul;        /* P multiplier table for B data */
@@ -194,7 +194,8 @@ void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb,
 }
 
 
-void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs)
+static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
+               void **ptrs)
 {
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */
index d0ec4f3d1593031b5498dcc120822b41c423a3c5..e91fbc23fff121915217a8e5c53c29a2b52aeba4 100644 (file)
@@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
-                       spin_dump(lock, "lockup");
+                       spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
 #endif
index b2176374b98e5e678ec93acda28cc0891d1c3717..82fed4eb2b6fe39cfd0476afe46e9589e02b386f 100644 (file)
@@ -389,3 +389,20 @@ config CLEANCACHE
          in a negligible performance hit.
 
          If unsure, say Y to enable cleancache
+
+config FRONTSWAP
+       bool "Enable frontswap to cache swap pages if tmem is present"
+       depends on SWAP
+       default n
+       help
+         Frontswap is so named because it can be thought of as the opposite
+         of a "backing" store for a swap device.  The data is stored into
+         "transcendent memory", memory that is not directly accessible or
+         addressable by the kernel and is of unknown and possibly
+         time-varying size.  When space in transcendent memory is available,
+         a significant swap I/O reduction may be achieved.  When none is
+         available, all frontswap calls are reduced to a single pointer-
+         compare-against-NULL resulting in a negligible performance hit
+         and swap data is stored as normal on the matching swap device.
+
+         If unsure, say Y to enable frontswap.
index a156285ce88d9a19e529b54b8836efac559b7af7..2e2fbbefb99fa94c97be13aa8fa71da823455409 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
 obj-$(CONFIG_BOUNCE)   += bounce.o
 obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_FRONTSWAP)        += frontswap.o
 obj-$(CONFIG_HAS_DMA)  += dmapool.o
 obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
 obj-$(CONFIG_NUMA)     += mempolicy.o
index ec4fcb7a56c8975492d656940906af6153136e51..bcb63ac48cc5e0d20eb7d1359b8d6d79a7358ac0 100644 (file)
@@ -698,7 +698,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
 {
@@ -710,6 +710,10 @@ again:
        if (ptr)
                return ptr;
 
+       /* do not panic in alloc_bootmem_bdata() */
+       if (limit && goal + size > limit)
+               limit = 0;
+
        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;
index 4ac338af512099aae2dbb35c64bc96e822e8c162..2f42d952853970b5dd1f95a149d8750e7d570357 100644 (file)
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        return 0;
 
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 * satisfies the allocation
                 */
                pageblock_nr = low_pfn >> pageblock_order;
-               if (cc->mode != COMPACT_SYNC &&
-                   last_pageblock_nr != pageblock_nr &&
+               if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
                        low_pfn += pageblock_nr_pages;
                        low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        continue;
                }
 
-               if (cc->mode != COMPACT_SYNC)
+               if (!cc->sync)
                        mode |= ISOLATE_ASYNC_MIGRATE;
 
                lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-/*
- * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
- * converted to MIGRATE_MOVABLE type, false otherwise.
- */
-static bool rescue_unmovable_pageblock(struct page *page)
-{
-       unsigned long pfn, start_pfn, end_pfn;
-       struct page *start_page, *end_page;
-
-       pfn = page_to_pfn(page);
-       start_pfn = pfn & ~(pageblock_nr_pages - 1);
-       end_pfn = start_pfn + pageblock_nr_pages;
-
-       start_page = pfn_to_page(start_pfn);
-       end_page = pfn_to_page(end_pfn);
-
-       /* Do not deal with pageblocks that overlap zones */
-       if (page_zone(start_page) != page_zone(end_page))
-               return false;
-
-       for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
-                                                                 page++) {
-               if (!pfn_valid_within(pfn))
-                       continue;
-
-               if (PageBuddy(page)) {
-                       int order = page_order(page);
-
-                       pfn += (1 << order) - 1;
-                       page += (1 << order) - 1;
-
-                       continue;
-               } else if (page_count(page) == 0 || PageLRU(page))
-                       continue;
-
-               return false;
-       }
-
-       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-       move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
-       return true;
-}
-
-enum smt_result {
-       GOOD_AS_MIGRATION_TARGET,
-       FAIL_UNMOVABLE_TARGET,
-       FAIL_BAD_TARGET,
-};
 
-/*
- * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
- * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
- * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
- */
-static enum smt_result suitable_migration_target(struct page *page,
-                                     struct compact_control *cc)
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
 {
 
        int migratetype = get_pageblock_migratetype(page);
 
        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-               return FAIL_BAD_TARGET;
+               return false;
 
        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
-               return GOOD_AS_MIGRATION_TARGET;
+               return true;
 
        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-       if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
-           migrate_async_suitable(migratetype))
-               return GOOD_AS_MIGRATION_TARGET;
-
-       if (cc->mode == COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE)
-               return FAIL_UNMOVABLE_TARGET;
-
-       if (cc->mode != COMPACT_ASYNC_MOVABLE &&
-           migratetype == MIGRATE_UNMOVABLE &&
-           rescue_unmovable_pageblock(page))
-               return GOOD_AS_MIGRATION_TARGET;
+       if (migrate_async_suitable(migratetype))
+               return true;
 
        /* Otherwise skip the block */
-       return FAIL_BAD_TARGET;
+       return false;
 }
 
 /*
@@ -477,13 +413,6 @@ static void isolate_freepages(struct zone *zone,
 
        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
-       /*
-        * isolate_freepages() may be called more than once during
-        * compact_zone_order() run and we want only the most recent
-        * count.
-        */
-       cc->nr_pageblocks_skipped = 0;
-
        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
-               enum smt_result ret;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
                        continue;
 
                /* Check the block is suitable for migration */
-               ret = suitable_migration_target(page, cc);
-               if (ret != GOOD_AS_MIGRATION_TARGET) {
-                       if (ret == FAIL_UNMOVABLE_TARGET)
-                               cc->nr_pageblocks_skipped++;
+               if (!suitable_migration_target(page))
                        continue;
-               }
+
                /*
                 * Found a block suitable for isolating free pages from. Now
                 * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
                 */
                isolated = 0;
                spin_lock_irqsave(&zone->lock, flags);
-               ret = suitable_migration_target(page, cc);
-               if (ret == GOOD_AS_MIGRATION_TARGET) {
+               if (suitable_migration_target(page)) {
                        end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                        isolated = isolate_freepages_block(pfn, end_pfn,
                                                           freelist, false);
                        nr_freepages += isolated;
-               } else if (ret == FAIL_UNMOVABLE_TARGET)
-                       cc->nr_pageblocks_skipped++;
+               }
                spin_unlock_irqrestore(&zone->lock, flags);
 
                /*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
-                       (unsigned long)&cc->freepages, false,
-                       (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
-                                                     : MIGRATE_ASYNC);
+                               (unsigned long)cc, false,
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -779,8 +701,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
+                       if (err == -ENOMEM) {
+                               ret = COMPACT_PARTIAL;
+                               goto out;
+                       }
                }
-
        }
 
 out:
@@ -793,8 +718,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                enum compact_mode mode,
-                                unsigned long *nr_pageblocks_skipped)
+                                bool sync)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
@@ -802,17 +726,12 @@ static unsigned long compact_zone_order(struct zone *zone,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
-               .mode = mode,
+               .sync = sync,
        };
-       unsigned long rc;
-
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
 
-       rc = compact_zone(zone, &cc);
-       *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
-
-       return rc;
+       return compact_zone(zone, &cc);
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -837,8 +756,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
-       unsigned long nr_pageblocks_skipped;
-       enum compact_mode mode;
 
        /*
         * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +772,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
-retry:
-               status = compact_zone_order(zone, order, gfp_mask, mode,
-                                               &nr_pageblocks_skipped);
+               status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                        break;
-
-               if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
-                       if (nr_pageblocks_skipped) {
-                               mode = COMPACT_ASYNC_UNMOVABLE;
-                               goto retry;
-                       }
-               }
        }
 
        return rc;
@@ -904,7 +811,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                        if (ok && cc->order > zone->compact_order_failed)
                                zone->compact_order_failed = cc->order + 1;
                        /* Currently async compaction is never deferred. */
-                       else if (!ok && cc->mode == COMPACT_SYNC)
+                       else if (!ok && cc->sync)
                                defer_compaction(zone, cc->order);
                }
 
@@ -919,7 +826,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
        struct compact_control cc = {
                .order = order,
-               .mode = COMPACT_ASYNC_MOVABLE,
+               .sync = false,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -929,7 +836,7 @@ static int compact_node(int nid)
 {
        struct compact_control cc = {
                .order = -1,
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
diff --git a/mm/frontswap.c b/mm/frontswap.c
new file mode 100644 (file)
index 0000000..e250255
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * Frontswap frontend
+ *
+ * This code provides the generic "frontend" layer to call a matching
+ * "backend" driver implementation of frontswap.  See
+ * Documentation/vm/frontswap.txt for more information.
+ *
+ * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
+ * Author: Dan Magenheimer
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
+
+/*
+ * frontswap_ops is set by frontswap_register_ops to contain the pointers
+ * to the frontswap "backend" implementation functions.
+ */
+static struct frontswap_ops frontswap_ops __read_mostly;
+
+/*
+ * This global enablement flag reduces overhead on systems where frontswap_ops
+ * has not been registered, so is preferred to the slower alternative: a
+ * function call that checks a non-global.
+ */
+bool frontswap_enabled __read_mostly;
+EXPORT_SYMBOL(frontswap_enabled);
+
+/*
+ * If enabled, frontswap_store will return failure even on success.  As
+ * a result, the swap subsystem will always write the page to swap, in
+ * effect converting frontswap into a writethrough cache.  In this mode,
+ * there is no direct reduction in swap writes, but a frontswap backend
+ * can unilaterally "reclaim" any pages in use with no data loss, thus
+ * providing increased control over maximum memory usage due to frontswap.
+ */
+static bool frontswap_writethrough_enabled __read_mostly;
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Counters available via /sys/kernel/debug/frontswap (if debugfs is
+ * properly configured).  These are for information only so are not protected
+ * against increment races.
+ */
+static u64 frontswap_loads;
+static u64 frontswap_succ_stores;
+static u64 frontswap_failed_stores;
+static u64 frontswap_invalidates;
+
+static inline void inc_frontswap_loads(void) {
+       frontswap_loads++;
+}
+static inline void inc_frontswap_succ_stores(void) {
+       frontswap_succ_stores++;
+}
+static inline void inc_frontswap_failed_stores(void) {
+       frontswap_failed_stores++;
+}
+static inline void inc_frontswap_invalidates(void) {
+       frontswap_invalidates++;
+}
+#else
+static inline void inc_frontswap_loads(void) { }
+static inline void inc_frontswap_succ_stores(void) { }
+static inline void inc_frontswap_failed_stores(void) { }
+static inline void inc_frontswap_invalidates(void) { }
+#endif
+/*
+ * Register operations for frontswap, returning the previous ops, thus
+ * allowing detection of multiple backends and possible nesting.
+ */
+struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
+{
+       struct frontswap_ops old = frontswap_ops;
+
+       frontswap_ops = *ops;
+       frontswap_enabled = true;
+       return old;
+}
+EXPORT_SYMBOL(frontswap_register_ops);
+
+/*
+ * Enable/disable frontswap writethrough (see above).
+ */
+void frontswap_writethrough(bool enable)
+{
+       frontswap_writethrough_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_writethrough);
+
+/*
+ * Called when a swap device is swapon'd.
+ */
+void __frontswap_init(unsigned type)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (sis->frontswap_map == NULL)
+               return;
+       if (frontswap_enabled)
+               (*frontswap_ops.init)(type);
+}
+EXPORT_SYMBOL(__frontswap_init);
+
+/*
+ * "Store" data from a page to frontswap and associate it with the page's
+ * swaptype and offset.  Page must be locked and in the swap cache.
+ * If frontswap already contains a page with matching swaptype and
+ * offset, the frontswap implementation may either overwrite the data and
+ * return success or invalidate the page from frontswap and return failure.
+ */
+int __frontswap_store(struct page *page)
+{
+       int ret = -1, dup = 0;
+       swp_entry_t entry = { .val = page_private(page), };
+       int type = swp_type(entry);
+       struct swap_info_struct *sis = swap_info[type];
+       pgoff_t offset = swp_offset(entry);
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset))
+               dup = 1;
+       ret = (*frontswap_ops.store)(type, offset, page);
+       if (ret == 0) {
+               frontswap_set(sis, offset);
+               inc_frontswap_succ_stores();
+               if (!dup)
+                       atomic_inc(&sis->frontswap_pages);
+       } else if (dup) {
+               /*
+                * failed dup always results in automatic invalidate of
+                * the (older) page from frontswap
+                */
+               frontswap_clear(sis, offset);
+               atomic_dec(&sis->frontswap_pages);
+               inc_frontswap_failed_stores();
+       } else
+               inc_frontswap_failed_stores();
+       if (frontswap_writethrough_enabled)
+               /* report failure so swap also writes to swap device */
+               ret = -1;
+       return ret;
+}
+EXPORT_SYMBOL(__frontswap_store);
+
+/*
+ * "Get" data from frontswap associated with swaptype and offset that were
+ * specified when the data was put to frontswap and use it to fill the
+ * specified page with data. Page must be locked and in the swap cache.
+ */
+int __frontswap_load(struct page *page)
+{
+       int ret = -1;
+       swp_entry_t entry = { .val = page_private(page), };
+       int type = swp_type(entry);
+       struct swap_info_struct *sis = swap_info[type];
+       pgoff_t offset = swp_offset(entry);
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset))
+               ret = (*frontswap_ops.load)(type, offset, page);
+       if (ret == 0)
+               inc_frontswap_loads();
+       return ret;
+}
+EXPORT_SYMBOL(__frontswap_load);
+
+/*
+ * Invalidate any data from frontswap associated with the specified swaptype
+ * and offset so that a subsequent "get" will fail.
+ */
+void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (frontswap_test(sis, offset)) {
+               (*frontswap_ops.invalidate_page)(type, offset);
+               atomic_dec(&sis->frontswap_pages);
+               frontswap_clear(sis, offset);
+               inc_frontswap_invalidates();
+       }
+}
+EXPORT_SYMBOL(__frontswap_invalidate_page);
+
+/*
+ * Invalidate all data from frontswap associated with all offsets for the
+ * specified swaptype.
+ */
+void __frontswap_invalidate_area(unsigned type)
+{
+       struct swap_info_struct *sis = swap_info[type];
+
+       BUG_ON(sis == NULL);
+       if (sis->frontswap_map == NULL)
+               return;
+       (*frontswap_ops.invalidate_area)(type);
+       atomic_set(&sis->frontswap_pages, 0);
+       memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+}
+EXPORT_SYMBOL(__frontswap_invalidate_area);
+
+/*
+ * Frontswap, like a true swap device, may unnecessarily retain pages
+ * under certain circumstances; "shrink" frontswap is essentially a
+ * "partial swapoff" and works by calling try_to_unuse to attempt to
+ * "partial swapoff" and works by calling try_to_unuse to unuse enough
+ * frontswap pages to attempt -- subject to memory constraints -- to
+ * reduce the number of pages in frontswap to the
+ */
+void frontswap_shrink(unsigned long target_pages)
+{
+       struct swap_info_struct *si = NULL;
+       int si_frontswap_pages;
+       unsigned long total_pages = 0, total_pages_to_unuse;
+       unsigned long pages = 0, pages_to_unuse = 0;
+       int type;
+       bool locked = false;
+
+       /*
+        * we don't want to hold swap_lock while doing a very
+        * lengthy try_to_unuse, but swap_list may change
+        * so restart scan from swap_list.head each time
+        */
+       spin_lock(&swap_lock);
+       locked = true;
+       total_pages = 0;
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               total_pages += atomic_read(&si->frontswap_pages);
+       }
+       if (total_pages <= target_pages)
+               goto out;
+       total_pages_to_unuse = total_pages - target_pages;
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               si_frontswap_pages = atomic_read(&si->frontswap_pages);
+               if (total_pages_to_unuse < si_frontswap_pages)
+                       pages = pages_to_unuse = total_pages_to_unuse;
+               else {
+                       pages = si_frontswap_pages;
+                       pages_to_unuse = 0; /* unuse all */
+               }
+               /* ensure there is enough RAM to fetch pages from frontswap */
+               if (security_vm_enough_memory_mm(current->mm, pages))
+                       continue;
+               vm_unacct_memory(pages);
+               break;
+       }
+       if (type < 0)
+               goto out;
+       locked = false;
+       spin_unlock(&swap_lock);
+       try_to_unuse(type, true, pages_to_unuse);
+out:
+       if (locked)
+               spin_unlock(&swap_lock);
+       return;
+}
+EXPORT_SYMBOL(frontswap_shrink);
+
+/*
+ * Count and return the number of frontswap pages across all
+ * swap devices.  This is exported so that backend drivers can
+ * determine current usage without reading debugfs.
+ */
+unsigned long frontswap_curr_pages(void)
+{
+       int type;
+       unsigned long totalpages = 0;
+       struct swap_info_struct *si = NULL;
+
+       spin_lock(&swap_lock);
+       for (type = swap_list.head; type >= 0; type = si->next) {
+               si = swap_info[type];
+               totalpages += atomic_read(&si->frontswap_pages);
+       }
+       spin_unlock(&swap_lock);
+       return totalpages;
+}
+EXPORT_SYMBOL(frontswap_curr_pages);
+
+static int __init init_frontswap(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *root = debugfs_create_dir("frontswap", NULL);
+       if (root == NULL)
+               return -ENXIO;
+       debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
+       debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
+       debugfs_create_u64("failed_stores", S_IRUGO, root,
+                               &frontswap_failed_stores);
+       debugfs_create_u64("invalidates", S_IRUGO,
+                               root, &frontswap_invalidates);
+#endif
+       return 0;
+}
+
+module_init(init_frontswap);
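
To make the new frontend concrete, a hedged sketch of the smallest possible backend registering through frontswap_register_ops(); the frontswap_ops member names and signatures are inferred from the call sites in this file, and a real backend (zcache, the Xen tmem driver, ...) would actually store the page somewhere instead of refusing it:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/frontswap.h>

static void null_fs_init(unsigned type)
{
	pr_info("frontswap attached to swap device %u\n", type);
}

static int null_fs_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* refuse every page: swap falls back to the device */
}

static int null_fs_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was ever stored */
}

static void null_fs_invalidate_page(unsigned type, pgoff_t offset) { }
static void null_fs_invalidate_area(unsigned type) { }

static struct frontswap_ops null_fs_ops = {
	.init		 = null_fs_init,
	.store		 = null_fs_store,
	.load		 = null_fs_load,
	.invalidate_page = null_fs_invalidate_page,
	.invalidate_area = null_fs_invalidate_area,
};

static int __init null_fs_register(void)
{
	struct frontswap_ops old = frontswap_register_ops(&null_fs_ops);

	if (old.init)
		pr_warn("replacing a previously registered frontswap backend\n");
	return 0;
}
module_init(null_fs_register);
MODULE_LICENSE("GPL");
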
index 5cbb78190041573ee4e92e77a12a8f129514862f..2ba87fbfb75b9755e279d39af93359693afe66fd 100644 (file)
@@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page);
 /*
  * in mm/page_alloc.c
  */
-extern void set_pageblock_migratetype(struct page *page, int migratetype);
-extern int move_freepages_block(struct zone *zone, struct page *page,
-                               int migratetype);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
@@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page);
 #endif
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-#include <linux/compaction.h>
 
 /*
  * in mm/compaction.c
@@ -123,14 +119,11 @@ struct compact_control {
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
-       enum compact_mode mode;         /* Compaction mode */
+       bool sync;                      /* Synchronous migration */
 
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
-
-       /* Number of UNMOVABLE destination pageblocks skipped during scan */
-       unsigned long nr_pageblocks_skipped;
 };
 
 unsigned long
index deff1b64a08c36ef4857590e9913717488953014..14d260fa0d17939a2279c244df91789cd30720e4 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/ksm.h>
 #include <linux/fs.h>
+#include <linux/file.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -204,14 +205,16 @@ static long madvise_remove(struct vm_area_struct *vma,
 {
        loff_t offset;
        int error;
+       struct file *f;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
 
-       if (!vma->vm_file || !vma->vm_file->f_mapping
-               || !vma->vm_file->f_mapping->host) {
+       f = vma->vm_file;
+
+       if (!f || !f->f_mapping || !f->f_mapping->host) {
                        return -EINVAL;
        }
 
@@ -221,11 +224,18 @@ static long madvise_remove(struct vm_area_struct *vma,
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* filesystem's fallocate may need to take i_mutex */
+       /*
+        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * explicitly grab a reference because the vma (and hence the
+        * vma's reference to the file) can go away as soon as we drop
+        * mmap_sem.
+        */
+       get_file(f);
        up_read(&current->mm->mmap_sem);
-       error = do_fallocate(vma->vm_file,
+       error = do_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
+       fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
 }
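
The path fixed above is reached from user space with MADV_REMOVE on a shared file-backed mapping; a runnable sketch (the /dev/shm file name is just an example):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t len = 2 * 1024 * 1024;
	int fd = open("/dev/shm/madv-remove-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, len) != 0) {
		perror("setup");
		return 1;
	}

	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(map, 0xaa, len);

	/* Punches a hole via do_fallocate(FALLOC_FL_PUNCH_HOLE). */
	if (madvise(map, len, MADV_REMOVE) != 0)
		perror("madvise(MADV_REMOVE)");

	munmap(map, len);
	close(fd);
	unlink("/dev/shm/madv-remove-demo");
	return 0;
}
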
index 952123eba43371a5e6d26ff8a5b7ad934747c027..5cc6731b00ccd05ff2f5b610627a0a2848dc5544 100644 (file)
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                           MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_free(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-       if (memblock.reserved.regions == memblock_reserved_init_regions)
-               return 0;
-
-       return memblock_reserve(__pa(memblock.reserved.regions),
-                sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
        type->total_size -= type->regions[r].size;
@@ -184,9 +160,39 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
        }
 }
 
-static int __init_memblock memblock_double_array(struct memblock_type *type)
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+                                       phys_addr_t *addr)
+{
+       if (memblock.reserved.regions == memblock_reserved_init_regions)
+               return 0;
+
+       *addr = __pa(memblock.reserved.regions);
+
+       return PAGE_ALIGN(sizeof(struct memblock_region) *
+                         memblock.reserved.max);
+}
+
+/**
+ * memblock_double_array - double the size of the memblock regions array
+ * @type: memblock type of the regions array being doubled
+ * @new_area_start: starting address of memory range to avoid overlap with
+ * @new_area_size: size of memory range to avoid overlap with
+ *
+ * Double the size of the @type regions array. If memblock is being used to
+ * allocate memory for a new reserved regions array and there is a previously
+ * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * waiting to be reserved, ensure the memory used by the new array does
+ * not overlap.
+ *
+ * RETURNS:
+ * 0 on success, -1 on failure.
+ */
+static int __init_memblock memblock_double_array(struct memblock_type *type,
+                                               phys_addr_t new_area_start,
+                                               phys_addr_t new_area_size)
 {
        struct memblock_region *new_array, *old_array;
+       phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;
@@ -200,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
+       /*
+        * We need to allocate the new array aligned to PAGE_SIZE,
+        * so we can free it completely later.
+        */
+       old_alloc_size = PAGE_ALIGN(old_size);
+       new_alloc_size = PAGE_ALIGN(new_size);
 
        /* Retrieve the slab flag */
        if (type == &memblock.memory)
@@ -222,7 +234,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
-               addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+               /* only exclude range when trying to double reserved.regions */
+               if (type != &memblock.reserved)
+                       new_area_start = new_area_size = 0;
+
+               addr = memblock_find_in_range(new_area_start + new_area_size,
+                                               memblock.current_limit,
+                                               new_alloc_size, PAGE_SIZE);
+               if (!addr && new_area_size)
+                       addr = memblock_find_in_range(0,
+                                       min(new_area_start, memblock.current_limit),
+                                       new_alloc_size, PAGE_SIZE);
+
                new_array = addr ? __va(addr) : 0;
        }
        if (!addr) {
@@ -251,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_size);
+               memblock_free(__pa(old_array), old_alloc_size);
 
        /* Reserve the new array if that comes from the memblock.
         * Otherwise, we needn't do it
         */
        if (!use_slab)
-               BUG_ON(memblock_reserve(addr, new_size));
+               BUG_ON(memblock_reserve(addr, new_alloc_size));
 
        /* Update slab flag */
        *in_slab = use_slab;
@@ -399,7 +422,7 @@ repeat:
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
-                       if (memblock_double_array(type) < 0)
+                       if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
@@ -450,7 +473,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 
        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
-               if (memblock_double_array(type) < 0)
+               if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;
 
        for (i = 0; i < type->cnt; i++) {
@@ -540,9 +563,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Find the first free area from *@idx which matches @nid, fill the out
  * parameters, and update *@idx for the next iteration.  The lower 32bit of
@@ -616,9 +639,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %MAX_NUMNODES for all nodes
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
  */
@@ -867,6 +890,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
        return memblock_search(&memblock.memory, addr) != -1;
 }
 
+/**
+ * memblock_is_region_memory - check if a region is a subset of memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) is a subset of a memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
        int idx = memblock_search(&memblock.memory, base);
@@ -879,6 +912,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
                 memblock.memory.regions[idx].size) >= end;
 }
 
+/**
+ * memblock_is_region_reserved - check if a region intersects reserved memory
+ * @base: base of region to check
+ * @size: size of region to check
+ *
+ * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ *
+ * RETURNS:
+ * 0 if false, non-zero if true
+ */
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
        memblock_cap_size(base, &size);
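
Two things change in memblock_double_array() above: the new regions array is now sized in whole pages (so it can be freed exactly later), and the search for space first skips past the range that is about to be reserved, falling back to the area below it. A rough standalone model of that search order (find_in_range() and find_avoiding() are invented stand-ins, not memblock APIs):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* toy stand-in for memblock_find_in_range(): succeed if [lo, hi) can hold size */
static uint64_t find_in_range(uint64_t lo, uint64_t hi, uint64_t size)
{
        return (hi > lo && hi - lo >= size) ? lo : 0;
}

/* search above the range to avoid first, then fall back to the space below it */
static uint64_t find_avoiding(uint64_t limit, uint64_t size,
                              uint64_t avoid_start, uint64_t avoid_size)
{
        uint64_t addr = find_in_range(avoid_start + avoid_size, limit, size);

        if (!addr && avoid_size)
                addr = find_in_range(0, avoid_start < limit ? avoid_start : limit,
                                     size);
        return addr;
}

int main(void)
{
        uint64_t new_size = 256 * 16 * 2;               /* doubled regions array   */
        uint64_t new_alloc_size = PAGE_ALIGN(new_size); /* whole pages, freeable   */

        printf("alloc %llu bytes at %#llx\n",
               (unsigned long long)new_alloc_size,
               (unsigned long long)find_avoiding(1ULL << 30, new_alloc_size,
                                                 0x100000, 0x10000));
        return 0;
}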
index ac35bccadb7b9f53606d445a961e442e891aa94a..f72b5e52451a7d8e62648dbfe211e5ff11c7c34f 100644 (file)
@@ -1148,7 +1148,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 {
        if (root_memcg == memcg)
                return true;
-       if (!root_memcg->use_hierarchy)
+       if (!root_memcg->use_hierarchy || !memcg)
                return false;
        return css_is_ancestor(&memcg->css, &root_memcg->css);
 }
@@ -1234,7 +1234,7 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
- * @mem: the memory cgroup
+ * @memcg: the memory cgroup
  *
  * Returns the maximum amount of memory @mem can be charged with, in
  * pages.
@@ -1508,7 +1508,7 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
 
 /**
  * test_mem_cgroup_node_reclaimable
- * @mem: the target memcg
+ * @memcg: the target memcg
  * @nid: the node ID to be checked.
  * @noswap: specify true here if the user wants file only information.
  *
index 1b7dc662bf9f229063cb3e7b97e8e4c22147b92b..2466d1250231f3e2405429ea4de4a97c597d0074 100644 (file)
@@ -1225,7 +1225,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
-                               VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+#ifdef CONFIG_DEBUG_VM
+                               if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
+                                       pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
+                                               __func__, addr, end,
+                                               vma->vm_start,
+                                               vma->vm_end);
+                                       BUG();
+                               }
+#endif
                                split_huge_page_pmd(vma->vm_mm, pmd);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
@@ -1366,7 +1374,7 @@ void unmap_vmas(struct mmu_gather *tlb,
 /**
  * zap_page_range - remove user pages in a given range
  * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * @start: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  *
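
The zap_pmd_range() hunk above trades a bare VM_BUG_ON() for a report that prints the faulting addresses and VMA bounds before BUG()ing, so a single crash report is actionable. A standalone sketch of the same "print context, then die" idea, using a hypothetical CHECK_OR_DIE() macro rather than any kernel API:

#include <stdio.h>
#include <stdlib.h>

#define CHECK_OR_DIE(cond, fmt, ...)                                   \
        do {                                                           \
                if (!(cond)) {                                         \
                        fprintf(stderr, "%s: " fmt "\n", __func__,     \
                                __VA_ARGS__);                          \
                        abort();                                       \
                }                                                      \
        } while (0)

int main(void)
{
        unsigned long addr = 0x1000, end = 0x2000;

        /* the message carries the values needed to debug a one-off report */
        CHECK_OR_DIE(addr < end, "bad range addr=0x%lx end=0x%lx", addr, end);
        puts("range ok");
        return 0;
}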
index 0d7e3ec8e0f3cc997b5fa0b422f5fa39caffe413..427bb291dd0fdeeb6db93c86c66df6a6f92c9b28 100644 (file)
@@ -618,7 +618,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
-                       goto out;
+                       goto error;
                new_pgdat = 1;
        }
 
index f15c1b24ca1822c7808b1ea825ca932a9e063b23..1d771e4200d222eea46458bdd6089279a78ffeb5 100644 (file)
@@ -1177,7 +1177,7 @@ static long do_mbind(unsigned long start, unsigned long len,
                if (!list_empty(&pagelist)) {
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma,
-                                               false, true);
+                                               false, MIGRATE_SYNC);
                        if (nr_failed)
                                putback_lru_pages(&pagelist);
                }
index ab81d482ae6f1cac508fce796c8902432e32c3ad..be26d5cbe56b34d63f8c8ac8b799782f9ef6424b 100644 (file)
@@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
-               __set_page_dirty_nobuffers(newpage);
+               if (PageSwapBacked(page))
+                       SetPageDirty(newpage);
+               else
+                       __set_page_dirty_nobuffers(newpage);
        }
 
        mlock_migrate_page(newpage, page);
index d23415c001bc4c5847986c7659261ca335888382..405573010f99a8b0d877dd980e284979ebd2ec69 100644 (file)
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
                __free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+                                phys_addr_t end)
+{
+       unsigned long start_pfn = PFN_UP(start);
+       unsigned long end_pfn = min_t(unsigned long,
+                                     PFN_DOWN(end), max_low_pfn);
+
+       if (start_pfn > end_pfn)
+               return 0;
+
+       __free_pages_memory(start_pfn, end_pfn);
+
+       return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
        unsigned long count = 0;
-       phys_addr_t start, end;
+       phys_addr_t start, end, size;
        u64 i;
 
-       /* free reserved array temporarily so that it's treated as free area */
-       memblock_free_reserved_regions();
-
-       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-               unsigned long start_pfn = PFN_UP(start);
-               unsigned long end_pfn = min_t(unsigned long,
-                                             PFN_DOWN(end), max_low_pfn);
-               if (start_pfn < end_pfn) {
-                       __free_pages_memory(start_pfn, end_pfn);
-                       count += end_pfn - start_pfn;
-               }
-       }
+       for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+               count += __free_memory_core(start, end);
+
+       /* free the range used for the reserved array, if we allocated it */
+       size = get_allocated_memblock_reserved_regions_info(&start);
+       if (size)
+               count += __free_memory_core(start, start + size);
 
-       /* put region array back? */
-       memblock_reserve_reserved_regions();
        return count;
 }
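
__free_memory_core() above reduces a physical range to whole page frames: round the start up, round the end down, clamp to max_low_pfn, and return zero if the range collapses. A standalone sketch of that arithmetic, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long start = 0x1234, end = 0x9000, max_low_pfn = 8;
        unsigned long start_pfn = PFN_UP(start);   /* 2: partial first page skipped */
        unsigned long end_pfn = PFN_DOWN(end);     /* 9: partial last page skipped  */

        if (end_pfn > max_low_pfn)
                end_pfn = max_low_pfn;             /* clamp to lowmem */

        if (start_pfn > end_pfn)
                printf("nothing to free\n");
        else
                printf("free pfns [%lu, %lu): %lu pages\n",
                       start_pfn, end_pfn, end_pfn - start_pfn);
        return 0;
}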
 
@@ -274,7 +282,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
        return ___alloc_bootmem(size, align, goal, limit);
 }
 
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
index c4acfbc099727b3f5151ed5917961ff59efb7481..d4b0c10872de59d8959262b1daac92d5d60eb80a 100644 (file)
@@ -1486,7 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 
        if (file)
                fput(file);
index ed0e19677360fa55f62e3944208e82cab7eeacc8..ac300c99baf644824f9c6532627a99fdb26020b1 100644 (file)
@@ -183,7 +183,8 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
                          const nodemask_t *nodemask, unsigned long totalpages)
 {
-       unsigned long points;
+       long points;
+       long adj;
 
        if (oom_unkillable_task(p, memcg, nodemask))
                return 0;
@@ -192,7 +193,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
        if (!p)
                return 0;
 
-       if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+       adj = p->signal->oom_score_adj;
+       if (adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }
@@ -210,20 +212,17 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-               points -= 30 * totalpages / 1000;
+               adj -= 30;
 
-       /*
-        * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
-        * either completely disable oom killing or always prefer a certain
-        * task.
-        */
-       points += p->signal->oom_score_adj * totalpages / 1000;
+       /* Normalize to oom_score_adj units */
+       adj *= totalpages / 1000;
+       points += adj;
 
        /*
         * Never return 0 for an eligible task regardless of the root bonus and
         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
         */
-       return points ? points : 1;
+       return points > 0 ? points : 1;
 }
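
The oom_badness() hunks above keep the adjustment signed: the root bonus and oom_score_adj are combined in oom_score_adj units, scaled by totalpages/1000, and only then added to the task's page count, with the result clamped so an eligible task never scores zero. A standalone sketch of that arithmetic with made-up numbers:

#include <stdio.h>

static long badness(long points, long adj, long totalpages, int is_root)
{
        if (is_root)
                adj -= 30;                    /* root bonus, in oom_score_adj units */
        adj *= totalpages / 1000;             /* normalize the adjustment to pages  */
        points += adj;
        return points > 0 ? points : 1;       /* eligible tasks never score 0       */
}

int main(void)
{
        /* task using 50000 pages, oom_score_adj -500, on a 1,000,000-page machine */
        printf("score: %ld\n", badness(50000, -500, 1000000, 0));
        /* -500 * 1000 = -500000 outweighs the usage, so the score clamps to 1 */
        return 0;
}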
 
 /*
@@ -366,7 +365,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @mem: current's memory controller, if constrained
+ * @memcg: current's memory controller, if constrained
  * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
index 6092f331b32e496bb522f4f17741c3594cc4ba0d..4a4f9219683f63d8594c143fa24f27d4daa6a8ff 100644 (file)
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-void set_pageblock_migratetype(struct page *page, int migratetype)
+static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
        if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
        return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page,
-                        int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+                               int migratetype)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
@@ -5635,7 +5635,12 @@ static struct page *
 __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
                             int **resultp)
 {
-       return alloc_page(GFP_HIGHUSER_MOVABLE);
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+       if (PageHighMem(page))
+               gfp_mask |= __GFP_HIGHMEM;
+
+       return alloc_page(gfp_mask);
 }
 
 /* [start, end) must belong to a single zone. */
@@ -5651,7 +5656,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
-               .mode = COMPACT_SYNC,
+               .sync = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index 1ccbd714059cdd7ddb9d90cb3ca92438f2cb5d34..eb750f851395b4e726c06f3926d42b472ab1bdf9 100644 (file)
@@ -392,7 +392,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
 
 /**
  * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
- * @end: swap entry to be cmpxchged
+ * @ent: swap entry to be cmpxchged
  * @old: old id
  * @new: new id
  *
@@ -422,7 +422,7 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
 /**
  * swap_cgroup_record - record mem_cgroup for this swp_entry.
  * @ent: swap entry to be recorded into
- * @mem: mem_cgroup to be recorded
+ * @id: mem_cgroup to be recorded
  *
  * Returns old value at success, 0 at failure.
  * (Of course, old value can be 0.)
index dc76b4d0611ecb59fd85d89a78896c792443a62f..34f02923744c921fa2d990ec68f220a698049362 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/swapops.h>
 #include <linux/writeback.h>
+#include <linux/frontswap.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                unlock_page(page);
                goto out;
        }
+       if (frontswap_store(page) == 0) {
+               set_page_writeback(page);
+               unlock_page(page);
+               end_page_writeback(page);
+               goto out;
+       }
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
@@ -122,6 +129,11 @@ int swap_readpage(struct page *page)
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageUptodate(page));
+       if (frontswap_load(page) == 0) {
+               SetPageUptodate(page);
+               unlock_page(page);
+               goto out;
+       }
        bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
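
The swap_writepage()/swap_readpage() hunks above add a "try the transcendent-memory copy first, fall back to block I/O" step. A userspace sketch of that control flow, with a hypothetical single-slot store standing in for the frontswap backend:

#include <stdio.h>
#include <string.h>

static char frontstore[4096];
static int frontstore_valid;

static int front_store(const char *page)   /* 0 on success, like frontswap_store() */
{
        memcpy(frontstore, page, sizeof(frontstore));
        frontstore_valid = 1;
        return 0;
}

static int front_load(char *page)          /* 0 on success, like frontswap_load() */
{
        if (!frontstore_valid)
                return -1;
        memcpy(page, frontstore, sizeof(frontstore));
        return 0;
}

static void write_page(const char *page)
{
        if (front_store(page) == 0) {
                puts("stored in memory, no block I/O issued"); /* early-out path */
                return;
        }
        puts("fall back: submit bio to the swap device");
}

static void read_page(char *page)
{
        if (front_load(page) == 0) {
                puts("loaded from memory, page is up to date");
                return;
        }
        puts("fall back: read from the swap device");
}

int main(void)
{
        char page[4096] = "data";

        write_page(page);
        read_page(page);
        return 0;
}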
index aa9701e12714af2ce7ead752def02200910bf2b1..6c118d012bb5a27be54ab331a4491e3ec421ac31 100644 (file)
@@ -162,7 +162,6 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm: memory map to walk
  * @addr: starting address
  * @end: ending address
  * @walk: set of callbacks to invoke for each level of the tree
index 405d331804c3da95c52347878387a807eb5e6c60..3707c71ae4cddbec027eac857291185c662c760a 100644 (file)
@@ -360,7 +360,6 @@ err_free:
  * @chunk: chunk to depopulate
  * @off: offset to the area to depopulate
  * @size: size of the area to depopulate in bytes
- * @flush: whether to flush cache and tlb or not
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk.  If @flush is true, vcache is flushed before unmapping
index 585bd220a21ee4e5eefaec2b9c46dc9ae68fde98..bd106361be4bf2d080e67f11252198c710745278 100644 (file)
@@ -263,6 +263,24 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
        return 0;
 }
 
+/*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+                              pgoff_t index, swp_entry_t swap)
+{
+       void *item;
+
+       rcu_read_lock();
+       item = radix_tree_lookup(&mapping->page_tree, index);
+       rcu_read_unlock();
+       return item == swp_to_radix_entry(swap);
+}
+
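
shmem_confirm_swap() above re-reads the mapping's slot under rcu_read_lock() and compares it with the swap entry the caller expects, since a racing thread may already have brought the page back from swap. A toy standalone model of that "look up again and compare before acting" check, with a plain array standing in for the radix tree:

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 16
static void *table[NSLOTS];              /* stand-in for mapping->page_tree */

static bool confirm_slot(unsigned int index, void *expected)
{
        /* in the kernel this lookup runs under rcu_read_lock() */
        return index < NSLOTS && table[index] == expected;
}

int main(void)
{
        int swap_entry = 42;

        table[3] = &swap_entry;                      /* slot still holds the entry    */
        printf("proceed: %d\n", confirm_slot(3, &swap_entry));

        table[3] = NULL;                             /* a racer replaced it meanwhile */
        printf("proceed: %d\n", confirm_slot(3, &swap_entry));
        return 0;
}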
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
@@ -270,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp, void *expected)
 {
-       int error = 0;
+       int error;
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapBacked(page));
 
+       page_cache_get(page);
+       page->mapping = mapping;
+       page->index = index;
+
+       spin_lock_irq(&mapping->tree_lock);
        if (!expected)
-               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               error = radix_tree_insert(&mapping->page_tree, index, page);
+       else
+               error = shmem_radix_tree_replace(mapping, index, expected,
+                                                                page);
        if (!error) {
-               page_cache_get(page);
-               page->mapping = mapping;
-               page->index = index;
-
-               spin_lock_irq(&mapping->tree_lock);
-               if (!expected)
-                       error = radix_tree_insert(&mapping->page_tree,
-                                                       index, page);
-               else
-                       error = shmem_radix_tree_replace(mapping, index,
-                                                       expected, page);
-               if (!error) {
-                       mapping->nrpages++;
-                       __inc_zone_page_state(page, NR_FILE_PAGES);
-                       __inc_zone_page_state(page, NR_SHMEM);
-                       spin_unlock_irq(&mapping->tree_lock);
-               } else {
-                       page->mapping = NULL;
-                       spin_unlock_irq(&mapping->tree_lock);
-                       page_cache_release(page);
-               }
-               if (!expected)
-                       radix_tree_preload_end();
+               mapping->nrpages++;
+               __inc_zone_page_state(page, NR_FILE_PAGES);
+               __inc_zone_page_state(page, NR_SHMEM);
+               spin_unlock_irq(&mapping->tree_lock);
+       } else {
+               page->mapping = NULL;
+               spin_unlock_irq(&mapping->tree_lock);
+               page_cache_release(page);
        }
-       if (error)
-               mem_cgroup_uncharge_cache_page(page);
        return error;
 }
 
@@ -683,10 +692,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
-                * allocation; but the inode might already be freed by now,
-                * and we cannot refer to inode or mapping or info to check.
-                * However, we do hold page lock on the PageSwapCache page,
-                * so can check if that still has our reference remaining.
+                * allocation, but the inode might have been freed while we
+                * dropped it: although a racing shmem_evict_inode() cannot
+                * complete without emptying the radix_tree, our page lock
+                * on this swapcache page is not enough to prevent that -
+                * free_swap_and_cache() of our swap entry will only
+                * trylock_page(), removing swap from radix_tree whatever.
+                *
+                * We must not proceed to shmem_add_to_page_cache() if the
+                * inode has been freed, but of course we cannot rely on
+                * inode or mapping or info to check that.  However, we can
+                * safely check if our swap entry is still in use (and here
+                * it can't have got reused for another page): if it's still
+                * in use, then the inode cannot have been freed yet, and we
+                * can safely proceed (if it's no longer in use, that tells
+                * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
@@ -730,9 +750,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 
        /*
         * There's a faint possibility that swap page was replaced before
-        * caller locked it: it will come back later with the right page.
+        * caller locked it: caller will come back later with the right page.
         */
-       if (unlikely(!PageSwapCache(page)))
+       if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;
 
        /*
@@ -995,21 +1015,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        newpage = shmem_alloc_page(gfp, info, index);
        if (!newpage)
                return -ENOMEM;
-       VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
 
-       *pagep = newpage;
        page_cache_get(newpage);
        copy_highpage(newpage, oldpage);
+       flush_dcache_page(newpage);
 
-       VM_BUG_ON(!PageLocked(oldpage));
        __set_page_locked(newpage);
-       VM_BUG_ON(!PageUptodate(oldpage));
        SetPageUptodate(newpage);
-       VM_BUG_ON(!PageSwapBacked(oldpage));
        SetPageSwapBacked(newpage);
-       VM_BUG_ON(!swap_index);
        set_page_private(newpage, swap_index);
-       VM_BUG_ON(!PageSwapCache(oldpage));
        SetPageSwapCache(newpage);
 
        /*
@@ -1019,13 +1033,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        spin_lock_irq(&swap_mapping->tree_lock);
        error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
                                                                   newpage);
-       __inc_zone_page_state(newpage, NR_FILE_PAGES);
-       __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+       if (!error) {
+               __inc_zone_page_state(newpage, NR_FILE_PAGES);
+               __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+       }
        spin_unlock_irq(&swap_mapping->tree_lock);
-       BUG_ON(error);
 
-       mem_cgroup_replace_page_cache(oldpage, newpage);
-       lru_cache_add_anon(newpage);
+       if (unlikely(error)) {
+               /*
+                * Is this possible?  I think not, now that our callers check
+                * both PageSwapCache and page_private after getting page lock;
+                * but be defensive.  Reverse old to newpage for clear and free.
+                */
+               oldpage = newpage;
+       } else {
+               mem_cgroup_replace_page_cache(oldpage, newpage);
+               lru_cache_add_anon(newpage);
+               *pagep = newpage;
+       }
 
        ClearPageSwapCache(oldpage);
        set_page_private(oldpage, 0);
@@ -1033,7 +1058,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        unlock_page(oldpage);
        page_cache_release(oldpage);
        page_cache_release(oldpage);
-       return 0;
+       return error;
 }
 
 /*
@@ -1107,9 +1132,10 @@ repeat:
 
                /* We have to do this with page locked to prevent races */
                lock_page(page);
-               if (!PageSwapCache(page) || page->mapping) {
+               if (!PageSwapCache(page) || page_private(page) != swap.val ||
+                   !shmem_confirm_swap(mapping, index, swap)) {
                        error = -EEXIST;        /* try again */
-                       goto failed;
+                       goto unlock;
                }
                if (!PageUptodate(page)) {
                        error = -EIO;
@@ -1125,9 +1151,12 @@ repeat:
 
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
+               if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
                                                gfp, swp_to_radix_entry(swap));
+                       /* We already confirmed the swap entry, and make no allocation */
+                       VM_BUG_ON(error);
+               }
                if (error)
                        goto failed;
 
@@ -1164,11 +1193,18 @@ repeat:
                __set_page_locked(page);
                error = mem_cgroup_cache_charge(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
-               if (!error)
-                       error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, NULL);
                if (error)
                        goto decused;
+               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+               if (!error) {
+                       error = shmem_add_to_page_cache(page, mapping, index,
+                                                       gfp, NULL);
+                       radix_tree_preload_end();
+               }
+               if (error) {
+                       mem_cgroup_uncharge_cache_page(page);
+                       goto decused;
+               }
                lru_cache_add_anon(page);
 
                spin_lock(&info->lock);
@@ -1228,14 +1264,10 @@ decused:
 unacct:
        shmem_unacct_blocks(info->flags, 1);
 failed:
-       if (swap.val && error != -EINVAL) {
-               struct page *test = find_get_page(mapping, index);
-               if (test && !radix_tree_exceptional_entry(test))
-                       page_cache_release(test);
-               /* Have another try if the entry has changed */
-               if (test != swp_to_radix_entry(swap))
-                       error = -EEXIST;
-       }
+       if (swap.val && error != -EINVAL &&
+           !shmem_confirm_swap(mapping, index, swap))
+               error = -EEXIST;
+unlock:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
@@ -1247,7 +1279,7 @@ failed:
                spin_unlock(&info->lock);
                goto repeat;
        }
-       if (error == -EEXIST)
+       if (error == -EEXIST)   /* from above or from radix_tree_insert */
                goto repeat;
        return error;
 }
@@ -1577,6 +1609,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1665,7 +1698,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
@@ -1674,98 +1707,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        return error;
 }
 
-/*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
- */
-static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
-                                   pgoff_t index, pgoff_t end, int origin)
-{
-       struct page *page;
-       struct pagevec pvec;
-       pgoff_t indices[PAGEVEC_SIZE];
-       bool done = false;
-       int i;
-
-       pagevec_init(&pvec, 0);
-       pvec.nr = 1;            /* start small: we may be there already */
-       while (!done) {
-               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
-                                       pvec.nr, pvec.pages, indices);
-               if (!pvec.nr) {
-                       if (origin == SEEK_DATA)
-                               index = end;
-                       break;
-               }
-               for (i = 0; i < pvec.nr; i++, index++) {
-                       if (index < indices[i]) {
-                               if (origin == SEEK_HOLE) {
-                                       done = true;
-                                       break;
-                               }
-                               index = indices[i];
-                       }
-                       page = pvec.pages[i];
-                       if (page && !radix_tree_exceptional_entry(page)) {
-                               if (!PageUptodate(page))
-                                       page = NULL;
-                       }
-                       if (index >= end ||
-                           (page && origin == SEEK_DATA) ||
-                           (!page && origin == SEEK_HOLE)) {
-                               done = true;
-                               break;
-                       }
-               }
-               shmem_deswap_pagevec(&pvec);
-               pagevec_release(&pvec);
-               pvec.nr = PAGEVEC_SIZE;
-               cond_resched();
-       }
-       return index;
-}
-
-static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
-{
-       struct address_space *mapping;
-       struct inode *inode;
-       pgoff_t start, end;
-       loff_t new_offset;
-
-       if (origin != SEEK_DATA && origin != SEEK_HOLE)
-               return generic_file_llseek_size(file, offset, origin,
-                                                       MAX_LFS_FILESIZE);
-       mapping = file->f_mapping;
-       inode = mapping->host;
-       mutex_lock(&inode->i_mutex);
-       /* We're holding i_mutex so we can access i_size directly */
-
-       if (offset < 0)
-               offset = -EINVAL;
-       else if (offset >= inode->i_size)
-               offset = -ENXIO;
-       else {
-               start = offset >> PAGE_CACHE_SHIFT;
-               end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               new_offset = shmem_seek_hole_data(mapping, start, end, origin);
-               new_offset <<= PAGE_CACHE_SHIFT;
-               if (new_offset > offset) {
-                       if (new_offset < inode->i_size)
-                               offset = new_offset;
-                       else if (origin == SEEK_DATA)
-                               offset = -ENXIO;
-                       else
-                               offset = inode->i_size;
-               }
-       }
-
-       if (offset >= 0 && offset != file->f_pos) {
-               file->f_pos = offset;
-               file->f_version = 0;
-       }
-       mutex_unlock(&inode->i_mutex);
-       return offset;
-}
-
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
@@ -2769,7 +2710,7 @@ static const struct address_space_operations shmem_aops = {
 static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
-       .llseek         = shmem_file_llseek,
+       .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
index 6a4bf9160e855ae1e2d61fefb4922918f710bb24..c7bb952400c83c9114a401f647955cb18dd2ea44 100644 (file)
@@ -275,8 +275,9 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       pg_data_t *host_pgdat;
-       unsigned long goal;
+       unsigned long goal, limit;
+       unsigned long *p;
+       int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
@@ -287,10 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & PAGE_SECTION_MASK;
-       host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
-       return __alloc_bootmem_node_nopanic(host_pgdat, size,
-                                           SMP_CACHE_BYTES, goal);
+       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       limit = goal + (1UL << PA_SECTION_SHIFT);
+       nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+       p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+                                         SMP_CACHE_BYTES, goal, limit);
+       if (!p && limit) {
+               limit = 0;
+               goto again;
+       }
+       return p;
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
index 457b10baef59414f591fb0bfab2b54619c5209b0..71373d03fcee99d57c29d24a25db888228bf3417 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/memcontrol.h>
 #include <linux/poll.h>
 #include <linux/oom.h>
+#include <linux/frontswap.h>
+#include <linux/swapfile.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 static void free_swap_count_continuations(struct swap_info_struct *);
 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
 
-static DEFINE_SPINLOCK(swap_lock);
+DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 long nr_swap_pages;
 long total_swap_pages;
@@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
 static const char Unused_offset[] = "Unused swap offset entry ";
 
-static struct swap_list_t swap_list = {-1, -1};
+struct swap_list_t swap_list = {-1, -1};
 
-static struct swap_info_struct *swap_info[MAX_SWAPFILES];
+struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
@@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
                        swap_list.next = p->type;
                nr_swap_pages++;
                p->inuse_pages--;
+               frontswap_invalidate_page(p->type, offset);
                if ((p->flags & SWP_BLKDEV) &&
                                disk->fops->swap_slot_free_notify)
                        disk->fops->swap_slot_free_notify(p->bdev, offset);
@@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm,
 }
 
 /*
- * Scan swap_map from current position to next entry still in use.
+ * Scan swap_map (or frontswap_map if frontswap parameter is true)
+ * from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
  */
 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
-                                       unsigned int prev)
+                                       unsigned int prev, bool frontswap)
 {
        unsigned int max = si->max;
        unsigned int i = prev;
@@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
                        prev = 0;
                        i = 1;
                }
+               if (frontswap) {
+                       if (frontswap_test(si, i))
+                               break;
+                       else
+                               continue;
+               }
                count = si->swap_map[i];
                if (count && swap_count(count) != SWAP_MAP_BAD)
                        break;
@@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  * We completely avoid races by reading each swap page in advance,
  * and then search for the process using it.  All the necessary
  * page table adjustments can then be made atomically.
+ *
+ * If the boolean frontswap is true, only unuse pages_to_unuse pages;
+ * pages_to_unuse==0 means all pages (ignored if frontswap is false).
  */
-static int try_to_unuse(unsigned int type)
+int try_to_unuse(unsigned int type, bool frontswap,
+                unsigned long pages_to_unuse)
 {
        struct swap_info_struct *si = swap_info[type];
        struct mm_struct *start_mm;
@@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type)
         * one pass through swap_map is enough, but not necessarily:
         * there are races when an instance of an entry might be missed.
         */
-       while ((i = find_next_to_unuse(si, i)) != 0) {
+       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
                if (signal_pending(current)) {
                        retval = -EINTR;
                        break;
@@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type)
                 * interactive performance.
                 */
                cond_resched();
+               if (frontswap && pages_to_unuse > 0) {
+                       if (!--pages_to_unuse)
+                               break;
+               }
        }
 
        mmput(start_mm);
@@ -1486,7 +1504,8 @@ bad_bmap:
 }
 
 static void enable_swap_info(struct swap_info_struct *p, int prio,
-                               unsigned char *swap_map)
+                               unsigned char *swap_map,
+                               unsigned long *frontswap_map)
 {
        int i, prev;
 
@@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
        else
                p->prio = --least_priority;
        p->swap_map = swap_map;
+       frontswap_map_set(p, frontswap_map);
        p->flags |= SWP_WRITEOK;
        nr_swap_pages += p->pages;
        total_swap_pages += p->pages;
@@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
                swap_list.head = swap_list.next = p->type;
        else
                swap_info[prev]->next = p->type;
+       frontswap_init(p->type);
        spin_unlock(&swap_lock);
 }
 
@@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        spin_unlock(&swap_lock);
 
        oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
-       err = try_to_unuse(type);
+       err = try_to_unuse(type, false, 0); /* force all pages to be unused */
        compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
 
        if (err) {
@@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
                 * sys_swapoff for this swap_info_struct at this point.
                 */
                /* re-insert swap space back into swap_list */
-               enable_swap_info(p, p->prio, p->swap_map);
+               enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
                goto out_dput;
        }
 
@@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        swap_map = p->swap_map;
        p->swap_map = NULL;
        p->flags = 0;
+       frontswap_invalidate_area(type);
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
        vfree(swap_map);
+       vfree(frontswap_map_get(p));
        /* Destroy swap account information */
        swap_cgroup_swapoff(type);
 
@@ -1893,24 +1916,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 
        /*
         * Find out how many pages are allowed for a single swap
-        * device. There are three limiting factors: 1) the number
+        * device. There are two limiting factors: 1) the number
         * of bits for the swap offset in the swp_entry_t type, and
         * 2) the number of bits in the swap pte as defined by the
-        * the different architectures, and 3) the number of free bits
-        * in an exceptional radix_tree entry. In order to find the
+        * different architectures. In order to find the
         * largest possible bit mask, a swap entry with swap type 0
         * and swap offset ~0UL is created, encoded to a swap pte,
         * decoded to a swp_entry_t again, and finally the swap
         * offset is extracted. This will mask all the bits from
         * the initial ~0UL mask that can't be encoded in either
         * the swp_entry_t or the architecture definition of a
-        * swap pte.  Then the same is done for a radix_tree entry.
+        * swap pte.
         */
        maxpages = swp_offset(pte_to_swp_entry(
-                       swp_entry_to_pte(swp_entry(0, ~0UL))));
-       maxpages = swp_offset(radix_to_swp_entry(
-                       swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
-
+                       swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
        if (maxpages > swap_header->info.last_page) {
                maxpages = swap_header->info.last_page + 1;
                /* p->max is an unsigned int: don't overflow it */
@@ -1988,6 +2007,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        sector_t span;
        unsigned long maxpages;
        unsigned char *swap_map = NULL;
+       unsigned long *frontswap_map = NULL;
        struct page *page = NULL;
        struct inode *inode = NULL;
 
@@ -2071,6 +2091,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                error = nr_extents;
                goto bad_swap;
        }
+       /* frontswap enabled? set up bit-per-page map for frontswap */
+       if (frontswap_enabled)
+               frontswap_map = vzalloc(maxpages / sizeof(long));
 
        if (p->bdev) {
                if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
@@ -2086,14 +2109,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        if (swap_flags & SWAP_FLAG_PREFER)
                prio =
                  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
-       enable_swap_info(p, prio, swap_map);
+       enable_swap_info(p, prio, swap_map, frontswap_map);
 
        printk(KERN_INFO "Adding %uk swap on %s.  "
-                       "Priority:%d extents:%d across:%lluk %s%s\n",
+                       "Priority:%d extents:%d across:%lluk %s%s%s\n",
                p->pages<<(PAGE_SHIFT-10), name, p->prio,
                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
                (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
-               (p->flags & SWP_DISCARDABLE) ? "D" : "");
+               (p->flags & SWP_DISCARDABLE) ? "D" : "",
+               (frontswap_map) ? "FS" : "");
 
        mutex_unlock(&swapon_mutex);
        atomic_inc(&proc_poll_event);
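
The reworked read_swap_header() comment above determines the largest usable swap offset by round-tripping ~0UL through the swap-pte encoding and seeing which bits survive. A standalone model with an invented encoding that keeps only 24 offset bits (the real width is architecture dependent):

#include <stdio.h>

#define OFFSET_BITS 24   /* pretend the arch pte leaves 24 bits for the offset */

static unsigned long encode(unsigned long offset)
{
        return offset & ((1UL << OFFSET_BITS) - 1);   /* bits beyond the field are lost */
}

static unsigned long decode(unsigned long pte)
{
        return pte & ((1UL << OFFSET_BITS) - 1);
}

int main(void)
{
        unsigned long maxpages = decode(encode(~0UL)) + 1;

        printf("max swap pages: %lu (0x%lx)\n", maxpages, maxpages);
        return 0;
}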
index eeb3bc9d1d361b6f20821073485f1b8e7c4931d3..66e431060c05616ace60c4d512e4b96e785723ae 100644 (file)
@@ -2688,7 +2688,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
                 * them before going back to sleep.
                 */
                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
-               schedule();
+
+               if (!kthread_should_stop())
+                       schedule();
+
                set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
        } else {
                if (remaining)
@@ -2955,14 +2958,17 @@ int kswapd_run(int nid)
 }
 
 /*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined.  Caller must
+ * hold lock_memory_hotplug().
  */
 void kswapd_stop(int nid)
 {
        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
 
-       if (kswapd)
+       if (kswapd) {
                kthread_stop(kswapd);
+               NODE_DATA(nid)->kswapd = NULL;
+       }
 }
 
 static int __init kswapd_init(void)
index 6089f0cf23b480e4686916823cecafd681a64f42..9096bcb081326c92bb64b6dedb11033073998ca5 100644 (file)
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                break;
 
        case NETDEV_DOWN:
+               if (dev->features & NETIF_F_HW_VLAN_FILTER)
+                       vlan_vid_del(dev, 0);
+
                /* Put all VLANs for this dev in the down state too.  */
                for (i = 0; i < VLAN_N_VID; i++) {
                        vlandev = vlan_group_get_device(grp, i);
index 9ee48cb3017998f47928a3d8413c5df44ec4ac73..3d33ecf133271426aec0df569c5b2a4e8eb03a8a 100644 (file)
@@ -368,7 +368,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                                const char *sptr = va_arg(ap, const char *);
                                uint16_t len = 0;
                                if (sptr)
-                                       len = min_t(uint16_t, strlen(sptr),
+                                       len = min_t(size_t, strlen(sptr),
                                                                USHRT_MAX);
 
                                errcode = p9pdu_writef(pdu, proto_version,
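
The 9p change above matters because min_t() casts both operands to the named type before comparing, so a 64 KiB strlen() result truncated to uint16_t wraps to 0 and slips past the USHRT_MAX clamp. A standalone demonstration with a simplified min_t() stand-in (the kernel macro avoids double evaluation, which this sketch does not):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        size_t len = 65536;     /* a 64 KiB string */

        /* buggy form: 65536 truncates to 0 as uint16_t, so the "clamp" returns 0 */
        printf("min_t(uint16_t,...): %u\n",
               (unsigned)min_t(uint16_t, len, USHRT_MAX));

        /* fixed form: compare as size_t, then the clamp returns USHRT_MAX */
        printf("min_t(size_t,...):   %u\n",
               (unsigned)(uint16_t)min_t(size_t, len, USHRT_MAX));
        return 0;
}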
index 5af18d11b5184805bf7a01c5d94d0759b21c4992..2a167658bb958ae3a302bd80a46649416c697183 100644 (file)
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start,
                s = rest_of_page(data);
                if (s > count)
                        s = count;
+               BUG_ON(index > limit);
                sg_set_buf(&sg[index++], data, s);
                count -= s;
                data += s;
-               BUG_ON(index > limit);
        }
 
        return index-start;
index 0301b328cf0fe04cf39f302ab6061bdbc288c42b..86852963b7f708b92e4596c63a2a2960d8676cda 100644 (file)
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
        if (addr->sat_addr.s_node == ATADDR_BCAST &&
            !sock_flag(sk, SOCK_BROADCAST)) {
 #if 1
-               printk(KERN_WARNING "%s is broken and did not set "
-                                   "SO_BROADCAST. It will break when 2.2 is "
-                                   "released.\n",
+               pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
                        current->comm);
 #else
                return -EACCES;
index 051f7abae66d987177536637c235865258cabbec..779095ded689918de025f48c47db880aa9b5d724 100644 (file)
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
                case AX25_P_NETROM:
                        if (ax25_protocol_is_registered(AX25_P_NETROM))
                                return -ESOCKTNOSUPPORT;
+                       break;
 #endif
 #ifdef CONFIG_ROSE_MODULE
                case AX25_P_ROSE:
index 8bf97515a77d6f3ccfaae8ab3d89e1eaed46013a..c5863f499133b466787b8b5044882c4ddded194a 100644 (file)
@@ -1351,6 +1351,7 @@ void bla_free(struct bat_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: true if the frame arrived in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  *  * we have to race for a claim
@@ -1361,7 +1362,8 @@ void bla_free(struct bat_priv *bat_priv)
  * process the skb.
  *
  */
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast)
 {
        struct ethhdr *ethhdr;
        struct claim search_claim, *claim = NULL;
@@ -1380,7 +1382,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 
        if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
                /* don't allow broadcasts while requests are in flight */
-               if (is_multicast_ether_addr(ethhdr->h_dest))
+               if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
 
        memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1406,8 +1408,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
        }
 
        /* if it is a broadcast ... */
-       if (is_multicast_ether_addr(ethhdr->h_dest)) {
-               /* ... drop it. the responsible gateway is in charge. */
+       if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+               /* ... drop it. the responsible gateway is in charge.
+                *
+                * We need to check is_bcast because with the gateway
+                * feature, broadcasts (like DHCP requests) may be sent
+                * using a unicast packet type.
+                */
                goto handled;
        } else {
                /* seems the client considers us as its best gateway.
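
The bla_rx() hunks above drop a multicast-destination frame only when it also arrived as a BAT_BCAST packet, because the gateway feature may carry broadcasts such as DHCP requests inside unicast packets. A tiny standalone model of the tightened predicate:

#include <stdbool.h>
#include <stdio.h>

static bool drop_for_backbone_gw(bool dest_is_multicast, bool is_bcast)
{
        /* the old code checked only dest_is_multicast */
        return dest_is_multicast && is_bcast;
}

int main(void)
{
        printf("bcast frame, multicast dest : drop=%d\n",
               drop_for_backbone_gw(true, true));
        printf("unicast-carried broadcast   : drop=%d\n",
               drop_for_backbone_gw(true, false));
        return 0;
}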
index e39f93acc28f749793200be5b020df41fd5554e6..dc5227b398d44c7b57eda34adc4b53b9a1281f9a 100644 (file)
@@ -23,7 +23,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
+          bool is_bcast);
 int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
 int bla_is_backbone_gw(struct sk_buff *skb,
                       struct orig_node *orig_node, int hdr_size);
@@ -41,7 +42,7 @@ void bla_free(struct bat_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
-                        short vid)
+                        short vid, bool is_bcast)
 {
        return 0;
 }
index 840e2c64a301156c7b343468bedc65282f8c9b6b..015471d801b42eceb554c0ed86d6a49971389774 100644 (file)
@@ -617,6 +617,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
                         * changes */
                        if (skb_linearize(skb) < 0)
                                goto out;
+                       /* skb_linearize() possibly changed skb->data */
+                       tt_query = (struct tt_query_packet *)skb->data;
 
                        tt_len = tt_query->tt_data * sizeof(struct tt_change);
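
The routing.c fix above re-derives tt_query from skb->data after skb_linearize(), because linearizing may reallocate the buffer and leave earlier pointers stale. The same discipline applies to realloc() in userspace, as this standalone sketch shows:

#include <stdio.h>
#include <stdlib.h>

struct header { int type; };

int main(void)
{
        char *buf = malloc(64);
        struct header *hdr = (struct header *)buf;   /* pointer into the buffer */

        hdr->type = 7;
        buf = realloc(buf, 4096);                    /* may move the data...       */
        hdr = (struct header *)buf;                  /* ...so re-derive the pointer */
        printf("type=%d\n", hdr->type);
        free(buf);
        return 0;
}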
 
index 6e2530b020437e7243ff2a71d6098a11d7d5df52..a0ec0e4ada4c0acf9392136cff17198b766acd1f 100644 (file)
@@ -256,7 +256,11 @@ void interface_rx(struct net_device *soft_iface,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct ethhdr *ethhdr;
        struct vlan_ethhdr *vhdr;
+       struct batman_header *batadv_header = (struct batman_header *)skb->data;
        short vid __maybe_unused = -1;
+       bool is_bcast;
+
+       is_bcast = (batadv_header->packet_type == BAT_BCAST);
 
        /* check if enough space is available for pulling, and pull */
        if (!pskb_may_pull(skb, hdr_size))
@@ -302,7 +306,7 @@ void interface_rx(struct net_device *soft_iface,
        /* Let the bridge loop avoidance check the packet. If it will
         * not handle it, we can safely push it up.
         */
-       if (bla_rx(bat_priv, skb, vid))
+       if (bla_rx(bat_priv, skb, vid, is_bcast))
                goto out;
 
        netif_rx(skb);
index a66c2dcd108800e16f0280f5f42537c9f12d2699..2ab83d7fb1f84c864d5bb2a49295e9b4f8071811 100644 (file)
@@ -141,13 +141,14 @@ static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        struct tt_orig_list_entry *orig_entry;
 
        orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
-       atomic_dec(&orig_entry->orig_node->tt_size);
        orig_node_free_ref(orig_entry->orig_node);
        kfree(orig_entry);
 }
 
 static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
 {
+       /* to avoid race conditions, immediately decrease the tt counter */
+       atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
 
@@ -910,7 +911,6 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                }
                spin_unlock_bh(list_lock);
        }
-       atomic_set(&orig_node->tt_size, 0);
        orig_node->tt_initialised = false;
 }
 
@@ -2031,10 +2031,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
 {
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
-       bool ret = true;
+       bool ret = false;
 
        if (!atomic_read(&bat_priv->ap_isolation))
-               return false;
+               goto out;
 
        tt_local_entry = tt_local_hash_find(bat_priv, dst);
        if (!tt_local_entry)
@@ -2044,10 +2044,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
        if (!tt_global_entry)
                goto out;
 
-       if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+       if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
                goto out;
 
-       ret = false;
+       ret = true;
 
 out:
        if (tt_global_entry)
index 46e7f86acfc99f820b66564f553dc64fe8fbcbac..3e18af4dadc442573960b94abefed550212b9b47 100644 (file)
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
                }
 
                if (sk->sk_state == BT_CONNECTED || !newsock ||
-                   test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
+                   test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
                        bt_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);
index 4eefb7f65cf62e6409fb5b67d0fed541eed5b541..94ad124a4ea3496c4bd971b3bc4ac9d4dda8860f 100644 (file)
@@ -3043,6 +3043,50 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
        hci_dev_unlock(hdev);
 }
 
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+                                        struct sk_buff *skb)
+{
+       struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+              __le16_to_cpu(ev->handle));
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+       if (!conn)
+               goto unlock;
+
+       if (!ev->status)
+               conn->sec_level = conn->pending_sec_level;
+
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+       if (ev->status && conn->state == BT_CONNECTED) {
+               hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
+               hci_conn_put(conn);
+               goto unlock;
+       }
+
+       if (conn->state == BT_CONFIG) {
+               if (!ev->status)
+                       conn->state = BT_CONNECTED;
+
+               hci_proto_connect_cfm(conn, ev->status);
+               hci_conn_put(conn);
+       } else {
+               hci_auth_cfm(conn, ev->status);
+
+               hci_conn_hold(conn);
+               conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+               hci_conn_put(conn);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static inline u8 hci_get_auth_req(struct hci_conn *conn)
 {
        /* If remote requests dedicated bonding follow that lead */
@@ -3559,6 +3603,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_extended_inquiry_result_evt(hdev, skb);
                break;
 
+       case HCI_EV_KEY_REFRESH_COMPLETE:
+               hci_key_refresh_complete_evt(hdev, skb);
+               break;
+
        case HCI_EV_IO_CAPA_REQUEST:
                hci_io_capa_request_evt(hdev, skb);
                break;
index 4deaca78e91ea1853027db58f05441e8e8a3975f..9332bc7aa851fb798533cd5695fd260de68c242f 100644 (file)
@@ -1,6 +1,6 @@
 config BT_HIDP
        tristate "HIDP protocol support"
-       depends on BT && INPUT && HID_SUPPORT
+       depends on BT && INPUT
        select HID
        help
          HIDP (Human Interface Device Protocol) is a transport layer
index 24f144b72a96a87ee5f8fdc2016f613f00615344..4554e80d16a37b8ffdc0a7ac86805da2f2ee06e9 100644 (file)
@@ -1295,7 +1295,12 @@ static void security_timeout(struct work_struct *work)
        struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
                                                security_timer.work);
 
-       l2cap_conn_del(conn->hcon, ETIMEDOUT);
+       BT_DBG("conn %p", conn);
+
+       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+               smp_chan_destroy(conn);
+               l2cap_conn_del(conn->hcon, ETIMEDOUT);
+       }
 }
 
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
@@ -2910,12 +2915,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
 
-               switch (type) {
-               case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
-                       goto done;
-               }
+               if (type != L2CAP_CONF_RFC)
+                       continue;
+
+               if (olen != sizeof(rfc))
+                       break;
+
+               memcpy(&rfc, (void *)val, olen);
+               goto done;
        }
 
        /* Use sane default values in case a misbehaving remote device
index 25d22077607963d66a73cca2d644d4df468fa78f..3e5e3362ea00443b8f1954c1487bf47625b6288f 100644 (file)
@@ -1598,7 +1598,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
-       if (!conn) {
+       if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
                err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
                                 MGMT_STATUS_NOT_CONNECTED);
                goto failed;
@@ -1873,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
                pairing_complete(cmd, mgmt_status(status));
 }
 
+static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status %u", status);
+
+       if (!status)
+               return;
+
+       cmd = find_pairing(conn);
+       if (!cmd)
+               BT_DBG("Unable to find a pending command");
+       else
+               pairing_complete(cmd, mgmt_status(status));
+}
+
 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
 {
@@ -1934,6 +1950,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* For LE, just connecting isn't a proof that the pairing finished */
        if (cp->addr.type == BDADDR_BREDR)
                conn->connect_cfm_cb = pairing_complete_cb;
+       else
+               conn->connect_cfm_cb = le_connect_complete_cb;
 
        conn->security_cfm_cb = pairing_complete_cb;
        conn->disconn_cfm_cb = pairing_complete_cb;
index 6fc7c4708f3e1fa6336a434ef8c63d072e262d5a..37df4e9b3896435164adf36a0c52b8ab8c08030c 100644 (file)
@@ -648,7 +648,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 
        auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
 
-       ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+       ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
        if (ret)
                return SMP_UNSPECIFIED;
 
@@ -703,7 +703,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
        return 0;
 }
 
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 {
        struct smp_ltk *key;
        struct hci_conn *hcon = conn->hcon;
@@ -712,6 +712,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
        if (!key)
                return 0;
 
+       if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+               return 0;
+
        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
                return 1;
 
@@ -732,7 +735,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
 
-       if (smp_ltk_encrypt(conn))
+       if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
                return 0;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -771,7 +774,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
                return 1;
 
        if (hcon->link_mode & HCI_LM_MASTER)
-               if (smp_ltk_encrypt(conn))
+               if (smp_ltk_encrypt(conn, sec_level))
                        goto done;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
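
The new sec_level argument lets smp_ltk_encrypt() refuse to reuse an unauthenticated long-term key when a high security level is requested. A toy version of that policy check, with invented names that only loosely mirror the BT_SECURITY_* constants:

#include <stdbool.h>
#include <stdio.h>

enum sec_level { SEC_LOW = 1, SEC_MEDIUM = 2, SEC_HIGH = 3 };

struct key {
	bool authenticated;
};

/* Only reuse a stored key if it is strong enough for the requested
 * security level, the check the patch adds to smp_ltk_encrypt(). */
static bool key_usable(const struct key *k, enum sec_level requested)
{
	if (requested > SEC_MEDIUM && !k->authenticated)
		return false;
	return true;
}

int main(void)
{
	struct key unauth = { .authenticated = false };
	struct key auth   = { .authenticated = true  };

	printf("unauth key for HIGH:   %d\n", key_usable(&unauth, SEC_HIGH));
	printf("unauth key for MEDIUM: %d\n", key_usable(&unauth, SEC_MEDIUM));
	printf("auth key for HIGH:     %d\n", key_usable(&auth, SEC_HIGH));
	return 0;
}
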
index 0a942fbccc9a64592d486199608e6527ebc8de8f..e1144e1617be38814ebb2fb497755cb10b646559 100644 (file)
@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
                return -ENOMEM;
 
        dev_net_set(dev, net);
+       dev->rtnl_link_ops = &br_link_ops;
 
        res = register_netdev(dev);
        if (res)
index 2080485515f1be56299f01a92b5c98e034636dde..fe41260fbf38b28bb121dcc2235a5d27b83e27b7 100644 (file)
@@ -208,7 +208,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops br_link_ops __read_mostly = {
+struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind           = "bridge",
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
index 1a8ad4fb9a6ba9e9246011e9ea674778591fd55f..a768b2408edff64890dde477df5918c102630a5e 100644 (file)
@@ -549,6 +549,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
 #endif
 
 /* br_netlink.c */
+extern struct rtnl_link_ops br_link_ops;
 extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
index aa6f716524fd3c0aecbeb0b9393f84df2c8046de..8c83c175b03a9f2379c253bd1c9e160b72457320 100644 (file)
@@ -4,8 +4,7 @@
  * Author:     Sjur Brendeland/sjur.brandeland@stericsson.com
  * License terms: GNU General Public License (GPL) version 2
  *
- * Borrowed heavily from file: pn_dev.c. Thanks to
- *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
  *  and Sakari Ailus <sakari.ailus@nokia.com>
  */
 
@@ -562,9 +561,9 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
+       unregister_pernet_subsys(&caif_net_ops);
 }
 
 module_init(caif_device_init);
index fb8944355264689b4e61ba9f9402fe8cad902420..78f1cdad5b332b91401570aeccbc12af5bd33d1e 100644 (file)
@@ -220,6 +220,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
                                                cfsk_hold, cfsk_put);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
+               cf_sk->sk.sk_shutdown = 0;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;
 
index cde1b4a20f758fe8055f26a7f24b40f16767ae55..46cca3a91d198093b86ea4d5701ecd42ce9dbce7 100644 (file)
@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (err < 0)
                goto free_skb;
 
-       /* to be able to check the received tx sock reference in raw_rcv() */
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
-
        skb->dev = dev;
        skb->sk  = sk;
 
index a776f751edbf223220b63cdac9dadb38187bb2b6..ba4323bce0e92beff34d13dcdc45b63a1b14dce7 100644 (file)
@@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client)
        /* unmount */
        ceph_osdc_stop(&client->osdc);
 
-       /*
-        * make sure osd connections close out before destroying the
-        * auth module, which is needed to free those connections'
-        * ceph_authorizers.
-        */
-       ceph_msgr_flush();
-
        ceph_monc_stop(&client->monc);
 
        ceph_debugfs_client_cleanup(client);
index 524f4e4f598b845a7242c0243efb1a4e6a843955..10255e81be79d84c5428504c2a79be3180626f79 100644 (file)
@@ -563,6 +563,10 @@ static void prepare_write_message(struct ceph_connection *con)
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }
+#ifdef CONFIG_BLOCK
+       else
+               m->bio_iter = NULL;
+#endif
 
        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
@@ -1419,7 +1423,7 @@ static int process_connect(struct ceph_connection *con)
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
-                    le32_to_cpu(con->in_connect.connect_seq));
+                    le32_to_cpu(con->in_reply.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
@@ -1446,10 +1450,10 @@ static int process_connect(struct ceph_connection *con)
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
-               dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
+               dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
-                    le32_to_cpu(con->in_connect.connect_seq));
-               con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
+                    le32_to_cpu(con->in_reply.connect_seq));
+               con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
                ceph_con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
@@ -1464,9 +1468,9 @@ static int process_connect(struct ceph_connection *con)
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
-                    le32_to_cpu(con->in_connect.global_seq));
+                    le32_to_cpu(con->in_reply.global_seq));
                get_global_seq(con->msgr,
-                              le32_to_cpu(con->in_connect.global_seq));
+                              le32_to_cpu(con->in_reply.global_seq));
                ceph_con_out_kvec_reset(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
index 10d6008d31f21f982fa929a023a4f0a5a1b02d2a..d0649a9655be3b7bbf1baaa03220548c74156ff0 100644 (file)
@@ -847,6 +847,14 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
        mutex_unlock(&monc->mutex);
 
+       /*
+        * flush msgr queue before we destroy ourselves to ensure that:
+        *  - any work that references our embedded con is finished.
+        *  - any osd_client or other work that may reference an authorizer
+        *    finishes before we shut down the auth subsystem.
+        */
+       ceph_msgr_flush();
+
        ceph_auth_destroy(monc->auth);
 
        ceph_msg_put(monc->m_auth);
index 1ffebed5ce0f9a629ad2733349b8e33c326850d5..ca59e66c9787303805519f2bf325cc5d5817ff55 100644 (file)
@@ -139,15 +139,15 @@ void ceph_osdc_release_request(struct kref *kref)
 
        if (req->r_request)
                ceph_msg_put(req->r_request);
-       if (req->r_reply)
-               ceph_msg_put(req->r_reply);
        if (req->r_con_filling_msg) {
                dout("release_request revoking pages %p from con %p\n",
                     req->r_pages, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg,
                                      req->r_reply);
-               ceph_con_put(req->r_con_filling_msg);
+               req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
        }
+       if (req->r_reply)
+               ceph_msg_put(req->r_reply);
        if (req->r_own_pages)
                ceph_release_page_vector(req->r_pages,
                                         req->r_num_pages);
@@ -1216,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        if (req->r_con_filling_msg == con && req->r_reply == msg) {
                dout(" dropping con_filling_msg ref %p\n", con);
                req->r_con_filling_msg = NULL;
-               ceph_con_put(con);
+               con->ops->put(con);
        }
 
        if (!req->r_got_reply) {
@@ -2028,7 +2028,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                dout("get_reply revoking msg %p from old con %p\n",
                     req->r_reply, req->r_con_filling_msg);
                ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
-               ceph_con_put(req->r_con_filling_msg);
+               req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
                req->r_con_filling_msg = NULL;
        }
 
@@ -2063,7 +2063,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 #endif
        }
        *skip = 0;
-       req->r_con_filling_msg = ceph_con_get(con);
+       req->r_con_filling_msg = con->ops->get(con);
        dout("get_reply tid %lld %p\n", tid, m);
 
 out:
index cd0981977f5c92ee82cb42651ebfcbfb64c9c82f..1cb0d8a6aa6c5cd3d23741cd5b941a23805b08bd 100644 (file)
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
                no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
-                              name);
+                       pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                               name);
        }
 }
 EXPORT_SYMBOL(dev_load);
@@ -2089,25 +2089,6 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
        return 0;
 }
 
-/*
- * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested or the sk-reference
- * is needed on driver level for other reasons, e.g. see net/can/raw.c
- */
-static inline void skb_orphan_try(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       if (sk && !skb_shinfo(skb)->tx_flags) {
-               /* skb_tx_hash() wont be able to get sk.
-                * We copy sk_hash into skb->rxhash
-                */
-               if (!skb->rxhash)
-                       skb->rxhash = sk->sk_hash;
-               skb_orphan(skb);
-       }
-}
-
 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
@@ -2193,8 +2174,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
 
-               skb_orphan_try(skb);
-
                features = netif_skb_features(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -2304,7 +2283,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
-               hash = (__force u16) skb->protocol ^ skb->rxhash;
+               hash = (__force u16) skb->protocol;
        hash = jhash_1word(hash, hashrnd);
 
        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
@@ -2465,8 +2444,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-       if ((!skb->priority) && (skb->sk) && map)
-               skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+       if (!skb->priority && skb->sk && map) {
+               unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+               if (prioidx < map->priomap_len)
+                       skb->priority = map->priomap[prioidx];
+       }
 }
 #else
 #define skb_update_prio(skb)
@@ -6300,7 +6283,8 @@ static struct hlist_head *netdev_create_hash(void)
 /* Initialize per network namespace state */
 static int __net_init netdev_init(struct net *net)
 {
-       INIT_LIST_HEAD(&net->dev_base_head);
+       if (net != &init_net)
+               INIT_LIST_HEAD(&net->dev_base_head);
 
        net->dev_name_head = netdev_create_hash();
        if (net->dev_name_head == NULL)
index ea5fb9fcc3f5937777db311ea88a75ae3f4b81f4..d23b6682f4e95cfd029cd19db31252184ec03d2d 100644 (file)
@@ -36,9 +36,6 @@
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF;
 static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-       struct work_struct dm_alert_work;
-       struct sk_buff __rcu *skb;
-       atomic_t dm_hit_count;
-       struct timer_list send_timer;
-       int cpu;
+       spinlock_t              lock;
+       struct sk_buff          *skb;
+       struct work_struct      dm_alert_work;
+       struct timer_list       send_timer;
 };
 
 struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@ static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
        size_t al;
        struct net_dm_alert_msg *msg;
        struct nlattr *nla;
        struct sk_buff *skb;
-       struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
+       unsigned long flags;
 
        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
                                  sizeof(struct net_dm_alert_msg));
                msg = nla_data(nla);
                memset(msg, 0, al);
-       } else
-               schedule_work_on(data->cpu, &data->dm_alert_work);
-
-       /*
-        * Don't need to lock this, since we are guaranteed to only
-        * run this on a single cpu at a time.
-        * Note also that we only update data->skb if the old and new skb
-        * pointers don't match.  This ensures that we don't continually call
-        * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
-        */
-       if (skb != oskb) {
-               rcu_assign_pointer(data->skb, skb);
-
-               synchronize_rcu();
-
-               atomic_set(&data->dm_hit_count, dm_hit_limit);
+       } else {
+               mod_timer(&data->send_timer, jiffies + HZ / 10);
        }
 
+       spin_lock_irqsave(&data->lock, flags);
+       swap(data->skb, skb);
+       spin_unlock_irqrestore(&data->lock, flags);
+
+       return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
        struct sk_buff *skb;
-       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+       struct per_cpu_dm_data *data;
 
-       WARN_ON_ONCE(data->cpu != smp_processor_id());
+       data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-       /*
-        * Grab the skb we're about to send
-        */
-       skb = rcu_dereference_protected(data->skb, 1);
-
-       /*
-        * Replace it with a new one
-        */
-       reset_per_cpu_data(data);
+       skb = reset_per_cpu_data(data);
 
-       /*
-        * Ship it!
-        */
        if (skb)
                genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
-
-       put_cpu_var(dm_cpu_data);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period.  Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-       struct per_cpu_dm_data *data =  &get_cpu_var(dm_cpu_data);
-
-       schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+       struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
-       put_cpu_var(dm_cpu_data);
+       schedule_work(&data->dm_alert_work);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        struct nlattr *nla;
        int i;
        struct sk_buff *dskb;
-       struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
+       struct per_cpu_dm_data *data;
+       unsigned long flags;
 
-       rcu_read_lock();
-       dskb = rcu_dereference(data->skb);
+       local_irq_save(flags);
+       data = &__get_cpu_var(dm_cpu_data);
+       spin_lock(&data->lock);
+       dskb = data->skb;
 
        if (!dskb)
                goto out;
 
-       if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-               /*
-                * we're already at zero, discard this hit
-                */
-               goto out;
-       }
-
        nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
        for (i = 0; i < msg->entries; i++) {
                if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
                        msg->points[i].count++;
-                       atomic_inc(&data->dm_hit_count);
                        goto out;
                }
        }
-
+       if (msg->entries == dm_hit_limit)
+               goto out;
        /*
         * We need to create a new entry
         */
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
        if (!timer_pending(&data->send_timer)) {
                data->send_timer.expires = jiffies + dm_delay * HZ;
-               add_timer_on(&data->send_timer, smp_processor_id());
+               add_timer(&data->send_timer);
        }
 
 out:
-       rcu_read_unlock();
-       put_cpu_var(dm_cpu_data);
-       return;
+       spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void)
 
        for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
-               data->cpu = cpu;
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
                init_timer(&data->send_timer);
-               data->send_timer.data = cpu;
+               data->send_timer.data = (unsigned long)data;
                data->send_timer.function = sched_send_work;
+               spin_lock_init(&data->lock);
                reset_per_cpu_data(data);
        }
 
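The rewrite replaces the RCU/atomic hit-count scheme with a per-CPU spinlock and a plain pointer swap: the work handler installs a fresh buffer under the lock and then sends the old one with no lock held. A rough user-space sketch of that swap-under-lock shape using a pthread mutex; dm_data, swap_buffer and the fixed 256-byte buffer are assumptions of the sketch, not the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dm_data {
	pthread_mutex_t lock;
	char *buf;		/* buffer currently being filled */
};

/* Allocate a fresh buffer and swap it in under the lock; the caller
 * gets the old, now-private buffer and can send or free it without
 * holding the lock, the same shape as reset_per_cpu_data() above. */
static char *swap_buffer(struct dm_data *d)
{
	char *fresh = calloc(1, 256);
	char *old;

	pthread_mutex_lock(&d->lock);
	old = d->buf;
	d->buf = fresh;
	pthread_mutex_unlock(&d->lock);

	return old;
}

int main(void)
{
	struct dm_data d = { .lock = PTHREAD_MUTEX_INITIALIZER, .buf = NULL };
	char *old;

	swap_buffer(&d);		/* install the first buffer */

	pthread_mutex_lock(&d.lock);
	if (d.buf)
		strcpy(d.buf, "drop record");
	pthread_mutex_unlock(&d.lock);

	old = swap_buffer(&d);		/* take the filled buffer private */
	printf("sending: %s\n", old ? old : "(none)");
	free(old);
	free(d.buf);
	return 0;
}
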
index a3eddb515d1b282dc9dd8c597e09d8476de7916d..d4ce2dc712e34b7b1cb974c5e938313f58e9a8aa 100644 (file)
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
 /**
  *     sk_unattached_filter_create - create an unattached filter
  *     @fprog: the filter program
- *     @sk: the socket to use
+ *     @pfp: the unattached filter that is created
  *
- * Create a filter independent ofr any socket. We first run some
+ * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
index eb09f8bbbf075bcc10f3335198dc8e097c2f9316..d81d026138f0810471ce4cf2540c0ec4229d853f 100644 (file)
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
 
-       for (h = 0; h < (1 << nht->hash_shift); h++) {
-               if (h < s_h)
-                       continue;
+       for (h = s_h; h < (1 << nht->hash_shift); h++) {
                if (h > s_h)
                        s_idx = 0;
                for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 
        read_lock_bh(&tbl->lock);
 
-       for (h = 0; h <= PNEIGH_HASHMASK; h++) {
-               if (h < s_h)
-                       continue;
+       for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
                if (h > s_h)
                        s_idx = 0;
                for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
        struct neigh_table *tbl;
        int t, family, s_t;
        int proxy = 0;
-       int err = 0;
+       int err;
 
        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
        s_t = cb->args[0];
 
-       for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+       for (tbl = neigh_tables, t = 0; tbl;
             tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                        err = pneigh_dump_table(tbl, skb, cb);
                else
                        err = neigh_dump_table(tbl, skb, cb);
+               if (err < 0)
+                       break;
        }
        read_unlock(&neigh_tbl_lock);
 
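Starting the walk at s_h (and breaking out of the table loop on error) avoids re-scanning buckets that an earlier, interrupted dump already covered. A condensed sketch of a resumable bucket dump; the bucket count, per-call budget and function names are invented, and the real code also tracks the position inside each bucket:

#include <stdio.h>

#define NBUCKETS 8

/* Start directly at the saved bucket instead of walking from 0 and
 * skipping, the simplification made to neigh_dump_table() above.
 * Returns where to resume next time, or NBUCKETS when the dump is done. */
static int dump_from(int s_h, int max_per_call)
{
	int h, done = 0;

	for (h = s_h; h < NBUCKETS; h++) {
		printf("dumping bucket %d\n", h);
		if (++done == max_per_call)
			return h + 1;
	}
	return NBUCKETS;
}

int main(void)
{
	int pos = 0;

	while (pos < NBUCKETS)
		pos = dump_from(pos, 3);	/* small budget forces resumption */
	return 0;
}
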
index dddbacb8f28ccba180cd20855476d5d7e351b1b8..42f1e1c7514f67ad56df06ffe53e6c94a094681a 100644 (file)
@@ -27,7 +27,9 @@ static DEFINE_MUTEX(net_mutex);
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
-struct net init_net;
+struct net init_net = {
+       .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
+};
 EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS   13 /* +1 for len +2 for rcu_head */
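
Giving init_net a static initializer means its device list is valid before any pernet init code runs, which is why netdev_init() may now skip the runtime INIT_LIST_HEAD for &init_net. A stripped-down illustration of the LIST_HEAD_INIT idiom; struct net here is a one-field stand-in and the macro is re-declared locally for the sketch:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct net {
	struct list_head dev_base_head;
};

/* Statically initialized, so the list is a valid empty list even before
 * any runtime initialization function has run. */
static struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};

int main(void)
{
	int empty = (init_net.dev_base_head.next == &init_net.dev_base_head &&
		     init_net.dev_base_head.prev == &init_net.dev_base_head);

	printf("init_net list empty at startup: %d\n", empty);
	return 0;
}
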
index 3d84fb9d88739629b32c77f1a1d77c7f55ad7630..f9f40b932e4b855fc1a4dc3b3c74620efdc4f970 100644 (file)
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
-       int total_len, eth_len, ip_len, udp_len;
+       int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
 
        udp_len = len + sizeof(*udph);
-       ip_len = eth_len = udp_len + sizeof(*iph);
-       total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
+       ip_len = udp_len + sizeof(*iph);
+       total_len = ip_len + LL_RESERVED_SPACE(np->dev);
 
-       skb = find_skb(np, total_len, total_len - len);
+       skb = find_skb(np, total_len + np->dev->needed_tailroom,
+                      total_len - len);
        if (!skb)
                return;
 
        skb_copy_to_linear_data(skb, msg, len);
-       skb->len += len;
+       skb_put(skb, len);
 
        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
index 5b8aa2fae48b84e3255ecdfe83535457bd170901..b2e9caa1ad1aa761ba98886a23ce3823bdc179ee 100644 (file)
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
                return -ENOSPC;
        }
        set_bit(prioidx, prioidx_map);
+       if (atomic_read(&max_prioidx) < prioidx)
+               atomic_set(&max_prioidx, prioidx);
        spin_unlock_irqrestore(&prioidx_map_lock, flags);
-       atomic_set(&max_prioidx, prioidx);
        *prio = prioidx;
        return 0;
 }
@@ -64,7 +65,7 @@ static void put_prioidx(u32 idx)
        spin_unlock_irqrestore(&prioidx_map_lock, flags);
 }
 
-static void extend_netdev_table(struct net_device *dev, u32 new_len)
+static int extend_netdev_table(struct net_device *dev, u32 new_len)
 {
        size_t new_size = sizeof(struct netprio_map) +
                           ((sizeof(u32) * new_len));
@@ -76,7 +77,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 
        if (!new_priomap) {
                pr_warn("Unable to alloc new priomap!\n");
-               return;
+               return -ENOMEM;
        }
 
        for (i = 0;
@@ -89,46 +90,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
        rcu_assign_pointer(dev->priomap, new_priomap);
        if (old_priomap)
                kfree_rcu(old_priomap, rcu);
+       return 0;
 }
 
-static void update_netdev_tables(void)
+static int write_update_netdev_table(struct net_device *dev)
 {
+       int ret = 0;
+       u32 max_len;
+       struct netprio_map *map;
+
+       rtnl_lock();
+       max_len = atomic_read(&max_prioidx) + 1;
+       map = rtnl_dereference(dev->priomap);
+       if (!map || map->priomap_len < max_len)
+               ret = extend_netdev_table(dev, max_len);
+       rtnl_unlock();
+
+       return ret;
+}
+
+static int update_netdev_tables(void)
+{
+       int ret = 0;
        struct net_device *dev;
-       u32 max_len = atomic_read(&max_prioidx) + 1;
+       u32 max_len;
        struct netprio_map *map;
 
        rtnl_lock();
+       max_len = atomic_read(&max_prioidx) + 1;
        for_each_netdev(&init_net, dev) {
                map = rtnl_dereference(dev->priomap);
-               if ((!map) ||
-                   (map->priomap_len < max_len))
-                       extend_netdev_table(dev, max_len);
+               /*
+                * don't allocate priomap if we didn't
+                * change net_prio.ifpriomap (map == NULL),
+                * this will speed up skb_update_prio.
+                */
+               if (map && map->priomap_len < max_len) {
+                       ret = extend_netdev_table(dev, max_len);
+                       if (ret < 0)
+                               break;
+               }
        }
        rtnl_unlock();
+       return ret;
 }
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
        struct cgroup_netprio_state *cs;
-       int ret;
+       int ret = -EINVAL;
 
        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);
 
-       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
-               kfree(cs);
-               return ERR_PTR(-EINVAL);
-       }
+       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
+               goto out;
 
        ret = get_prioidx(&cs->prioidx);
-       if (ret != 0) {
+       if (ret < 0) {
                pr_warn("No space in priority index array\n");
-               kfree(cs);
-               return ERR_PTR(ret);
+               goto out;
+       }
+
+       ret = update_netdev_tables();
+       if (ret < 0) {
+               put_prioidx(cs->prioidx);
+               goto out;
        }
 
        return &cs->css;
+out:
+       kfree(cs);
+       return ERR_PTR(ret);
 }
 
 static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +175,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
        rtnl_lock();
        for_each_netdev(&init_net, dev) {
                map = rtnl_dereference(dev->priomap);
-               if (map)
+               if (map && cs->prioidx < map->priomap_len)
                        map->priomap[cs->prioidx] = 0;
        }
        rtnl_unlock();
@@ -165,7 +199,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                map = rcu_dereference(dev->priomap);
-               priority = map ? map->priomap[prioidx] : 0;
+               priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
                cb->fill(cb, dev->name, priority);
        }
        rcu_read_unlock();
@@ -220,13 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
        if (!dev)
                goto out_free_devname;
 
-       update_netdev_tables();
-       ret = 0;
+       ret = write_update_netdev_table(dev);
+       if (ret < 0)
+               goto out_put_dev;
+
        rcu_read_lock();
        map = rcu_dereference(dev->priomap);
        if (map)
                map->priomap[prioidx] = priority;
        rcu_read_unlock();
+
+out_put_dev:
        dev_put(dev);
 
 out_free_devname:
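
extend_netdev_table() now reports allocation failure, and every caller either backs out (cgrp_create) or bounds-checks the map length before indexing it. A compact user-space sketch of a growable flexible-array map with the same return-code and bounds-check discipline; prio_map and extend_map are invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct prio_map {
	unsigned int len;
	unsigned int prio[];	/* flexible array, like netprio_map */
};

/* Grow the map; return 0 or a failure code so callers can back out,
 * which is what extend_netdev_table() now does in the patch. */
static int extend_map(struct prio_map **mapp, unsigned int new_len)
{
	struct prio_map *old = *mapp, *grown;

	grown = calloc(1, sizeof(*grown) + new_len * sizeof(grown->prio[0]));
	if (!grown)
		return -1;

	grown->len = new_len;
	if (old)
		memcpy(grown->prio, old->prio, old->len * sizeof(old->prio[0]));
	*mapp = grown;
	free(old);
	return 0;
}

int main(void)
{
	struct prio_map *map = NULL;
	unsigned int idx = 5;

	if (extend_map(&map, idx + 1) < 0)
		return 1;

	/* Readers bounds-check against the map length, as skb_update_prio()
	 * and read_priomap() now do. */
	if (idx < map->len)
		map->prio[idx] = 7;

	printf("prio[%u] = %u (len %u)\n", idx, map->prio[idx], map->len);
	free(map);
	return 0;
}
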
index 016694d624843c8ca1df3013639ffd4f6ae75f39..d124306b81fdbf73171d0921e401321f44f63711 100644 (file)
@@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
        unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+       if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data = netdev_alloc_frag(fragsz);
 
                if (likely(data)) {
@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
@@ -3361,7 +3362,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
  * @to: prior buffer
  * @from: buffer to add
  * @fragstolen: pointer to boolean
- *
+ * @delta_truesize: how much more was allocated than was requested
  */
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                      bool *fragstolen, int *delta_truesize)
index 6fbb2ad7bb6df480a71612054003a6176fc12447..16705611589ab6abd1a2e67811d0e38d6d8b4d26 100644 (file)
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        mtu = dev->mtu;
        pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+       if (size > mtu) {
+               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+               err = -EINVAL;
+               goto out_dev;
+       }
+
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        if (err < 0)
                goto out_skb;
 
-       if (size > mtu) {
-               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
-               goto out_skb;
-       }
-
        skb->dev = dev;
        skb->sk  = sk;
        skb->protocol = htons(ETH_P_IEEE802154);
index c48adc565e9239a45c600d791c8f97059e22696f..667c1d4ca9847c627127dc633e19c7c94f354188 100644 (file)
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                case CIPSO_V4_TAG_LOCAL:
                        /* This is a non-standard tag that we only allow for
                         * local connections, so if the incoming interface is
-                        * not the loopback device drop the packet. */
-                       if (!(skb->dev->flags & IFF_LOOPBACK)) {
+                        * not the loopback device drop the packet. Further,
+                        * there is no legitimate reason for setting this from
+                        * userspace so reject it if skb is NULL. */
+                       if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
                                err_offset = opt_iter;
                                goto validate_return_locked;
                        }
index d4d61b694fab9bc497b1cccb808a3a568ad30cc2..dfba343b25092de39c9a5d1eea5d43226690e984 100644 (file)
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
+static void inetpeer_inval_rcu(struct rcu_head *head)
+{
+       struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
+
+       spin_lock_bh(&gc_lock);
+       list_add_tail(&p->gc_list, &gc_list);
+       spin_unlock_bh(&gc_lock);
+
+       schedule_delayed_work(&gc_work, gc_delay);
+}
+
 void inetpeer_invalidate_tree(int family)
 {
        struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family)
        prev = cmpxchg(&base->root, old, new);
        if (prev == old) {
                base->total = 0;
-               spin_lock(&gc_lock);
-               list_add_tail(&prev->gc_list, &gc_list);
-               spin_unlock(&gc_lock);
-               schedule_delayed_work(&gc_work, gc_delay);
+               call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
        }
 
 out:
index e5c44fc586abe7157f8b75b8f164a7222ae8548c..ab09b126423ce3e56fd1fee2f6bda54e4b851022 100644 (file)
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
index a9e519ad6db53d544c73d145724602cde1e3f48e..c94bbc6f2ba331bb9e261692151f55f78277258b 100644 (file)
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
        struct ip_options *opt = &(IPCB(skb)->opt);
 
        IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
index 0c220a416626af196f534ab062920c169f6dcd7a..60832766196054f1f03d2aa12558febf68e032d8 100644 (file)
@@ -1349,8 +1349,8 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                        if (w->leaf && fn->fn_flags & RTN_RTINFO) {
                                int err;
 
-                               if (w->count < w->skip) {
-                                       w->count++;
+                               if (w->skip) {
+                                       w->skip--;
                                        continue;
                                }
 
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                                neigh_flags = neigh->flags;
                                neigh_release(neigh);
                        }
-                       if (neigh_flags & NTF_ROUTER) {
+                       if (!(neigh_flags & NTF_ROUTER)) {
                                RT6_TRACE("purging route %p via non-router but gateway\n",
                                          rt);
                                return -1;
index 17b8c67998bb80dc5e7052af210c7b64aa1471ee..decc21d19c53e4b0c073b44e02deba46b6eac432 100644 (file)
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb)
        hdr->hop_limit--;
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
        return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
 
index b15dc08643a42f5a45ea9bb0bd2bd0ec1865a377..461e47c8e95620456e83710eaf99643c1382c8fc 100644 (file)
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
        IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+                        IPSTATS_MIB_OUTOCTETS, skb->len);
        return dst_output(skb);
 }
 
index 999a982ad3fd7d7abac40211b50320fc4c038109..becb048d18d402f0335bee4e9d0c198f94eba6c4 100644 (file)
@@ -2957,10 +2957,6 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
        net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
 
-#ifdef CONFIG_PROC_FS
-       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
-       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
-#endif
        net->ipv6.ip6_rt_gc_expire = 30*HZ;
 
        ret = 0;
@@ -2981,10 +2977,6 @@ out_ip6_dst_ops:
 
 static void __net_exit ip6_route_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ipv6_route");
-       proc_net_remove(net, "rt6_stats");
-#endif
        kfree(net->ipv6.ip6_null_entry);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        kfree(net->ipv6.ip6_prohibit_entry);
@@ -2993,11 +2985,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
        dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
+static int __net_init ip6_route_net_init_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+#endif
+       return 0;
+}
+
+static void __net_exit ip6_route_net_exit_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_remove(net, "ipv6_route");
+       proc_net_remove(net, "rt6_stats");
+#endif
+}
+
 static struct pernet_operations ip6_route_net_ops = {
        .init = ip6_route_net_init,
        .exit = ip6_route_net_exit,
 };
 
+static struct pernet_operations ip6_route_net_late_ops = {
+       .init = ip6_route_net_init_late,
+       .exit = ip6_route_net_exit_late,
+};
+
 static struct notifier_block ip6_route_dev_notifier = {
        .notifier_call = ip6_route_dev_notify,
        .priority = 0,
@@ -3047,19 +3061,25 @@ int __init ip6_route_init(void)
        if (ret)
                goto xfrm6_init;
 
+       ret = register_pernet_subsys(&ip6_route_net_late_ops);
+       if (ret)
+               goto fib6_rules_init;
+
        ret = -ENOBUFS;
        if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
        ret = register_netdevice_notifier(&ip6_route_dev_notifier);
        if (ret)
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
 out:
        return ret;
 
+out_register_late_subsys:
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
        fib6_rules_cleanup();
 xfrm6_init:
@@ -3078,6 +3098,7 @@ out_kmem_cache:
 void ip6_route_cleanup(void)
 {
        unregister_netdevice_notifier(&ip6_route_dev_notifier);
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
        fib6_rules_cleanup();
        xfrm6_fini();
        fib6_gc_cleanup();
index 3a9aec29581a14cb88f4948ec32737026bfd22ce..9df64a50b07569f3f9050a5a73a3e6c448c63502 100644 (file)
@@ -1212,7 +1212,8 @@ have_isn:
        tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
-       security_inet_conn_request(sk, skb, req);
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_release;
 
        if (tcp_v6_send_synack(sk, req,
                               (struct request_values *)&tmp_ext,
index 07d7d55a1b93af13013b23c361d8c2f7ff8382aa..cd6f7a991d8035bdfc6d883a8bcf21a433405030 100644 (file)
@@ -372,7 +372,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = ETH_P_AF_IUCV;
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
index 443591d629caadff0da53b2c861b8748e61edf7e..47b259fccd278dc168cb5109644d440530a5c5ed 100644 (file)
@@ -42,6 +42,11 @@ struct l2tp_eth {
        struct sock             *tunnel_sock;
        struct l2tp_session     *session;
        struct list_head        list;
+       atomic_long_t           tx_bytes;
+       atomic_long_t           tx_packets;
+       atomic_long_t           rx_bytes;
+       atomic_long_t           rx_packets;
+       atomic_long_t           rx_errors;
 };
 
 /* via l2tp_session_priv() */
@@ -88,24 +93,40 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        struct l2tp_eth *priv = netdev_priv(dev);
        struct l2tp_session *session = priv->session;
 
+       atomic_long_add(skb->len, &priv->tx_bytes);
+       atomic_long_inc(&priv->tx_packets);
+
        l2tp_xmit_skb(session, skb, session->hdr_len);
 
-       dev->stats.tx_bytes += skb->len;
-       dev->stats.tx_packets++;
+       return NETDEV_TX_OK;
+}
 
-       return 0;
+static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
+                                                     struct rtnl_link_stats64 *stats)
+{
+       struct l2tp_eth *priv = netdev_priv(dev);
+
+       stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
+       stats->tx_packets = atomic_long_read(&priv->tx_packets);
+       stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
+       stats->rx_packets = atomic_long_read(&priv->rx_packets);
+       stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+       return stats;
 }
 
+
 static struct net_device_ops l2tp_eth_netdev_ops = {
        .ndo_init               = l2tp_eth_dev_init,
        .ndo_uninit             = l2tp_eth_dev_uninit,
        .ndo_start_xmit         = l2tp_eth_dev_xmit,
+       .ndo_get_stats64        = l2tp_eth_get_stats64,
 };
 
 static void l2tp_eth_dev_setup(struct net_device *dev)
 {
        ether_setup(dev);
-       dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags         &= ~IFF_TX_SKB_SHARING;
+       dev->features           |= NETIF_F_LLTX;
        dev->netdev_ops         = &l2tp_eth_netdev_ops;
        dev->destructor         = free_netdev;
 }
@@ -114,17 +135,17 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 {
        struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
        struct net_device *dev = spriv->dev;
+       struct l2tp_eth *priv = netdev_priv(dev);
 
        if (session->debug & L2TP_MSG_DATA) {
                unsigned int length;
-               u8 *ptr = skb->data;
 
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
                        goto error;
 
                pr_debug("%s: eth recv\n", session->name);
-               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
        }
 
        if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -139,15 +160,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        nf_reset(skb);
 
        if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += data_len;
-       } else
-               dev->stats.rx_errors++;
-
+               atomic_long_inc(&priv->rx_packets);
+               atomic_long_add(data_len, &priv->rx_bytes);
+       } else {
+               atomic_long_inc(&priv->rx_errors);
+       }
        return;
 
 error:
-       dev->stats.rx_errors++;
+       atomic_long_inc(&priv->rx_errors);
        kfree_skb(skb);
 }
 
@@ -162,6 +183,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
                if (dev) {
                        unregister_netdev(dev);
                        spriv->dev = NULL;
+                       module_put(THIS_MODULE);
                }
        }
 }
@@ -249,6 +271,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
        if (rc < 0)
                goto out_del_dev;
 
+       __module_get(THIS_MODULE);
        /* Must be done after register_netdev() */
        strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
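The driver switches from unsynchronized dev->stats updates to lockless atomic_long_t counters read back through the new ndo_get_stats64 callback, which is what makes enabling NETIF_F_LLTX safe here. A self-contained C11 sketch of the same counter/snapshot split; eth_stats, xmit and get_stats are made-up stand-ins:

#include <stdatomic.h>
#include <stdio.h>

struct eth_stats {
	atomic_long tx_packets;
	atomic_long tx_bytes;
};

/* Transmit path: bump counters without a lock. */
static void xmit(struct eth_stats *s, long len)
{
	atomic_fetch_add(&s->tx_packets, 1);
	atomic_fetch_add(&s->tx_bytes, len);
}

/* Stats read path: take a snapshot, the role played by the new
 * l2tp_eth_get_stats64() callback. */
static void get_stats(struct eth_stats *s, long *pkts, long *bytes)
{
	*pkts  = atomic_load(&s->tx_packets);
	*bytes = atomic_load(&s->tx_bytes);
}

int main(void)
{
	struct eth_stats s;
	long pkts, bytes;

	atomic_init(&s.tx_packets, 0);
	atomic_init(&s.tx_bytes, 0);

	xmit(&s, 1500);
	xmit(&s, 60);
	get_stats(&s, &pkts, &bytes);
	printf("tx_packets=%ld tx_bytes=%ld\n", pkts, bytes);
	return 0;
}
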
index 70614e7affabded003aef1bf5ed4c097d4b515fa..61d8b75d2686c0272a1618887c08c34de2b4abe5 100644 (file)
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
-               if (connected)
+               if (connected) {
                        sk_setup_caps(sk, &rt->dst);
-               else
-                       dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+               } else {
+                       skb_dst_set(skb, &rt->dst);
+                       goto xmit;
+               }
        }
 
         * We don't need to clone dst here, it is guaranteed not to disappear.
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
         */
        skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(skb, &inet->cork.fl);
        rcu_read_unlock();
index 26ddb699d693dcbc2f2610fdd4a35b1784b04b8c..c649188314cce99c17fca198e75d597c24701197 100644 (file)
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
        struct tid_ampdu_rx *tid_rx;
        unsigned long timeout;
 
+       rcu_read_lock();
        tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
-       if (!tid_rx)
+       if (!tid_rx) {
+               rcu_read_unlock();
                return;
+       }
 
        timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&tid_rx->session_timer, timeout);
+               rcu_read_unlock();
                return;
        }
+       rcu_read_unlock();
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
index 495831ee48f1b007abe235852968e9fd3bd188b5..7d5108a867ad9ec5341fc5122a2b65cd792846c3 100644 (file)
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
                sinfo.filled = 0;
                sta_set_sinfo(sta, &sinfo);
 
-               if (sinfo.filled | STATION_INFO_TX_BITRATE)
+               if (sinfo.filled & STATION_INFO_TX_BITRATE)
                        data[i] = 100000 *
                                cfg80211_calculate_bitrate(&sinfo.txrate);
                i++;
-               if (sinfo.filled | STATION_INFO_RX_BITRATE)
+               if (sinfo.filled & STATION_INFO_RX_BITRATE)
                        data[i] = 100000 *
                                cfg80211_calculate_bitrate(&sinfo.rxrate);
                i++;
 
-               if (sinfo.filled | STATION_INFO_SIGNAL_AVG)
+               if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
                        data[i] = (u8)sinfo.signal_avg;
                i++;
        } else {
@@ -2093,6 +2093,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        int i, ret;
 
+       if (!ieee80211_sdata_running(sdata))
+               return -ENETDOWN;
+
        if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
                ret = drv_set_bitrate_mask(local, sdata, mask);
                if (ret)
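
The ethtool stats hunk in this file replaces '|' with '&' because OR-ing a mask into a non-zero flags word is always true, so every field looked filled. A short demonstration of why the OR form always succeeds and the AND form actually tests the bit; the STATION_INFO_* values are re-declared locally just for the sketch:

#include <stdio.h>

#define STATION_INFO_TX_BITRATE  (1u << 0)
#define STATION_INFO_RX_BITRATE  (1u << 1)

int main(void)
{
	unsigned int filled = STATION_INFO_RX_BITRATE;	/* only RX is valid */

	/* Buggy form: the OR result is always non-zero, so the test
	 * passes even for fields that were never filled in. */
	if (filled | STATION_INFO_TX_BITRATE)
		printf("'|' test: TX bitrate wrongly treated as valid\n");

	/* Fixed form, as in the patch: AND with the mask. */
	if (filled & STATION_INFO_TX_BITRATE)
		printf("'&' test: TX bitrate valid\n");
	else
		printf("'&' test: TX bitrate not filled\n");

	return 0;
}
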
index d4c19a7773db24b12bacf0407330ebccd75ba772..8664111d05663d47678f2088f4169a6ab80fdb96 100644 (file)
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_configure_filter(local);
                break;
        default:
+               mutex_lock(&local->mtx);
+               if (local->hw_roc_dev == sdata->dev &&
+                   local->hw_roc_channel) {
+                       /* ignore return value since this is racy */
+                       drv_cancel_remain_on_channel(local);
+                       ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+               }
+               mutex_unlock(&local->mtx);
+
+               flush_work(&local->hw_roc_start);
+               flush_work(&local->hw_roc_done);
+
                flush_work(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
index 04c3063089874fa7c8eb5ded233be3e5d7b86502..0db5d34a06b69c8c72798a8a7d4e6c4f642f7560 100644 (file)
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
        sdata->vif.bss_conf.qos = true;
 }
 
+static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+       lockdep_assert_held(&sdata->local->mtx);
+
+       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+                               IEEE80211_STA_BEACON_POLL);
+       ieee80211_run_deferred_scan(sdata->local);
+}
+
+static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+       mutex_lock(&sdata->local->mtx);
+       __ieee80211_stop_poll(sdata);
+       mutex_unlock(&sdata->local->mtx);
+}
+
 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
                                           u16 capab, bool erp_valid, u8 erp)
 {
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
        sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
        /* just to be sure */
-       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                               IEEE80211_STA_BEACON_POLL);
+       ieee80211_stop_poll(sdata);
 
        ieee80211_led_assoc(local, 1);
 
@@ -1327,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
-       u8 bssid[ETH_ALEN];
 
        ASSERT_MGD_MTX(ifmgd);
 
@@ -1337,10 +1351,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!ifmgd->associated))
                return;
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+       ieee80211_stop_poll(sdata);
 
        ifmgd->associated = NULL;
-       memset(ifmgd->bssid, 0, ETH_ALEN);
 
        /*
         * we need to commit the associated = NULL change because the
@@ -1360,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        netif_carrier_off(sdata->dev);
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, bssid);
+       sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
                ieee80211_sta_tear_down_BA_sessions(sta, tx);
@@ -1369,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* deauthenticate/disassociate now */
        if (tx || frame_buf)
-               ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
-                                              tx, frame_buf);
+               ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+                                              reason, tx, frame_buf);
 
        /* flush out frame */
        if (tx)
                drv_flush(local, false);
 
+       /* clear bssid only after building the needed mgmt frames */
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+
        /* remove AP and TDLS peers */
        sta_info_flush(local, sdata);
 
@@ -1456,8 +1472,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
-       ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                         IEEE80211_STA_BEACON_POLL);
+       __ieee80211_stop_poll(sdata);
 
        mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_ps(local, -1);
@@ -1477,7 +1492,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
                  round_jiffies_up(jiffies +
                                   IEEE80211_CONNECTION_IDLE_TIME));
 out:
-       ieee80211_run_deferred_scan(local);
        mutex_unlock(&local->mtx);
 }
 
@@ -2160,15 +2174,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                       sdata->name, mgmt->sa, status_code);
                ieee80211_destroy_assoc_data(sdata, false);
        } else {
-               printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
                if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
                        /* oops -- internal error -- send timeout for now */
-                       ieee80211_destroy_assoc_data(sdata, true);
-                       sta_info_destroy_addr(sdata, mgmt->bssid);
+                       ieee80211_destroy_assoc_data(sdata, false);
                        cfg80211_put_bss(*bss);
                        return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
                }
+               printk(KERN_DEBUG "%s: associated\n", sdata->name);
 
                /*
                 * destroy assoc_data afterwards, as otherwise an idle
@@ -2408,7 +2420,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
                                    sdata->name);
 #endif
+               mutex_lock(&local->mtx);
                ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
+               ieee80211_run_deferred_scan(local);
+               mutex_unlock(&local->mtx);
+
                mutex_lock(&local->iflist_mtx);
                ieee80211_recalc_ps(local, -1);
                mutex_unlock(&local->iflist_mtx);
@@ -2595,9 +2611,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-       ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                         IEEE80211_STA_BEACON_POLL);
-
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               false, frame_buf);
        mutex_unlock(&ifmgd->mtx);
@@ -2874,8 +2887,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
        u32 flags;
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-               sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
-                                       IEEE80211_STA_CONNECTION_POLL);
+               __ieee80211_stop_poll(sdata);
 
                /* let's probe the connection once */
                flags = sdata->local->hw.flags;
@@ -2944,7 +2956,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
        if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
                add_timer(&ifmgd->chswitch_timer);
        ieee80211_sta_reset_beacon_monitor(sdata);
+
+       mutex_lock(&sdata->local->mtx);
        ieee80211_restart_sta_timer(sdata);
+       mutex_unlock(&sdata->local->mtx);
 }
 #endif
 
@@ -3106,7 +3121,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        }
 
        local->oper_channel = cbss->channel;
-       ieee80211_hw_config(local, 0);
+       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        if (!have_sta) {
                u32 rates = 0, basic_rates = 0;
index f054e94901a295443abcae7c3cfddda963f8d7c4..935aa4b6deee0220737ee4c1cebad69472dea343 100644 (file)
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
                return;
        }
 
+       /* was never transmitted */
+       if (local->hw_roc_skb) {
+               u64 cookie;
+
+               cookie = local->hw_roc_cookie ^ 2;
+
+               cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
+                                       local->hw_roc_skb->data,
+                                       local->hw_roc_skb->len, false,
+                                       GFP_KERNEL);
+
+               kfree_skb(local->hw_roc_skb);
+               local->hw_roc_skb = NULL;
+               local->hw_roc_skb_for_status = NULL;
+       }
+
        if (!local->hw_roc_for_tx)
                cfg80211_remain_on_channel_expired(local->hw_roc_dev,
                                                   local->hw_roc_cookie,
index 2d1acc6c54455de4d397dc64cc471d8890e95bc7..f9e51ef8dfa2432ec44168feebc82579f3ac0be6 100644 (file)
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
                        max_rates = sband->n_bitrates;
        }
 
-       msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+       msp = kzalloc(sizeof(*msp), gfp);
        if (!msp)
                return NULL;
 
index 7bcecf73aafbf9dadb87a2c8a114e3eb92490131..965e6ec0adb6c12393ced183a44463a63e09a9ff 100644 (file)
@@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index f5b1638fbf8092a5ac30eebaea5b46b0b840867a..de455f8bbb91c0ffbedd2c911623380946d07d0b 100644 (file)
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* make the station visible */
        sta_info_hash_add(local, sta);
 
-       list_add(&sta->list, &local->sta_list);
+       list_add_rcu(&sta->list, &local->sta_list);
 
        set_sta_flag(sta, WLAN_STA_INSERTED);
 
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        if (ret)
                return ret;
 
-       list_del(&sta->list);
+       list_del_rcu(&sta->list);
 
        mutex_lock(&local->key_mtx);
        for (i = 0; i < NUM_DEFAULT_KEYS; i++)
index 3bb24a121c95f7e82e35719a6aebf582038c449c..a470e1123a5576ed5e14b779ed4a9213cda407b7 100644 (file)
@@ -271,6 +271,9 @@ struct sta_ampdu_mlme {
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
  * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ *     calculating clockdrift
+ * @ch_type: peer's channel type
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -278,6 +281,8 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
+ * @supports_40mhz: tracks whether the station advertised 40 MHz support
+ *     as we overwrite its HT parameters with the currently used value
  */
 struct sta_info {
        /* General information, mostly static */
index 847215bb2a6fc63c1ed010dbfc9bd2d35cb6b7c0..e453212fa17f741bc380b2cbdabe59ee4f6df5d7 100644 (file)
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        __le16 fc;
        struct ieee80211_hdr hdr;
        struct ieee80211s_hdr mesh_hdr __maybe_unused;
-       struct mesh_path __maybe_unused *mppath = NULL;
+       struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        goto fail;
                }
                rcu_read_lock();
-               if (!is_multicast_ether_addr(skb->data))
-                       mppath = mpp_path_lookup(skb->data, sdata);
+               if (!is_multicast_ether_addr(skb->data)) {
+                       mpath = mesh_path_lookup(skb->data, sdata);
+                       if (!mpath)
+                               mppath = mpp_path_lookup(skb->data, sdata);
+               }
 
                /*
                 * Use address extension if it is a packet from
index a44c6807df01914a04c5675d1422d765260a8c29..8dd4712620ff53832a212a3d69791e13cd59905f 100644 (file)
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        enum ieee80211_sta_state state;
 
                        for (state = IEEE80211_STA_NOTEXIST;
-                            state < sta->sta_state - 1; state++)
+                            state < sta->sta_state; state++)
                                WARN_ON(drv_sta_state(local, sta->sdata, sta,
                                                      state, state + 1));
                }
index 8781d8f904d94940880ba90e1fd247c33b4b1aa2..434b6873b352a9e384fd02b2e607bc1fab17895e 100644 (file)
@@ -83,9 +83,10 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
 {
        struct xmit_work *work;
 
-       if (!(priv->phy->channels_supported[page] & (1 << chan)))
+       if (!(priv->phy->channels_supported[page] & (1 << chan))) {
                WARN_ON(1);
                return NETDEV_TX_OK;
+       }
 
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc = crc_ccitt(0, skb->data, skb->len);
index 819c342f5b3012b6a4e37f60414fb835fc0041d8..9730882697aaedbab0beee66f0f12a654b97b63a 100644 (file)
@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
        return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+           const struct nlmsghdr *nlh,
+           const struct nlattr * const attr[])
+{
+       return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+       [IPSET_CMD_NONE]        = {
+               .call           = ip_set_none,
+               .attr_count     = IPSET_ATTR_CMD_MAX,
+       },
        [IPSET_CMD_CREATE]      = {
                .call           = ip_set_create,
                .attr_count     = IPSET_ATTR_CMD_MAX,
index ee863943c8267286e4b16e52400dd40bf51b4f26..d5d3607ae7bcf5e9704bd189217115cf048aee4b 100644 (file)
@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)  (rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-       const long *a = (const long *)_a;
-       const long *b = (const long *)_b;
-
-       BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-       if (a[0] != b[0])
-               return a[0] - b[0];
-       if (IFNAMSIZ > sizeof(long)) {
-               if (a[1] != b[1])
-                       return a[1] - b[1];
-       }
-       if (IFNAMSIZ > 2 * sizeof(long)) {
-               if (a[2] != b[2])
-                       return a[2] - b[2];
-       }
-       if (IFNAMSIZ > 3 * sizeof(long)) {
-               if (a[3] != b[3])
-                       return a[3] - b[3];
-       }
-       return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
        while (n) {
                const char *d = iface_data(n);
-               long res = ifname_compare(*iface, d);
+               int res = strcmp(*iface, d);
 
                if (res < 0)
                        n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
        while (*n) {
                char *ifname = iface_data(*n);
-               long res = ifname_compare(*iface, ifname);
+               int res = strcmp(*iface, ifname);
 
                p = *n;
                if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface4_elem data = { .cidr = HOST_MASK };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = { .cidr = HOST_MASK };
        u32 timeout = h->timeout;
-       char iface[IFNAMSIZ] = {};
+       char iface[IFNAMSIZ];
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
index dd811b8dd97c66a9da387a4e37073c47e8dc4407..84444dda194b61806efaeadde4c2458f502d8283 100644 (file)
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-                                   const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+                                    const struct in6_addr *addr)
 {
-       struct rt6_info *rt;
        struct flowi6 fl6 = {
                .daddr = *addr,
        };
+       struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+       bool is_local;
 
-       rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-       if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-               return 1;
+       is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-       return 0;
+       dst_release(dst);
+       return is_local;
 }
 #endif
 
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_service *svc;
        struct ip_vs_dest *dest;
        unsigned int idx;
 
-       if (event != NETDEV_UNREGISTER)
+       if (event != NETDEV_UNREGISTER || !ipvs)
                return NOTIFY_DONE;
        IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
        EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
                }
        }
 
-       list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+       list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
                __ip_vs_dev_reset(dest, dev);
        }
        mutex_unlock(&__ip_vs_mutex);
index 46d69d7f1bb4e15a3116c389131e23fd9a8d28d6..31f50bc3a3124a111ce0c2f75f4f9e22f095243f 100644 (file)
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                return 0;
 
        /* RTP port is even */
-       port &= htons(~1);
-       rtp_port = port;
-       rtcp_port = htons(ntohs(port) + 1);
+       rtp_port = port & ~htons(1);
+       rtcp_port = port | htons(1);
 
        /* Create expect for RTP */
        if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
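
The two lines above derive both ports without leaving network byte order: `port & ~htons(1)` clears the numeric low bit of the 16-bit port (RTP ports are even), and `port | htons(1)` sets it to get the adjacent odd RTCP port, so the result is correct on both little- and big-endian hosts. A standalone sketch of the arithmetic (5005 is just an example port):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint16_t port = htons(5005);              /* media port in network byte order */
            uint16_t rtp  = port & ~htons(1);         /* clear low bit -> even RTP port   */
            uint16_t rtcp = port |  htons(1);         /* set low bit   -> odd RTCP port   */

            printf("rtp=%d rtcp=%d\n", ntohs(rtp), ntohs(rtcp));   /* prints 5004 and 5005 */
            return 0;
    }
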
index 3e797d1fcb94272ad96b9798e91a6e6467031379..791d56bbd74a7613e4b1d87ba74bcf2a8a135e46 100644 (file)
@@ -169,8 +169,10 @@ replay:
 
                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
-               if (err < 0)
+               if (err < 0) {
+                       rcu_read_unlock();
                        return err;
+               }
 
                if (nc->call_rcu) {
                        err = nc->call_rcu(net->nfnl, skb, nlh,
index 0a96a43108edde1dcdc251a168f0547c9f277378..1686ca1b53a157d8568ae8ac104aeb0f9415d696 100644 (file)
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK");
 MODULE_ALIAS("ip6t_HMARK");
 
 struct hmark_tuple {
-       u32                     src;
-       u32                     dst;
+       __be32                  src;
+       __be32                  dst;
        union hmark_ports       uports;
-       uint8_t                 proto;
+       u8                      proto;
 };
 
-static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
 {
        return (addr32[0] & mask[0]) ^
               (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
               (addr32[3] & mask[3]);
 }
 
-static inline u32
-hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+static inline __be32
+hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
 {
        switch (l3num) {
        case AF_INET:
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
        return 0;
 }
 
+static inline void hmark_swap_ports(union hmark_ports *uports,
+                                   const struct xt_hmark_info *info)
+{
+       union hmark_ports hp;
+       u16 src, dst;
+
+       hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
+       src = ntohs(hp.b16.src);
+       dst = ntohs(hp.b16.dst);
+
+       if (dst > src)
+               uports->v32 = (dst << 16) | src;
+       else
+               uports->v32 = (src << 16) | dst;
+}
+
 static int
 hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
                    const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
        otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-       t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
-                                info->src_mask.all);
-       t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
-                                info->dst_mask.all);
+       t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
+                                info->src_mask.ip6);
+       t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
+                                info->dst_mask.ip6);
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
 
        t->proto = nf_ct_protonum(ct);
        if (t->proto != IPPROTO_ICMP) {
-               t->uports.p16.src = otuple->src.u.all;
-               t->uports.p16.dst = rtuple->src.u.all;
-               t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-                               info->port_set.v32;
-               if (t->uports.p16.dst < t->uports.p16.src)
-                       swap(t->uports.p16.dst, t->uports.p16.src);
+               t->uports.b16.src = otuple->src.u.all;
+               t->uports.b16.dst = rtuple->src.u.all;
+               hmark_swap_ports(&t->uports, info);
        }
 
        return 0;
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 #endif
 }
 
+/* This hash function is endian independent, to ensure consistent hashing if
+ * the cluster is composed of big and little endian systems. */
 static inline u32
 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
 {
        u32 hash;
+       u32 src = ntohl(t->src);
+       u32 dst = ntohl(t->dst);
 
-       if (t->dst < t->src)
-               swap(t->src, t->dst);
+       if (dst < src)
+               swap(src, dst);
 
-       hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+       hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
        hash = hash ^ (t->proto & info->proto_mask);
 
        return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
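
The ntohl() conversions added above exist because the same on-wire address bytes, read as a raw 32-bit word, are different numbers on little- and big-endian hosts; hashing the raw word therefore gives different marks across a mixed-endian cluster, while hashing the host-order value does not. A standalone sketch that simulates both interpretations of one address (192.0.2.1 is an arbitrary documentation address):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint8_t wire[4] = { 192, 0, 2, 1 };   /* 192.0.2.1 as it appears on the wire */

            uint32_t as_le = (uint32_t)wire[0] | (uint32_t)wire[1] << 8 |
                             (uint32_t)wire[2] << 16 | (uint32_t)wire[3] << 24;
            uint32_t as_be = (uint32_t)wire[0] << 24 | (uint32_t)wire[1] << 16 |
                             (uint32_t)wire[2] << 8 | (uint32_t)wire[3];

            printf("raw word on a little-endian host: 0x%08x\n", as_le);  /* 0x010200c0 */
            printf("raw word on a big-endian host:    0x%08x\n", as_be);  /* 0xc0000201 */
            printf("ntohl() value on either host:     0x%08x\n", as_be);  /* same hash input */
            return 0;
    }
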
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
        if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
                return;
 
-       t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-                       info->port_set.v32;
-
-       if (t->uports.p16.dst < t->uports.p16.src)
-               swap(t->uports.p16.dst, t->uports.p16.src);
+       hmark_swap_ports(&t->uports, info);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
                        return -1;
        }
 noicmp:
-       t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
-       t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+       t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
+       t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
                }
        }
 
-       t->src = (__force u32) ip->saddr;
-       t->dst = (__force u32) ip->daddr;
-
-       t->src &= info->src_mask.ip;
-       t->dst &= info->dst_mask.ip;
+       t->src = ip->saddr & info->src_mask.ip;
+       t->dst = ip->daddr & info->dst_mask.ip;
 
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
                return 0;
index 035960ec5cb9c8f51b51e48d6909ebb112047f93..c6f7db720d84f4650e975a952054c04d15868fc3 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
                info->del_set.flags, 0, UINT_MAX);
 
        /* Normalize to fit into jiffies */
-       if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+       if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+           add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
                add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
        if (info->add_set.index != IPSET_INVALID_ID)
                ip_set_add(info->add_set.index, skb, par, &add_opt);
index 3f339b19d140d666328b5dfd462f5d27bafb94d5..e06d458fc7197ff73e1f1f6f155c154c330cfa67 100644 (file)
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        pr_debug("%p\n", sk);
 
+       if (llcp_sock == NULL || llcp_sock->dev == NULL)
+               return -EBADFD;
+
        addr->sa_family = AF_NFC;
        *len = sizeof(struct sockaddr_nfc_llcp);
 
index cb2646179e5f8b8d1836499579d1ccfd109fad60..2ab196a9f228ac873238c2a060685bacea62b87a 100644 (file)
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
 
        pr_debug("sens_res 0x%x, nfcid1_len %d\n",
                 nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
                        struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
                                                     __u8 *data)
 {
-       nfcb_poll->sensb_res_len = *data++;
+       nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
 
        pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
 
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
                                                     __u8 *data)
 {
        nfcf_poll->bit_rate = *data++;
-       nfcf_poll->sensf_res_len = *data++;
+       nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
 
        pr_debug("bit_rate %d, sensf_res_len %d\n",
                 nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        switch (ntf->activation_rf_tech_and_mode) {
        case NCI_NFC_A_PASSIVE_POLL_MODE:
                nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
-               nfca_poll->rats_res_len = *data++;
+               nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
                pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
                if (nfca_poll->rats_res_len > 0) {
                        memcpy(nfca_poll->rats_res,
@@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
 
        case NCI_NFC_B_PASSIVE_POLL_MODE:
                nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
-               nfcb_poll->attrib_res_len = *data++;
+               nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
                pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
                if (nfcb_poll->attrib_res_len > 0) {
                        memcpy(nfcb_poll->attrib_res,
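
The min_t() clamps added in this file all follow one pattern: a length byte taken from the received frame is limited to the size of the fixed destination array before it drives a memcpy(), so a malformed or hostile frame cannot overflow the buffer. A minimal standalone sketch of the idiom (the buffer name and sizes are invented for illustration):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define RES_MAXSIZE 10                            /* hypothetical destination size */

    int main(void)
    {
            uint8_t frame[64] = { 200, 'A', 'B', 'C' };   /* first byte claims 200 payload bytes */
            uint8_t res[RES_MAXSIZE];

            uint8_t len = frame[0];
            if (len > sizeof(res))                    /* the clamp the patch adds via min_t() */
                    len = sizeof(res);

            memcpy(res, &frame[1], len);              /* copies at most RES_MAXSIZE bytes */
            printf("copied %d bytes\n", len);
            return 0;
    }
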
index ec1134c9e07fcd34fc5d6116e1a4aef9dedf6f23..8b8a6a2b2badaf61e9c71a174809ca989438668f 100644 (file)
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       pr_debug("sock=%p\n", sock);
+       pr_debug("sock=%p sk=%p\n", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index 779ce4ff92ec9e5a7de8ce5123b6c6dbc7d798e3..5a940dbd74a3bbf6ec3cadb82cbe47a4dbee8f15 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index bf35b4e1a14c02dfe8f3c8f03dbcb19084bcaaf7..12c30f3e643e00e3fa495cfc7d471e0078b00114 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index d01208968c830c5fa7a7484ea15ae059fc7015d6..a2fba7edfd1f433546a65b5ae386d97b80e16caa 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 9dd4f926f7d15ec4f03786d2ebcaa8070d9954b3..576f22c9c76e82630ba7e4495df1db583cf63b01 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 36f75a9e2c3d1fd248d1102a268351aa05a2c19b..5bf6341e2dd47b1f1a0f6dd3a7a3f9c9c28562c3 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index cfdf135fcd69332e9cb497315f9b6beb39097126..7dd762a464e55f9ef7f41c83b433aa81b6f4d7cf 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Remi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 89cfa9ce49395cfe2e64c738e249ba7891453eaa..0acc943f713a94c5d695cd150fa445e906eba751 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
- * Original author: Sakari Ailus <sakari.ailus@nokia.com>
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ *          Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 696348fd31a11300f0346007b124cb55d8f10481..d6bbbbd0af182352b41111ce9a2eefb22ef9be85 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Nokia Corporation.
  *
- * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * Author: Rémi Denis-Courmont
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 2754f098d43633f80c0a39b7c34a35296d5523ea..bebaa43484bcdbf72bbaaa764559051aeb8efb99 100644 (file)
@@ -229,7 +229,7 @@ found_UDP_peer:
        return peer;
 
 new_UDP_peer:
-       _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+       _net("Rx UDP DGRAM from NEW peer");
        read_unlock_bh(&rxrpc_peer_lock);
        _leave(" = -EBUSY [new]");
        return ERR_PTR(-EBUSY);
index a2a95aabf9c22bd3b0b0bbbe558feb1c1f65430a..c412ad0d0308ed8aedee1c5ce69bce323a32b823 100644 (file)
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
        return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct sk_buff_head *list = &sch->q;
        psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-       struct sk_buff *skb;
-
-       if (likely(skb_queue_len(list) < sch->limit)) {
-               skb = skb_peek_tail(list);
-               /* Optimize for add at tail */
-               if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-                       return qdisc_enqueue_tail(nskb, sch);
+       struct sk_buff *skb = skb_peek_tail(list);
 
-               skb_queue_reverse_walk(list, skb) {
-                       if (tnext >= netem_skb_cb(skb)->time_to_send)
-                               break;
-               }
+       /* Optimize for add at tail */
+       if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+               return __skb_queue_tail(list, nskb);
 
-               __skb_queue_after(list, skb, nskb);
-               sch->qstats.backlog += qdisc_pkt_len(nskb);
-               return NET_XMIT_SUCCESS;
+       skb_queue_reverse_walk(list, skb) {
+               if (tnext >= netem_skb_cb(skb)->time_to_send)
+                       break;
        }
 
-       return qdisc_reshape_fail(nskb, sch);
+       __skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
-       int ret;
        int count = 1;
 
        /* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
        }
 
+       if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+               return qdisc_reshape_fail(skb, sch);
+
+       sch->qstats.backlog += qdisc_pkt_len(skb);
+
        cb = netem_skb_cb(skb);
        if (q->gap == 0 ||              /* not doing reordering */
            q->counter < q->gap - 1 ||  /* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
                cb->time_to_send = now + delay;
                ++q->counter;
-               ret = tfifo_enqueue(skb, sch);
+               tfifo_enqueue(skb, sch);
        } else {
                /*
                 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                q->counter = 0;
 
                __skb_queue_head(&sch->q, skb);
-               sch->qstats.backlog += qdisc_pkt_len(skb);
                sch->qstats.requeues++;
-               ret = NET_XMIT_SUCCESS;
-       }
-
-       if (ret != NET_XMIT_SUCCESS) {
-               if (net_xmit_drop_count(ret)) {
-                       sch->qstats.drops++;
-                       return ret;
-               }
        }
 
        return NET_XMIT_SUCCESS;
index 74305c883bd3ee842c535bbc27f003d389207346..30ea4674cabd2a735c47af22239ab006b1c2e826 100644 (file)
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (opts == NULL)
+               goto nla_put_failure;
        if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);
index 5bc9ab161b373eadf51843ebaa77c527716a2122..b16517ee1aaf7cfad94978ed1181df6432da6f07 100644 (file)
@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_cnt = 0;
+       asoc->peer.sack_generation = 1;
 
        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
index 80564fe03024634dc479280a8fe8d60ea643e1cc..8b9b6790a3dfc60b562a3a6ea30dccf248612cc4 100644 (file)
@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
        epb = &ep->base;
 
-       if (hlist_unhashed(&epb->node))
-               return;
-
        epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
        head = &sctp_assoc_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
-       __hlist_del(&epb->node);
+       hlist_del_init(&epb->node);
        sctp_write_unlock(&head->lock);
 }
 
index f1b7d4bb591e9b648865c4e096ef838e77678442..6ae47acaaec65ceb898e10e462454e667a8e739e 100644 (file)
@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
+
+                       if (pkt->transport->sack_generation !=
+                           pkt->transport->asoc->peer.sack_generation)
+                               return retval;
+
                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
index 5942d27b1444c71dc4c7af0c591ec3b1b181a92d..9c90811d11345b6fb4a55b5878a9ec168bd3f17c 100644 (file)
@@ -673,7 +673,9 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
                        sctp_bh_unlock_sock(sk);
                }
+#if IS_ENABLED(CONFIG_IPV6)
 free_next:
+#endif
                list_del(&addrw->list);
                kfree(addrw);
        }
index a85eeeb55dd0022e009895c53a6d8593929b7691..b6de71efb140c538a66e0a7901df2b4402a85bf4 100644 (file)
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
        int len;
        __u32 ctsn;
        __u16 num_gabs, num_dup_tsns;
+       struct sctp_association *aptr = (struct sctp_association *)asoc;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+       struct sctp_transport *trans;
 
        memset(gabs, 0, sizeof(gabs));
        ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
                                 sctp_tsnmap_get_dups(map));
 
+       /* Once a SACK has been generated, check the association's SACK
+        * generation; if it has wrapped to 0, reset every transport's
+        * generation to 0 and set the association's generation back to 1.
+        *
+        * Zero is never used as a valid generation for the association,
+        * so no transport will match after a wrap event like this until
+        * the next SACK.
+        */
+       if (++aptr->peer.sack_generation == 0) {
+               list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+                                   transports)
+                       trans->sack_generation = 0;
+               aptr->peer.sack_generation = 1;
+       }
 nodata:
        return retval;
 }
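
The wrap handling added above reserves generation 0 as a value that never matches: when the association's counter wraps around, every transport is pushed back to 0 and the association restarts at 1, so a stale transport cannot appear current until the next SACK updates it. A standalone sketch of that logic with a toy transport array (not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    struct transport { uint32_t sack_generation; };

    int main(void)
    {
            struct transport trans[2] = { { 7 }, { 9 } };
            uint32_t assoc_generation = UINT32_MAX;     /* about to wrap */

            if (++assoc_generation == 0) {              /* wrap event */
                    for (int i = 0; i < 2; i++)
                            trans[i].sack_generation = 0;
                    assoc_generation = 1;               /* 0 stays "never valid" */
            }

            printf("assoc=%u trans[0]=%u trans[1]=%u\n",
                   assoc_generation, trans[0].sack_generation, trans[1].sack_generation);
            return 0;
    }
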
index c96d1a81cf4209f6d3ca9b6762fefa47e75867c3..8716da1a859221dc96d206ed517190e9e9cfa2ca 100644 (file)
@@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_REPORT_TSN:
                        /* Record the arrival of a TSN.  */
                        error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                                                cmd->obj.u32);
+                                                cmd->obj.u32, NULL);
                        break;
 
                case SCTP_CMD_REPORT_FWDTSN:
index b3b8a8d813eb663f18a6fd488d3f0472f2fe1263..31c7bfcd9b5872aa136e2b513123bb6d582bdff6 100644 (file)
@@ -1231,8 +1231,14 @@ out_free:
        SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
                          " kaddrs: %p err: %d\n",
                          asoc, kaddrs, err);
-       if (asoc)
+       if (asoc) {
+               /* sctp_primitive_ASSOCIATE may have added this association
+                * to the hash table; try to unhash it just in case. This is
+                * a no-op if it wasn't hashed, so we're safe.
+                */
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
        return err;
 }
 
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        goto out_unlock;
 
 out_free:
-       if (new_asoc)
+       if (new_asoc) {
+               sctp_unhash_established(asoc);
                sctp_association_free(asoc);
+       }
 out_unlock:
        sctp_release_sock(sk);
 
index b026ba0c69922e09eb5c150298f650ea7bb3d1c4..1dcceb6e0ce6c2b9e5f6c0c3abf8075c3e75224c 100644 (file)
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));
 
+       peer->sack_generation = 0;
+
        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
index f1e40cebc981ae7962bb9cd20ae84823b70d43cf..b5fb7c409023ff8adea81d41e68ace60781febae 100644 (file)
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
 
 
 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+                    struct sctp_transport *trans)
 {
        u16 gap;
 
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
                 */
                map->max_tsn_seen++;
                map->cumulative_tsn_ack_point++;
+               if (trans)
+                       trans->sack_generation =
+                               trans->asoc->peer.sack_generation;
                map->base_tsn++;
        } else {
                /* Either we already have a gap, or about to record a gap, so
index 8a84017834c211a840e83c1e39bebdbb001ec5c4..33d894776192205cd4b4a9573ccf70664c723b2d 100644 (file)
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
         * can mark it as received so the tsn_map is updated correctly.
         */
        if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
-                            ntohl(chunk->subh.data_hdr->tsn)))
+                            ntohl(chunk->subh.data_hdr->tsn),
+                            chunk->transport))
                goto fail_mark;
 
        /* First calculate the padding, so we don't inadvertently
index f2d1de7f2ffbd5a219759f55bdc546f2edbf19b0..f5a6a4f4faf721af4874538093cb003f4efc202c 100644 (file)
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);
 
                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
index 04040476082e6efd5ef08f9c7e6444c0fec77929..21fde99e5c56e4ed6413d51f46a3945bff67f7d9 100644 (file)
@@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
-       wake_up(waitq);
+
+       if (waitq)
+               wake_up(waitq);
 }
 
 static void
@@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work)
        }
        dentry = dget(pipe->dentry);
        spin_unlock(&pipe->lock);
-       if (dentry) {
-               rpc_purge_list(&RPC_I(dentry->d_inode)->waitq,
-                              &free_list, destroy_msg, -ETIMEDOUT);
-               dput(dentry);
-       }
+       rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
+                       &free_list, destroy_msg, -ETIMEDOUT);
+       dput(dentry);
 }
 
 ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
index 7e9baaa1e543e55878dcb0d9bd0378a0e51754e0..3ee7461926d8a01318ed2876e37651506c6b3c4b 100644 (file)
@@ -1374,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
                                                sizeof(req->rq_snd_buf));
                return bc_send(req);
        } else {
-               /* Nothing to do to drop request */
+               /* drop request */
+               xprt_free_bc_request(req);
                return 0;
        }
 }
index d2a19b0ff71f134544ac1afc65dd5d3fe3bc4891..89baa3328411485ac4748aa9e0f9d097fdd30a97 100644 (file)
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
+       wdev->sme_state = CFG80211_SME_CONNECTED;
        cfg80211_upload_connect_keys(wdev);
 
        nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       CFG80211_DEV_WARN_ON(!wdev->ssid_len);
+       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
 
        ev = kzalloc(sizeof(*ev), gfp);
        if (!ev)
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.channel = params->channel;
 #endif
+       wdev->sme_state = CFG80211_SME_CONNECTING;
        err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
        if (err) {
                wdev->connect_keys = NULL;
+               wdev->sme_state = CFG80211_SME_IDLE;
                return err;
        }
 
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
        }
 
        wdev->current_bss = NULL;
+       wdev->sme_state = CFG80211_SME_IDLE;
        wdev->ssid_len = 0;
 #ifdef CONFIG_CFG80211_WEXT
        if (!nowext)
index 15f347477a9953fb85acc494436cd2763cd266a9..baf5704740ee62080e8dc4a16de8cb71503b7669 100644 (file)
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
        spin_unlock(&reg_requests_lock);
 
        if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-               cancel_delayed_work_sync(&reg_timeout);
+               cancel_delayed_work(&reg_timeout);
 
        if (need_more_processing)
                schedule_work(&reg_work);
index 55d99466babb1d8060dc09d841315e9d366ecc21..316cfd00914fe8df7d6510b13aeaf4165a9d6d88 100644 (file)
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
             ntype == NL80211_IFTYPE_P2P_CLIENT))
                return -EBUSY;
 
-       if (ntype != otype) {
+       if (ntype != otype && netif_running(dev)) {
                err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
                                                    ntype);
                if (err)
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                  enum nl80211_iftype iftype)
 {
        struct wireless_dev *wdev_iter;
+       u32 used_iftypes = BIT(iftype);
        int num[NUM_NL80211_IFTYPES];
        int total = 1;
        int i, j;
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 
                num[wdev_iter->iftype]++;
                total++;
+               used_iftypes |= BIT(wdev_iter->iftype);
        }
        mutex_unlock(&rdev->devlist_mtx);
 
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
        for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
                const struct ieee80211_iface_combination *c;
                struct ieee80211_iface_limit *limits;
+               u32 all_iftypes = 0;
 
                c = &rdev->wiphy.iface_combinations[i];
 
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                        if (rdev->wiphy.software_iftypes & BIT(iftype))
                                continue;
                        for (j = 0; j < c->n_limits; j++) {
+                               all_iftypes |= limits[j].types;
                                if (!(limits[j].types & BIT(iftype)))
                                        continue;
                                if (limits[j].max < num[iftype])
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                limits[j].max -= num[iftype];
                        }
                }
-               /* yay, it fits */
+
+               /*
+                * Finally check that all iftypes that we're currently
+                * using are actually part of this combination. If they
+                * aren't then we can't use this combination and have
+                * to continue to the next.
+                */
+               if ((all_iftypes & used_iftypes) != used_iftypes)
+                       goto cont;
+
+               /*
+                * This combination covered all interface types and
+                * supported the requested numbers, so we're good.
+                */
                kfree(limits);
                return 0;
  cont:
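
The coverage test added above guards against a subtle acceptance bug: even if every per-type limit is satisfied, a combination that never mentions one of the interface types currently in use must be rejected, which is what `(all_iftypes & used_iftypes) != used_iftypes` expresses. A standalone sketch with invented type bits:

    #include <stdio.h>
    #include <stdint.h>

    enum { TYPE_STATION = 1 << 0, TYPE_AP = 1 << 1, TYPE_P2P = 1 << 2 };  /* hypothetical bits */

    int main(void)
    {
            uint32_t used_iftypes = TYPE_STATION | TYPE_P2P;   /* types running right now      */
            uint32_t all_iftypes  = TYPE_STATION | TYPE_AP;    /* types this combination lists */

            if ((all_iftypes & used_iftypes) != used_iftypes)
                    printf("combination rejected: it does not cover every type in use\n");
            else
                    printf("combination accepted\n");
            return 0;
    }
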
index 0948c6b5a32158cc4a28736d1c6924a4b79beded..8b673dd4627fb20f4ab16a8af6de9524bd84a426 100755 (executable)
@@ -83,6 +83,8 @@ push(@signature_tags, "Signed-off-by:");
 push(@signature_tags, "Reviewed-by:");
 push(@signature_tags, "Acked-by:");
 
+my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+
 # rfc822 email address - preloaded methods go here.
 my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
 my $rfc822_char = '[\\000-\\377]';
@@ -473,7 +475,6 @@ my @subsystem = ();
 my @status = ();
 my %deduplicate_name_hash = ();
 my %deduplicate_address_hash = ();
-my $signature_pattern;
 
 my @maintainers = get_maintainers();
 
old mode 100644 (file)
new mode 100755 (executable)
index 3efc9b12aef44016201b02eeafcc10140f17a240..860aeb349cb337bbccf4346d99120d4d1fd51c90 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mman.h>
 #include <linux/mount.h>
 #include <linux/personality.h>
+#include <linux/backing-dev.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR      2
index 372ec6502aa8752dca83c3c507e2d0ce9cac84d1..ffd8900a38e8d7ddb906c6f090bacd1f6e419907 100644 (file)
@@ -2717,7 +2717,7 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
                        ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
                return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-       if (ia_valid & ATTR_SIZE)
+       if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
                av |= FILE__OPEN;
 
        return dentry_has_perm(cred, dentry, av);
index b8c53723e09bfe7d6c211bc05df35793ae9ac8b2..df2de54a958debfab56caf3483cd5d93856e5be4 100644 (file)
@@ -145,7 +145,9 @@ struct security_class_mapping secclass_map[] = {
            "node_bind", "name_connect", NULL } },
        { "memprotect", { "mmap_zero", NULL } },
        { "peer", { "recv", NULL } },
-       { "capability2", { "mac_override", "mac_admin", "syslog", NULL } },
+       { "capability2",
+         { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+           NULL } },
        { "kernel_service", { "use_as_override", "create_files_as", NULL } },
        { "tun_socket",
          { COMMON_SOCK_PERMS, NULL } },
index a68aed7fce0205462ce08183f560015eeb0060cb..ec2118d0e27aca3f5fef6c2ddd72f8b166ce98ca 100644 (file)
@@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream)
        if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
                return -EPERM;
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
-       if (!retval) {
+       if (!retval)
                stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
-               wake_up(&stream->runtime->sleep);
-       }
        return retval;
 }
 
@@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
        if (!retval) {
                stream->runtime->state = SNDRV_PCM_STATE_SETUP;
                wake_up(&stream->runtime->sleep);
+               stream->runtime->hw_pointer = 0;
+               stream->runtime->app_pointer = 0;
+               stream->runtime->total_bytes_available = 0;
+               stream->runtime->total_bytes_transferred = 0;
        }
        return retval;
 }
index 582aace20ea31357b94a660e2607da4b058ad228..7eca25fae4137947ebf933451e2d2964d2bd9d40 100644 (file)
@@ -37,8 +37,8 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Routines for control of TEA5757/5759 Philips AM/FM radio tuner chips");
 MODULE_LICENSE("GPL");
 
-#define FREQ_LO                 (76U * 16000)
-#define FREQ_HI                (108U * 16000)
+#define FREQ_LO                ((tea->tea5759 ? 760 :  875) * 1600U)
+#define FREQ_HI                ((tea->tea5759 ? 910 : 1080) * 1600U)
 
 /*
  * definitions
@@ -120,9 +120,9 @@ static u32 snd_tea575x_read(struct snd_tea575x *tea)
        return data;
 }
 
-static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+static u32 snd_tea575x_val_to_freq(struct snd_tea575x *tea, u32 val)
 {
-       u32 freq = snd_tea575x_read(tea) & TEA575X_BIT_FREQ_MASK;
+       u32 freq = val & TEA575X_BIT_FREQ_MASK;
 
        if (freq == 0)
                return freq;
@@ -139,6 +139,11 @@ static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
        return clamp(freq * 16, FREQ_LO, FREQ_HI); /* from kHz */
 }
 
+static u32 snd_tea575x_get_freq(struct snd_tea575x *tea)
+{
+       return snd_tea575x_val_to_freq(tea, snd_tea575x_read(tea));
+}
+
 static void snd_tea575x_set_freq(struct snd_tea575x *tea)
 {
        u32 freq = tea->freq;
@@ -156,6 +161,7 @@ static void snd_tea575x_set_freq(struct snd_tea575x *tea)
        tea->val &= ~TEA575X_BIT_FREQ_MASK;
        tea->val |= freq & TEA575X_BIT_FREQ_MASK;
        snd_tea575x_write(tea, tea->val);
+       tea->freq = snd_tea575x_val_to_freq(tea, tea->val);
 }
 
 /*
@@ -317,7 +323,6 @@ static int tea575x_s_ctrl(struct v4l2_ctrl *ctrl)
 }
 
 static const struct v4l2_file_operations tea575x_fops = {
-       .owner          = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open           = v4l2_fh_open,
        .release        = v4l2_fh_release,
@@ -337,7 +342,6 @@ static const struct v4l2_ioctl_ops tea575x_ioctl_ops = {
 };
 
 static const struct video_device tea575x_radio = {
-       .fops           = &tea575x_fops,
        .ioctl_ops      = &tea575x_ioctl_ops,
        .release        = video_device_release_empty,
 };
@@ -349,7 +353,7 @@ static const struct v4l2_ctrl_ops tea575x_ctrl_ops = {
 /*
  * initialize all the tea575x chips
  */
-int snd_tea575x_init(struct snd_tea575x *tea)
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
 {
        int retval;
 
@@ -374,6 +378,9 @@ int snd_tea575x_init(struct snd_tea575x *tea)
        tea->vd.lock = &tea->mutex;
        tea->vd.v4l2_dev = tea->v4l2_dev;
        tea->vd.ctrl_handler = &tea->ctrl_handler;
+       tea->fops = tea575x_fops;
+       tea->fops.owner = owner;
+       tea->vd.fops = &tea->fops;
        set_bit(V4L2_FL_USE_FH_PRIO, &tea->vd.flags);
        /* disable hw_freq_seek if we can't use it */
        if (tea->cannot_read_data)
index 67f47d891959661c326902cf5fa7803d39a09be0..52b5c0bf90c12a640b02deb844c58c0254a8c73c 100644 (file)
@@ -2769,7 +2769,7 @@ static int __devinit snd_es1968_create(struct snd_card *card,
        chip->tea.ops = &snd_es1968_tea_ops;
        strlcpy(chip->tea.card, "SF64-PCE2", sizeof(chip->tea.card));
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
-       if (!snd_tea575x_init(&chip->tea))
+       if (!snd_tea575x_init(&chip->tea, THIS_MODULE))
                printk(KERN_INFO "es1968: detected TEA575x radio\n");
 #endif
 
index f696623227503763d9996589034244314f39568c..b32e8024ea86c017812fc517c6d246daec475c73 100644 (file)
@@ -1254,7 +1254,7 @@ static int __devinit snd_fm801_create(struct snd_card *card,
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
        if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
            (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-               if (snd_tea575x_init(&chip->tea)) {
+               if (snd_tea575x_init(&chip->tea, THIS_MODULE)) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
                        snd_fm801_free(chip);
                        return -ENODEV;
@@ -1263,7 +1263,7 @@ static int __devinit snd_fm801_create(struct snd_card *card,
                /* autodetect tuner connection */
                for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
                        chip->tea575x_tuner = tea575x_tuner;
-                       if (!snd_tea575x_init(&chip->tea)) {
+                       if (!snd_tea575x_init(&chip->tea, THIS_MODULE)) {
                                snd_printk(KERN_INFO "detected TEA575x radio type %s\n",
                                           get_tea575x_gpio(chip)->name);
                                break;
index 163b6b5de3eb535fe972e145b6ed35f0182590fa..d03079764189dedef68484f72b649aabd460006c 100644 (file)
@@ -97,19 +97,6 @@ config SND_HDA_CODEC_REALTEK
          snd-hda-codec-realtek.
          This module is automatically loaded at probing.
 
-config SND_HDA_ENABLE_REALTEK_QUIRKS
-       bool "Build static quirks for Realtek codecs"
-       depends on SND_HDA_CODEC_REALTEK
-       default y
-       help
-         Say Y here to build the static quirks codes for Realtek codecs.
-         If you need the "model" preset that the default BIOS auto-parser
-         can't handle, turn this option on.
-
-         If your device works with model=auto option, basically you don't
-         need the quirk code.  By turning this off, you can reduce the
-         module size quite a lot.
-
 config SND_HDA_CODEC_ANALOG
        bool "Build Analog Device HD-audio codec support"
        default y
index 6e9ef3e250935326ccf5d4583760dadc8091be20..f7520b9f909cfb0d577ed745bef272675b23477a 100644 (file)
@@ -618,7 +618,6 @@ int snd_hda_gen_add_verbs(struct hda_gen_spec *spec,
                          const struct hda_verb *list)
 {
        const struct hda_verb **v;
-       snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
        v = snd_array_new(&spec->verbs);
        if (!v)
                return -ENOMEM;
index 2a7889dfbd1b53aff68dfff49f554d6f7e0fec84..632ad0ad3007491b827f7e2e3edcb87dba280c6e 100644 (file)
@@ -157,4 +157,14 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
                        const struct snd_pci_quirk *quirk,
                        const struct hda_fixup *fixlist);
 
+static inline void snd_hda_gen_init(struct hda_gen_spec *spec)
+{
+       snd_array_init(&spec->verbs, sizeof(struct hda_verb *), 8);
+}
+
+static inline void snd_hda_gen_free(struct hda_gen_spec *spec)
+{
+       snd_array_free(&spec->verbs);
+}
+
 #endif /* __SOUND_HDA_AUTO_PARSER_H */
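snd_hda_gen_init()/snd_hda_gen_free() pair the verb-array setup with a matching teardown; the conexant and realtek hunks further down call them from their probe and free paths. A generic sketch of that init/free pairing, with placeholder names (dyn_array, spec_probe, spec_free):

#include <stdlib.h>

struct dyn_array {
        void *list;
        unsigned int used, alloced;
};

static void dyn_array_init(struct dyn_array *a)
{
        a->list = NULL;
        a->used = a->alloced = 0;
}

static void dyn_array_free(struct dyn_array *a)
{
        free(a->list);
        dyn_array_init(a);
}

struct spec {
        struct dyn_array verbs;
};

/* Initialise the array exactly once when the spec is allocated ... */
static struct spec *spec_probe(void)
{
        struct spec *s = calloc(1, sizeof(*s));

        if (s)
                dyn_array_init(&s->verbs);
        return s;
}

/* ... and release it from the matching teardown path, nowhere else. */
static void spec_free(struct spec *s)
{
        if (!s)
                return;
        dyn_array_free(&s->verbs);
        free(s);
}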
index 41ca803a1fff9d1a4c60baa3889cc3171a990bf2..51cb2a2e4fce03fbfdce96fb4b2c1b23063d45ed 100644 (file)
@@ -1184,6 +1184,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
 {
        if (!codec)
                return;
+       snd_hda_jack_tbl_clear(codec);
        restore_init_pincfgs(codec);
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        cancel_delayed_work(&codec->power_work);
@@ -1192,6 +1193,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
        list_del(&codec->list);
        snd_array_free(&codec->mixers);
        snd_array_free(&codec->nids);
+       snd_array_free(&codec->cvt_setups);
        snd_array_free(&codec->conn_lists);
        snd_array_free(&codec->spdif_out);
        codec->bus->caddr_tbl[codec->addr] = NULL;
@@ -2333,6 +2335,8 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        /* free only driver_pins so that init_pins + user_pins are restored */
        snd_array_free(&codec->driver_pins);
        restore_pincfgs(codec);
+       snd_array_free(&codec->cvt_setups);
+       snd_array_free(&codec->spdif_out);
        codec->num_pcms = 0;
        codec->pcm_info = NULL;
        codec->preset = NULL;
@@ -4393,20 +4397,19 @@ void snd_hda_update_power_acct(struct hda_codec *codec)
        codec->power_jiffies += delta;
 }
 
-/**
- * snd_hda_power_up - Power-up the codec
- * @codec: HD-audio codec
- *
- * Increment the power-up counter and power up the hardware really when
- * not turned on yet.
- */
-void snd_hda_power_up(struct hda_codec *codec)
+/* Transition to powered up; if wait_power_down is set, wait for a pending
+ * transition to D3 to complete first.  A pending D3 transition is indicated
+ * by power_transition == -1. */
+static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
 {
        struct hda_bus *bus = codec->bus;
 
        spin_lock(&codec->power_lock);
        codec->power_count++;
-       if (codec->power_on || codec->power_transition > 0) {
+       /* Return if power_on or transitioning to power_on, unless currently
+        * powering down. */
+       if ((codec->power_on || codec->power_transition > 0) &&
+           !(wait_power_down && codec->power_transition < 0)) {
                spin_unlock(&codec->power_lock);
                return;
        }
@@ -4430,8 +4433,37 @@ void snd_hda_power_up(struct hda_codec *codec)
        codec->power_transition = 0;
        spin_unlock(&codec->power_lock);
 }
+
+/**
+ * snd_hda_power_up - Power-up the codec
+ * @codec: HD-audio codec
+ *
+ * Increment the power-up counter and power up the hardware really when
+ * not turned on yet.
+ */
+void snd_hda_power_up(struct hda_codec *codec)
+{
+       __snd_hda_power_up(codec, false);
+}
 EXPORT_SYMBOL_HDA(snd_hda_power_up);
 
+/**
+ * snd_hda_power_up_d3wait - Power-up the codec after waiting for any pending
+ *   D3 transition to complete.  This differs from snd_hda_power_up() when
+ *   power_transition == -1: snd_hda_power_up() treats that case as a no-op,
+ *   while snd_hda_power_up_d3wait() waits for the D3 transition to complete
+ *   and then powers back up.
+ * @codec: HD-audio codec
+ *
+ * Cancel any power down operation happening on the work queue, then power up.
+ */
+void snd_hda_power_up_d3wait(struct hda_codec *codec)
+{
+       /* This will cancel and wait for pending power_work to complete. */
+       __snd_hda_power_up(codec, true);
+}
+EXPORT_SYMBOL_HDA(snd_hda_power_up_d3wait);
+
 #define power_save(codec)      \
        ((codec)->bus->power_save ? *(codec)->bus->power_save : 0)
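Both entry points now share __snd_hda_power_up(); the only difference is whether a pending power-down (power_transition < 0) is waited out or treated as already powered. A stand-alone sketch of that decision, with a simplified codec structure and none of the real locking or work-queue handling:

#include <stdbool.h>

/* fake_codec is an illustrative reduction of the real state. */
struct fake_codec {
        int power_count;
        bool power_on;
        int power_transition;   /* >0 powering up, <0 powering down, 0 idle */
};

/* Returns true when the hardware really has to be (re)powered. */
static bool needs_real_power_up(struct fake_codec *c, bool wait_power_down)
{
        c->power_count++;
        /* Already on or heading up: nothing to do, unless a power-down is
         * pending and the caller asked to wait it out (the d3wait case). */
        if ((c->power_on || c->power_transition > 0) &&
            !(wait_power_down && c->power_transition < 0))
                return false;
        return true;
}

Here snd_hda_power_up() corresponds to wait_power_down = false and snd_hda_power_up_d3wait() to true.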
 
index 4fc3960c85917837508ef32b1d954737061fc926..2fdaadbb4326fad36863fa1c5f1373464614ab88 100644 (file)
@@ -1056,10 +1056,12 @@ const char *snd_hda_get_jack_location(u32 cfg);
  */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 void snd_hda_power_up(struct hda_codec *codec);
+void snd_hda_power_up_d3wait(struct hda_codec *codec);
 void snd_hda_power_down(struct hda_codec *codec);
 void snd_hda_update_power_acct(struct hda_codec *codec);
 #else
 static inline void snd_hda_power_up(struct hda_codec *codec) {}
+static inline void snd_hda_power_up_d3wait(struct hda_codec *codec) {}
 static inline void snd_hda_power_down(struct hda_codec *codec) {}
 #endif
 
index 2b6392be451c688830cf9d42e346d0eee61ac1dc..7757536b9d5faccfdd33d5ff2ece304d4282a2d0 100644 (file)
@@ -1766,7 +1766,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
                                   buff_step);
        snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                                   buff_step);
-       snd_hda_power_up(apcm->codec);
+       snd_hda_power_up_d3wait(apcm->codec);
        err = hinfo->ops.open(hinfo, apcm->codec, substream);
        if (err < 0) {
                azx_release_device(azx_dev);
@@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx *chip)
 static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
 static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
 
+#ifdef SUPPORT_VGA_SWITCHEROO
 static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
 
-#ifdef SUPPORT_VGA_SWITCHEROO
 static void azx_vs_set_state(struct pci_dev *pci,
                             enum vga_switcheroo_state state)
 {
@@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip)
 #else
 #define init_vga_switcheroo(chip)              /* NOP */
 #define register_vga_switcheroo(chip)          0
+#define check_hdmi_disabled(pci)       false
 #endif /* SUPPORT_VGA_SWITCHEROO */
 
 /*
@@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device)
        return azx_free(device->device_data);
 }
 
+#ifdef SUPPORT_VGA_SWITCHEROO
 /*
  * Check of disabled HDMI controller by vga-switcheroo
  */
@@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
        struct pci_dev *p = get_bound_vga(pci);
 
        if (p) {
-               if (vga_default_device() && p != vga_default_device())
+               if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF)
                        vga_inactive = true;
                pci_dev_put(p);
        }
        return vga_inactive;
 }
+#endif /* SUPPORT_VGA_SWITCHEROO */
 
 /*
  * white/black-listing for position_fix
@@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(0x6549, 0x1200),
          .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
        /* Creative X-Fi (CA0110-IBG) */
+       /* CTHDA chips */
+       { PCI_DEVICE(0x1102, 0x0010),
+         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
+       { PCI_DEVICE(0x1102, 0x0012),
+         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
 #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
        /* the following entry conflicts with snd-ctxfi driver,
         * as ctxfi driver mutates from HD-audio to native mode with
@@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
          AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
 #endif
-       /* CTHDA chips */
-       { PCI_DEVICE(0x1102, 0x0010),
-         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
-       { PCI_DEVICE(0x1102, 0x0012),
-         .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
        /* Vortex86MX */
        { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
        /* VMware HDAudio */
index 3acb5824ad39e95e354491c2336a445cd796745e..2bf99fc1cbf243a78d06b711df3e9852d5882998 100644 (file)
@@ -445,8 +445,10 @@ static int conexant_init(struct hda_codec *codec)
 
 static void conexant_free(struct hda_codec *codec)
 {
+       struct conexant_spec *spec = codec->spec;
+       snd_hda_gen_free(&spec->gen);
        snd_hda_detach_beep_device(codec);
-       kfree(codec->spec);
+       kfree(spec);
 }
 
 static const struct snd_kcontrol_new cxt_capture_mixers[] = {
@@ -4061,7 +4063,7 @@ static void cx_auto_init_digital(struct hda_codec *codec)
 static int cx_auto_init(struct hda_codec *codec)
 {
        struct conexant_spec *spec = codec->spec;
-       /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/
+       snd_hda_gen_apply_verbs(codec);
        cx_auto_init_output(codec);
        cx_auto_init_input(codec);
        cx_auto_init_digital(codec);
@@ -4466,6 +4468,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
        {}
 };
 
@@ -4497,6 +4500,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
        if (!spec)
                return -ENOMEM;
        codec->spec = spec;
+       snd_hda_gen_init(&spec->gen);
 
        switch (codec->vendor_id) {
        case 0x14f15045:
index 224410e8e9e7461431063a8d1f23b8650362faca..aa4c25e0f3277fce38520c72c9759bfc90b9022d 100644 (file)
@@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec)
        alc_fix_pll(codec);
        alc_auto_init_amp(codec, spec->init_amp);
 
+       snd_hda_gen_apply_verbs(codec);
        alc_init_special_input_src(codec);
        alc_auto_init_std(codec);
 
@@ -2288,6 +2289,7 @@ static void alc_free(struct hda_codec *codec)
        alc_shutup(codec);
        alc_free_kctls(codec);
        alc_free_bind_ctls(codec);
+       snd_hda_gen_free(&spec->gen);
        kfree(spec);
        snd_hda_detach_beep_device(codec);
 }
@@ -4252,6 +4254,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
                return -ENOMEM;
        codec->spec = spec;
        spec->mixer_nid = mixer_nid;
+       snd_hda_gen_init(&spec->gen);
 
        err = alc_codec_rename_from_preset(codec);
        if (err < 0) {
@@ -6439,6 +6442,7 @@ enum {
        ALC662_FIXUP_ASUS_MODE7,
        ALC662_FIXUP_ASUS_MODE8,
        ALC662_FIXUP_NO_JACK_DETECT,
+       ALC662_FIXUP_ZOTAC_Z68,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -6588,6 +6592,13 @@ static const struct alc_fixup alc662_fixups[] = {
                .type = ALC_FIXUP_FUNC,
                .v.func = alc_fixup_no_jack_detect,
        },
+       [ALC662_FIXUP_ZOTAC_Z68] = {
+               .type = ALC_FIXUP_PINS,
+               .v.pins = (const struct alc_pincfg[]) {
+                       { 0x1b, 0x02214020 }, /* Front HP */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6601,6 +6612,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+       SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
        SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
 
 #if 0
@@ -6676,6 +6688,31 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
        {}
 };
 
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+       int val, coef;
+
+       coef = alc_get_coef0(codec);
+
+       switch (codec->vendor_id) {
+       case 0x10ec0662:
+               if ((coef & 0x00f0) == 0x0030) {
+                       val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+                       alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+               }
+               break;
+       case 0x10ec0272:
+       case 0x10ec0273:
+       case 0x10ec0663:
+       case 0x10ec0665:
+       case 0x10ec0670:
+       case 0x10ec0671:
+       case 0x10ec0672:
+               val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+               alc_write_coef_idx(codec, 0xd, val | (1<<14));
+               break;
+       }
+}
 
 /*
  */
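alc662_fill_coef() is a read-modify-write of an indexed COEF register, selected by the codec vendor ID. A toy version of the same pattern, with an array standing in for the indexed registers; the indices and bit positions mirror the hunk above, but the accessors are inventions:

static unsigned int coef_regs[32];      /* pretend coefficient space */

static unsigned int read_coef(unsigned int idx)
{
        return coef_regs[idx];
}

static void write_coef(unsigned int idx, unsigned int val)
{
        coef_regs[idx] = val;
}

/* Read, flip one bit, write back; which bit depends on the chip variant. */
static void fill_coef_sketch(int is_alc662)
{
        unsigned int val;

        if (is_alc662) {
                val = read_coef(0x04);
                write_coef(0x04, val & ~(1u << 10));    /* clear the bit */
        } else {
                val = read_coef(0x0d);
                write_coef(0x0d, val | (1u << 14));     /* set the bit */
        }
}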
@@ -6695,12 +6732,8 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
-       if ((alc_get_coef0(codec) & (1 << 14)) &&
-           codec->bus->pci->subsystem_vendor == 0x1025 &&
-           spec->cdefine.platform_type == 1) {
-               if (alc_codec_rename(codec, "ALC272X") < 0)
-                       goto error;
-       }
+       spec->init_hook = alc662_fill_coef;
+       alc662_fill_coef(codec);
 
        alc_pick_fixup(codec, alc662_fixup_models,
                       alc662_fixup_tbl, alc662_fixups);
@@ -6708,6 +6741,13 @@ static int patch_alc662(struct hda_codec *codec)
 
        alc_auto_parse_customize_define(codec);
 
+       if ((alc_get_coef0(codec) & (1 << 14)) &&
+           codec->bus->pci->subsystem_vendor == 0x1025 &&
+           spec->cdefine.platform_type == 1) {
+               if (alc_codec_rename(codec, "ALC272X") < 0)
+                       goto error;
+       }
+
        /* automatic parse from the BIOS config */
        err = alc662_parse_auto_config(codec);
        if (err < 0)
@@ -6790,6 +6830,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
        { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
        { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+       { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
          .patch = patch_alc861 },
        { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
index 7db8228f1b882c013f4105e535af8ceaae3c1fbd..07675282015a5a87a7572b4bb632abae1684e0e9 100644 (file)
@@ -4367,7 +4367,7 @@ static int stac92xx_init(struct hda_codec *codec)
                                         AC_PINCTL_IN_EN);
        for (i = 0; i < spec->num_pwrs; i++)  {
                hda_nid_t nid = spec->pwr_nids[i];
-               int pinctl, def_conf;
+               unsigned int pinctl, def_conf;
 
                def_conf = snd_hda_codec_get_pincfg(codec, nid);
                def_conf = get_defcfg_connect(def_conf);
@@ -4376,6 +4376,11 @@ static int stac92xx_init(struct hda_codec *codec)
                        stac_toggle_power_map(codec, nid, 0);
                        continue;
                }
+               if (def_conf == AC_JACK_PORT_FIXED) {
+                       /* no need for jack detection for fixed pins */
+                       stac_toggle_power_map(codec, nid, 1);
+                       continue;
+               }
                /* power on when no jack detection is available */
                /* or when the VREF is used for controlling LED */
                if (!spec->hp_detect ||
index 64d2a4fa34b27d81ad84d0e64ed625e04e293b7a..e9b62b5ea637580ea7e8904ec2c68662ed1471fc 100644 (file)
@@ -935,9 +935,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
index 6f097fb60683e7b5605b38e373c7f1c75f4158c9..08c7f6685ff0937a367824c7a731fd4fb8e48d72 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
index a75c3766aedeec9192c0d887ebe9b8ff366e1e5b..0418fa11e6bd1a828a40249a4fb65f63c47255a3 100644 (file)
@@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000)
 }
 
 static int wm2000_poll_bit(struct i2c_client *i2c,
-                          unsigned int reg, u8 mask, int timeout)
+                          unsigned int reg, u8 mask)
 {
+       int timeout = 4000;
        int val;
 
        val = wm2000_read(i2c, reg);
@@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c,
 static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int ret, timeout;
+       int ret;
 
        BUG_ON(wm2000->anc_mode != ANC_OFF);
 
@@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 
        /* Wait for ANC engine to become ready */
        if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-                            WM2000_ANC_ENG_IDLE, 1)) {
+                            WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev, "ANC engine failed to reset\n");
                return -ETIMEDOUT;
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_BOOT_COMPLETE, 1)) {
+                            WM2000_STATUS_BOOT_COMPLETE)) {
                dev_err(&i2c->dev, "ANC engine failed to initialise\n");
                return -ETIMEDOUT;
        }
@@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
        dev_dbg(&i2c->dev, "Download complete\n");
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
 
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_MOUSE_ENABLE |
                             WM2000_MODE_THERMAL_ENABLE);
        } else {
-               timeout = 10;
-
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_MOUSE_ENABLE |
                             WM2000_MODE_THERMAL_ENABLE);
@@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
        wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_MOUSE_ACTIVE, timeout)) {
-               dev_err(&i2c->dev, "Timed out waiting for device after %dms\n",
-                       timeout * 10);
+                            WM2000_STATUS_MOUSE_ACTIVE)) {
+               dev_err(&i2c->dev, "Timed out waiting for device\n");
                return -ETIMEDOUT;
        }
 
@@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 static int wm2000_power_down(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int timeout;
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_POWER_DOWN);
        } else {
-               timeout = 10;
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_POWER_DOWN);
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) {
+                            WM2000_STATUS_POWER_DOWN_COMPLETE)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC power down\n");
                return -ETIMEDOUT;
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-                            WM2000_ANC_ENG_IDLE, 1)) {
+                            WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
                return -ETIMEDOUT;
        }
@@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue)
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_ANC_DISABLED, 10)) {
+                            WM2000_STATUS_ANC_DISABLED)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC disable\n");
                return -ETIMEDOUT;
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
-                            WM2000_ANC_ENG_IDLE, 1)) {
+                            WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
                return -ETIMEDOUT;
        }
@@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue)
        wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_MOUSE_ACTIVE, 10)) {
+                            WM2000_STATUS_MOUSE_ACTIVE)) {
                dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
                return -ETIMEDOUT;
        }
@@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue)
 static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int timeout;
 
        BUG_ON(wm2000->anc_mode != ANC_ACTIVE);
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
 
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_STANDBY_ENTRY);
        } else {
-               timeout = 10;
-
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_STANDBY_ENTRY);
        }
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_ANC_DISABLED, timeout)) {
+                            WM2000_STATUS_ANC_DISABLED)) {
                dev_err(&i2c->dev,
                        "Timed out waiting for ANC disable after 1ms\n");
                return -ETIMEDOUT;
        }
 
-       if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE,
-                            1)) {
+       if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) {
                dev_err(&i2c->dev,
-                       "Timed out waiting for standby after %dms\n",
-                       timeout * 10);
+                       "Timed out waiting for standby\n");
                return -ETIMEDOUT;
        }
 
@@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
 static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
 {
        struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
-       int timeout;
 
        BUG_ON(wm2000->anc_mode != ANC_STANDBY);
 
        wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0);
 
        if (analogue) {
-               timeout = 248;
-               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
+               wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
 
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_ANA_SEQ_INCLUDE |
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_MOUSE_ENABLE);
        } else {
-               timeout = 10;
-
                wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
                             WM2000_MODE_THERMAL_ENABLE |
                             WM2000_MODE_MOUSE_ENABLE);
@@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
        wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
 
        if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
-                            WM2000_STATUS_MOUSE_ACTIVE, timeout)) {
-               dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n",
-                       timeout * 10);
+                            WM2000_STATUS_MOUSE_ACTIVE)) {
+               dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
                return -ETIMEDOUT;
        }
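wm2000_poll_bit() now carries a fixed polling budget internally instead of taking a timeout from every caller, and the callers only test its result for zero. A self-contained sketch of such a fixed-budget poll; read_reg() is a stub standing in for the driver's I2C read:

static unsigned int fake_reg_value;     /* stands in for the hardware */

static unsigned int read_reg(unsigned int reg)
{
        (void)reg;
        return fake_reg_value;
}

/* Poll until (reg & mask) becomes non-zero or the fixed budget is spent;
 * the return value is zero exactly when we timed out, which matches the
 * "if (!wm2000_poll_bit(...))" checks in the callers above. */
static int poll_bit(unsigned int reg, unsigned int mask)
{
        int timeout = 4000;
        unsigned int val = read_reg(reg);

        while (!(val & mask) && --timeout)
                val = read_reg(reg);

        return timeout;
}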
 
index acbdc5fde9236be0e43a5767bd0135eb92450e68..32682c1b7cdece413693db6f6487ae4df640da09 100644 (file)
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
 
 static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
        5644800,
+       3763200,
        2882400,
        1881600,
        1411200,
index 65d525d74c549ee9d83bbb1f1ca0a982a103a567..812acd83fb4889e3413c76c0269fd2e36e33286b 100644 (file)
@@ -1863,6 +1863,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
                                return ret;
                        }
 
+                       regcache_cache_only(wm8904->regmap, false);
                        regcache_sync(wm8904->regmap);
 
                        /* Enable bias */
@@ -1899,14 +1900,8 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
                                    WM8904_BIAS_ENA, 0);
 
-#ifdef CONFIG_REGULATOR
-               /* Post 2.6.34 we will be able to get a callback when
-                * the regulators are disabled which we can use but
-                * for now just assume that the power will be cut if
-                * the regulator API is in use.
-                */
-               codec->cache_sync = 1;
-#endif
+               regcache_cache_only(wm8904->regmap, true);
+               regcache_mark_dirty(wm8904->regmap);
 
                regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
                                       wm8904->supplies);
@@ -2084,10 +2079,8 @@ static int wm8904_probe(struct snd_soc_codec *codec)
 {
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
        struct wm8904_pdata *pdata = wm8904->pdata;
-       u16 *reg_cache = codec->reg_cache;
        int ret, i;
 
-       codec->cache_sync = 1;
        codec->control_data = wm8904->regmap;
 
        switch (wm8904->devtype) {
@@ -2150,6 +2143,7 @@ static int wm8904_probe(struct snd_soc_codec *codec)
                goto err_enable;
        }
 
+       regcache_cache_only(wm8904->regmap, true);
        /* Change some default settings - latch VU and enable ZC */
        snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
                            WM8904_ADC_VU, WM8904_ADC_VU);
@@ -2180,14 +2174,18 @@ static int wm8904_probe(struct snd_soc_codec *codec)
                        if (!pdata->gpio_cfg[i])
                                continue;
 
-                       reg_cache[WM8904_GPIO_CONTROL_1 + i]
-                               = pdata->gpio_cfg[i] & 0xffff;
+                       regmap_update_bits(wm8904->regmap,
+                                          WM8904_GPIO_CONTROL_1 + i,
+                                          0xffff,
+                                          pdata->gpio_cfg[i]);
                }
 
                /* Zero is the default value for these anyway */
                for (i = 0; i < WM8904_MIC_REGS; i++)
-                       reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i]
-                               = pdata->mic_cfg[i];
+                       regmap_update_bits(wm8904->regmap,
+                                          WM8904_MIC_BIAS_CONTROL_0 + i,
+                                          0xffff,
+                                          pdata->mic_cfg[i]);
        }
 
        /* Set Class W by default - this will be managed by the Class
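The wm8904 hunks retire the old cache_sync flag in favour of the regmap cache calls: cache-only plus mark-dirty when the supplies are cut, then cache-only off plus a sync when power returns. The toy write-through cache below shows why that ordering works; toy_regmap and its helpers are inventions for illustration, not the regmap API:

#include <stdbool.h>
#include <stdio.h>

#define NREGS 8

struct toy_regmap {
        unsigned int cache[NREGS];
        bool dirty[NREGS];
        bool cache_only;
};

static void hw_write(unsigned int reg, unsigned int val)
{
        printf("hw write: reg %u = 0x%x\n", reg, val);
}

/* Writes always land in the cache; they only reach the hardware when the
 * map is not in cache-only mode. */
static void toy_write(struct toy_regmap *m, unsigned int reg, unsigned int val)
{
        m->cache[reg] = val;
        if (m->cache_only) {
                m->dirty[reg] = true;   /* replay it on the next sync */
        } else {
                hw_write(reg, val);
                m->dirty[reg] = false;
        }
}

/* Power loss: assume the hardware forgot everything we ever wrote. */
static void toy_mark_dirty(struct toy_regmap *m)
{
        for (unsigned int r = 0; r < NREGS; r++)
                m->dirty[r] = true;
}

/* Power restored: leave cache-only mode and replay the dirty registers. */
static void toy_sync(struct toy_regmap *m)
{
        m->cache_only = false;
        for (unsigned int r = 0; r < NREGS; r++)
                if (m->dirty[r]) {
                        hw_write(r, m->cache[r]);
                        m->dirty[r] = false;
                }
}

In this picture, power-down maps to setting cache_only and calling toy_mark_dirty(), and power-up maps to toy_sync().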
index 993639d694ce308aed067cea3003fcbf6ad2ee2b..1436b6ce74d1319b347bc8294dee332a616a02c3 100644 (file)
 #define WM8994_NUM_DRC 3
 #define WM8994_NUM_EQ  3
 
+static struct {
+       unsigned int reg;
+       unsigned int mask;
+} wm8994_vu_bits[] = {
+       { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
+       { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
+       { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
+       { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
+       { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU },
+       { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU },
+       { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
+       { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
+       { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU },
+       { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU },
+
+       { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
+       { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
+       { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
+       { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
+       { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
+       { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
+       { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
+       { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
+       { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
+       { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+       { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
+       { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF2ADC_VU },
+       { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
+       { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU },
+       { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU },
+       { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
+};
+
 static int wm8994_drc_base[] = {
        WM8994_AIF1_DRC1_1,
        WM8994_AIF1_DRC2_1,
@@ -691,9 +724,6 @@ static void wm1811_jackdet_set_mode(struct snd_soc_codec *codec, u16 mode)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
-       if (!wm8994->jackdet || !wm8994->jack_cb)
-               return;
-
        if (!wm8994->jackdet || !wm8994->jack_cb)
                return;
 
@@ -989,6 +1019,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
        struct snd_soc_codec *codec = w->codec;
        struct wm8994 *control = codec->control_data;
        int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
+       int i;
        int dac;
        int adc;
        int val;
@@ -1047,6 +1078,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
                                    WM8994_AIF1DAC2L_ENA);
                break;
 
+       case SND_SOC_DAPM_POST_PMU:
+               for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+                       snd_soc_write(codec, wm8994_vu_bits[i].reg,
+                                     snd_soc_read(codec,
+                                                  wm8994_vu_bits[i].reg));
+               break;
+
        case SND_SOC_DAPM_PRE_PMD:
        case SND_SOC_DAPM_POST_PMD:
                snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1072,6 +1110,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
                      struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = w->codec;
+       int i;
        int dac;
        int adc;
        int val;
@@ -1122,6 +1161,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
                                    WM8994_AIF2DACR_ENA);
                break;
 
+       case SND_SOC_DAPM_POST_PMU:
+               for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+                       snd_soc_write(codec, wm8994_vu_bits[i].reg,
+                                     snd_soc_read(codec,
+                                                  wm8994_vu_bits[i].reg));
+               break;
+
        case SND_SOC_DAPM_PRE_PMD:
        case SND_SOC_DAPM_POST_PMD:
                snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1190,17 +1236,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w,
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
                if (wm8994->aif1clk_enable) {
-                       aif1clk_ev(w, kcontrol, event);
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
                        snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
                                            WM8994_AIF1CLK_ENA_MASK,
                                            WM8994_AIF1CLK_ENA);
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
                        wm8994->aif1clk_enable = 0;
                }
                if (wm8994->aif2clk_enable) {
-                       aif2clk_ev(w, kcontrol, event);
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
                        snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
                                            WM8994_AIF2CLK_ENA_MASK,
                                            WM8994_AIF2CLK_ENA);
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
                        wm8994->aif2clk_enable = 0;
                }
                break;
@@ -1221,15 +1269,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w,
        switch (event) {
        case SND_SOC_DAPM_POST_PMD:
                if (wm8994->aif1clk_disable) {
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
                        snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
                                            WM8994_AIF1CLK_ENA_MASK, 0);
-                       aif1clk_ev(w, kcontrol, event);
+                       aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
                        wm8994->aif1clk_disable = 0;
                }
                if (wm8994->aif2clk_disable) {
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
                        snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
                                            WM8994_AIF2CLK_ENA_MASK, 0);
-                       aif2clk_ev(w, kcontrol, event);
+                       aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
                        wm8994->aif2clk_disable = 0;
                }
                break;
@@ -1527,9 +1577,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
 
 static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
 SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
-                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+                   SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
-                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+                   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+                   SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
                   left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -3879,39 +3931,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 
        pm_runtime_put(codec->dev);
 
-       /* Latch volume updates (right only; we always do left then right). */
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
-                           WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
-                           WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
-                           WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
-                           WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
-                           WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
-                           WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
-                           WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
-                           WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
-                           WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
-                           WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
-                           WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
-                           WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
-       snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
-                           WM8994_DAC1_VU, WM8994_DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
-                           WM8994_DAC1_VU, WM8994_DAC1_VU);
-       snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
-                           WM8994_DAC2_VU, WM8994_DAC2_VU);
-       snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
-                           WM8994_DAC2_VU, WM8994_DAC2_VU);
+       /* Latch volume update bits */
+       for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+               snd_soc_update_bits(codec, wm8994_vu_bits[i].reg,
+                                   wm8994_vu_bits[i].mask,
+                                   wm8994_vu_bits[i].mask);
 
        /* Set the low bit of the 3D stereo depth so TLV matches */
        snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2,
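Sixteen open-coded volume-update writes collapse into the wm8994_vu_bits[] table plus a loop, and the AIF clock events can replay the same table after power-up. A table-driven sketch of the latch loop; the register numbers and reg_update_bits() are placeholders, not the wm8994 map or the ASoC API:

#include <stddef.h>
#include <stdio.h>

struct vu_bit {
        unsigned int reg;
        unsigned int mask;
};

static const struct vu_bit vu_bits[] = {
        { 0x1c, 1u << 8 },
        { 0x1d, 1u << 8 },
        { 0x26, 1u << 8 },
};

/* Stand-in for snd_soc_update_bits(): set the masked bits in reg. */
static void reg_update_bits(unsigned int reg, unsigned int mask,
                            unsigned int val)
{
        printf("reg 0x%02x: mask 0x%x <- 0x%x\n", reg, mask, val & mask);
}

/* One loop replaces a long run of copy-pasted calls and can be re-run
 * whenever the registers need latching again. */
static void latch_vu_bits(void)
{
        for (size_t i = 0; i < sizeof(vu_bits) / sizeof(vu_bits[0]); i++)
                reg_update_bits(vu_bits[i].reg, vu_bits[i].mask,
                                vu_bits[i].mask);
}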
index 8af422e38fd060118968eb6f3bf62d707b32584e..dc9b42b7fc4d79d749f07be41f1fe3484eb987e5 100644 (file)
@@ -2837,8 +2837,6 @@ static int wm8996_probe(struct snd_soc_codec *codec)
                }
        }
 
-       regcache_cache_only(codec->control_data, true);
-
        /* Apply platform data settings */
        snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL,
                            WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
@@ -3051,7 +3049,6 @@ static int wm8996_remove(struct snd_soc_codec *codec)
        for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
                regulator_unregister_notifier(wm8996->supplies[i].consumer,
                                              &wm8996->disable_nb[i]);
-       regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
 
        return 0;
 }
@@ -3206,14 +3203,15 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
        dev_info(&i2c->dev, "revision %c\n",
                 (reg & WM8996_CHIP_REV_MASK) + 'A');
 
-       regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
-
        ret = wm8996_reset(wm8996);
        if (ret < 0) {
                dev_err(&i2c->dev, "Failed to issue reset\n");
                goto err_regmap;
        }
 
+       regcache_cache_only(wm8996->regmap, true);
+       regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
+
        wm8996_init_gpio(wm8996);
 
        ret = snd_soc_register_codec(&i2c->dev,
index f23700359c672372f84a58df8701176b57e6d475..080327414c6b61da37d62a845261e93f413864fa 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "imx-audmux.h"
 
@@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port);
 static int __devinit imx_audmux_probe(struct platform_device *pdev)
 {
        struct resource *res;
+       struct pinctrl *pinctrl;
        const struct of_device_id *of_id =
                        of_match_device(imx_audmux_dt_ids, &pdev->dev);
 
@@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev)
        if (!audmux_base)
                return -EADDRNOTAVAIL;
 
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               dev_err(&pdev->dev, "setup pinctrl failed!\n");
+               return PTR_ERR(pinctrl);
+       }
+
        audmux_clk = clk_get(&pdev->dev, "audmux");
        if (IS_ERR(audmux_clk)) {
                dev_dbg(&pdev->dev, "cannot get clock: %ld\n",
index 1c2aa7fab3fd1f2beed5232187e959e69f38e7bc..4da5fc55c7ee81d81369fae163344e8058c96b86 100644 (file)
@@ -33,7 +33,6 @@
 
 #include <mach/hardware.h>
 #include <mach/dma.h>
-#include <mach/audio.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 #include "pxa-ssp.h"
@@ -194,7 +193,7 @@ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div)
 {
        u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
 
-       if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) {
+       if (ssp->type == PXA25x_SSP) {
                sscr0 &= ~0x0000ff00;
                sscr0 |= ((div - 2)/2) << 8; /* 2..512 */
        } else {
@@ -212,7 +211,7 @@ static u32 pxa_ssp_get_scr(struct ssp_device *ssp)
        u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
        u32 div;
 
-       if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP)
+       if (ssp->type == PXA25x_SSP)
                div = ((sscr0 >> 8) & 0xff) * 2 + 2;
        else
                div = ((sscr0 >> 8) & 0xfff) + 1;
@@ -242,7 +241,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
                break;
        case PXA_SSP_CLK_PLL:
                /* Internal PLL is fixed */
-               if (cpu_is_pxa25x())
+               if (ssp->type == PXA25x_SSP)
                        priv->sysclk = 1843200;
                else
                        priv->sysclk = 13000000;
@@ -266,11 +265,11 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
 
        /* The SSP clock must be disabled when changing SSP clock mode
         * on PXA2xx.  On PXA3xx it must be enabled when doing so. */
-       if (!cpu_is_pxa3xx())
+       if (ssp->type != PXA3xx_SSP)
                clk_disable(ssp->clk);
        val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
        pxa_ssp_write_reg(ssp, SSCR0, val);
-       if (!cpu_is_pxa3xx())
+       if (ssp->type != PXA3xx_SSP)
                clk_enable(ssp->clk);
 
        return 0;
@@ -294,24 +293,20 @@ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
        case PXA_SSP_AUDIO_DIV_SCDB:
                val = pxa_ssp_read_reg(ssp, SSACD);
                val &= ~SSACD_SCDB;
-#if defined(CONFIG_PXA3xx)
-               if (cpu_is_pxa3xx())
+               if (ssp->type == PXA3xx_SSP)
                        val &= ~SSACD_SCDX8;
-#endif
                switch (div) {
                case PXA_SSP_CLK_SCDB_1:
                        val |= SSACD_SCDB;
                        break;
                case PXA_SSP_CLK_SCDB_4:
                        break;
-#if defined(CONFIG_PXA3xx)
                case PXA_SSP_CLK_SCDB_8:
-                       if (cpu_is_pxa3xx())
+                       if (ssp->type == PXA3xx_SSP)
                                val |= SSACD_SCDX8;
                        else
                                return -EINVAL;
                        break;
-#endif
                default:
                        return -EINVAL;
                }
@@ -337,10 +332,8 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
        struct ssp_device *ssp = priv->ssp;
        u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
 
-#if defined(CONFIG_PXA3xx)
-       if (cpu_is_pxa3xx())
+       if (ssp->type == PXA3xx_SSP)
                pxa_ssp_write_reg(ssp, SSACDD, 0);
-#endif
 
        switch (freq_out) {
        case 5622000:
@@ -365,11 +358,10 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
                break;
 
        default:
-#ifdef CONFIG_PXA3xx
                /* PXA3xx has a clock ditherer which can be used to generate
                 * a wider range of frequencies - calculate a value for it.
                 */
-               if (cpu_is_pxa3xx()) {
+               if (ssp->type == PXA3xx_SSP) {
                        u32 val;
                        u64 tmp = 19968;
                        tmp *= 1000000;
@@ -386,7 +378,6 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
                                val, freq_out);
                        break;
                }
-#endif
 
                return -EINVAL;
        }
@@ -590,10 +581,8 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
        /* bit size */
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
-#ifdef CONFIG_PXA3xx
-               if (cpu_is_pxa3xx())
+               if (ssp->type == PXA3xx_SSP)
                        sscr0 |= SSCR0_FPCKE;
-#endif
                sscr0 |= SSCR0_DataSize(16);
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
@@ -618,9 +607,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
                        * trying and failing a lot; some of the registers
                        * needed for that mode are only available on PXA3xx.
                        */
-
-#ifdef CONFIG_PXA3xx
-                       if (!cpu_is_pxa3xx())
+                       if (ssp->type != PXA3xx_SSP)
                                return -EINVAL;
 
                        sspsp |= SSPSP_SFRMWDTH(width * 2);
@@ -628,9 +615,6 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
                        sspsp |= SSPSP_EDMYSTOP(3);
                        sspsp |= SSPSP_DMYSTOP(3);
                        sspsp |= SSPSP_DMYSTRT(1);
-#else
-                       return -EINVAL;
-#endif
                } else {
                        /* The frame width is the width the LRCLK is
                         * asserted for; the delay is expressed in
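Throughout pxa-ssp.c the cpu_is_pxa25x()/cpu_is_pxa3xx() globals and the CONFIG_PXA3xx #ifdefs give way to checks on the port's own ssp->type, so one binary can drive every variant. A minimal sketch of keying behaviour off a per-device type field; the clock rates come from the hunks above, the structure itself is illustrative:

enum ssp_type { SSP_PXA25x, SSP_PXA27x, SSP_PXA3xx };

struct ssp_port {
        enum ssp_type type;
};

/* The fixed internal PLL rate differs between variants; decide it from the
 * port's own descriptor, not from a global cpu_is_xxx() test. */
static unsigned long ssp_pll_rate(const struct ssp_port *port)
{
        return port->type == SSP_PXA25x ? 1843200UL : 13000000UL;
}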
index 90ee77d2409da8402ea58026b788a624f850a25a..89eae93445cf517e5e356297374fcbcf183529ae 100644 (file)
@@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
                        /* do we need to add this widget to the list ? */
                        if (list) {
                                int err;
-                               err = dapm_list_add_widget(list, path->sink);
+                               err = dapm_list_add_widget(list, path->source);
                                if (err < 0) {
                                        dev_err(widget->dapm->dev, "could not add widget %s\n",
                                                widget->name);
@@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
        if (stream == SNDRV_PCM_STREAM_PLAYBACK)
                paths = is_connected_output_ep(dai->playback_widget, list);
        else
-               paths = is_connected_input_ep(dai->playback_widget, list);
+               paths = is_connected_input_ep(dai->capture_widget, list);
 
        trace_snd_soc_dapm_connected(paths, stream);
        dapm_clear_walk(&card->dapm);
index bedd1717a37381376e6084a5e631d739dbb06cb3..48fd15b312c1e46d06945640dcbc2df0cd1eabe2 100644 (file)
@@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
                for (i = 0; i < card->num_links; i++) {
                        be = &card->rtd[i];
 
+                       if (!be->dai_link->no_pcm)
+                               continue;
+
                        if (be->cpu_dai->playback_widget == widget ||
                                be->codec_dai->playback_widget == widget)
                                return be;
@@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
                for (i = 0; i < card->num_links; i++) {
                        be = &card->rtd[i];
 
+                       if (!be->dai_link->no_pcm)
+                               continue;
+
                        if (be->cpu_dai->capture_widget == widget ||
                                be->codec_dai->capture_widget == widget)
                                return be;
index 57cd419f743ec5ee7a84f24ed0ad9532de01fd6a..f43edb364a185de5cb2f3c43aa16850a32c2236e 100644 (file)
@@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
 MODULE_DESCRIPTION("Tegra30 AHUB driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match);
index 0b0df49d9d33b071118576c340b69366f212663f..3b6da91188a9949faaacaa73539f3fa04dbbcd4b 100644 (file)
@@ -346,6 +346,17 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
        return 0;
 }
 
+static int tegra_wm8903_remove(struct snd_soc_card *card)
+{
+       struct snd_soc_pcm_runtime *rtd = &(card->rtd[0]);
+       struct snd_soc_dai *codec_dai = rtd->codec_dai;
+       struct snd_soc_codec *codec = codec_dai->codec;
+
+       wm8903_mic_detect(codec, NULL, 0, 0);
+
+       return 0;
+}
+
 static struct snd_soc_dai_link tegra_wm8903_dai = {
        .name = "WM8903",
        .stream_name = "WM8903 PCM",
@@ -363,6 +374,8 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = {
        .dai_link = &tegra_wm8903_dai,
        .num_links = 1,
 
+       .remove = tegra_wm8903_remove,
+
        .controls = tegra_wm8903_controls,
        .num_controls = ARRAY_SIZE(tegra_wm8903_controls),
        .dapm_widgets = tegra_wm8903_dapm_widgets,
index 6f9715ab32fe77cc3fd39de42410efe1c2aa1006..56ad923bf6b5cb74db2f5a56cdc2a16807c66a03 100644 (file)
@@ -209,7 +209,7 @@ static int usb6fire_fw_ezusb_upload(
        int ret;
        u8 data;
        struct usb_device *device = interface_to_usbdev(intf);
-       const struct firmware *fw = 0;
+       const struct firmware *fw = NULL;
        struct ihex_record *rec = kmalloc(sizeof(struct ihex_record),
                        GFP_KERNEL);
 
index 0d37238b84572fed54cef1c89ee23e1d44d8ae00..2b9fffff23b62a5449d39f2a4b03cbaadb1529a6 100644 (file)
@@ -119,6 +119,7 @@ struct snd_usb_substream {
        unsigned long unlink_mask;      /* bitmask of unlinked urbs */
 
        /* data and sync endpoints for this stream */
+       unsigned int ep_num;            /* the endpoint number */
        struct snd_usb_endpoint *data_endpoint;
        struct snd_usb_endpoint *sync_endpoint;
        unsigned long flags;
index e6906901debbc2aaa71c40621fbd4241d8d15663..0f647d22cb4ac7bccffe25311011255cf0b33104 100644 (file)
@@ -414,7 +414,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
 {
        struct list_head *p;
        struct snd_usb_endpoint *ep;
-       int ret, is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
+       int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
 
        mutex_lock(&chip->mutex);
 
@@ -434,16 +434,6 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
                    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
                    ep_num);
 
-       /* select the alt setting once so the endpoints become valid */
-       ret = usb_set_interface(chip->dev, alts->desc.bInterfaceNumber,
-                               alts->desc.bAlternateSetting);
-       if (ret < 0) {
-               snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                       __func__, ret);
-               ep = NULL;
-               goto __exit_unlock;
-       }
-
        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                goto __exit_unlock;
@@ -831,9 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
        if (++ep->use_count != 1)
                return 0;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return -EINVAL;
-
        /* just to be sure */
        deactivate_urbs(ep, 0, 1);
        wait_clear_urbs(ep);
@@ -911,9 +898,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        if (snd_BUG_ON(ep->use_count == 0))
                return;
 
-       if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
-               return;
-
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, force, can_sleep);
                ep->data_subs = NULL;
@@ -926,42 +910,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
        }
 }
 
-/**
- * snd_usb_endpoint_activate: activate an snd_usb_endpoint
- *
- * @ep: the endpoint to activate
- *
- * If the endpoint is not currently in use, this functions will select the
- * correct alternate interface setting for the interface of this endpoint.
- *
- * In case of any active users, this functions does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
- */
-int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep)
-{
-       if (ep->use_count != 0)
-               return 0;
-
-       if (!ep->chip->shutdown &&
-           !test_and_set_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, ep->alt_idx);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s() usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
-                       return ret;
-               }
-
-               return 0;
-       }
-
-       return -EBUSY;
-}
-
 /**
  * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
  *
@@ -980,24 +928,15 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
        if (!ep)
                return -EINVAL;
 
+       deactivate_urbs(ep, 1, 1);
+       wait_clear_urbs(ep);
+
        if (ep->use_count != 0)
                return 0;
 
-       if (!ep->chip->shutdown &&
-           test_and_clear_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
-               int ret;
-
-               ret = usb_set_interface(ep->chip->dev, ep->iface, 0);
-               if (ret < 0) {
-                       snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
-                                               __func__, ret);
-                       return ret;
-               }
+       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
 
-               return 0;
-       }
-
-       return -EBUSY;
+       return 0;
 }
 
 /**
index 41daaa24c25f465e6ff209118d4a24f6f68ec220..e71fe55cebefa8285963643e7a592a0b71fbfc73 100644 (file)
@@ -341,6 +341,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = audigy2nx_map,
                .selector_map = audigy2nx_selectors,
        },
+       {       /* Logitech, Inc. QuickCam Pro for Notebooks */
+               .id = USB_ID(0x046d, 0x0991),
+               .ignore_ctl_error = 1,
+       },
+       {       /* Logitech, Inc. QuickCam E 3500 */
+               .id = USB_ID(0x046d, 0x09a4),
+               .ignore_ctl_error = 1,
+       },
        {
                /* Hercules DJ Console (Windows Edition) */
                .id = USB_ID(0x06f8, 0xb000),
index cdf8b7601973406c445c69c6e5cd4afbdbbdf23b..a1298f379428280045bf661f89e5fa59bb0acb48 100644 (file)
@@ -261,19 +261,6 @@ static void stop_endpoints(struct snd_usb_substream *subs,
                                      force, can_sleep, wait);
 }
 
-static int activate_endpoints(struct snd_usb_substream *subs)
-{
-       if (subs->sync_endpoint) {
-               int ret;
-
-               ret = snd_usb_endpoint_activate(subs->sync_endpoint);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return snd_usb_endpoint_activate(subs->data_endpoint);
-}
-
 static int deactivate_endpoints(struct snd_usb_substream *subs)
 {
        int reta, retb;
@@ -314,6 +301,33 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        if (fmt == subs->cur_audiofmt)
                return 0;
 
+       /* close the old interface */
+       if (subs->interface >= 0 && subs->interface != fmt->iface) {
+               err = usb_set_interface(subs->dev, subs->interface, 0);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n",
+                               dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               subs->interface = -1;
+               subs->altset_idx = 0;
+       }
+
+       /* set interface */
+       if (subs->interface != fmt->iface ||
+           subs->altset_idx != fmt->altset_idx) {
+               err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+               if (err < 0) {
+                       snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting, err);
+                       return -EIO;
+               }
+               snd_printdd(KERN_INFO "setting usb interface %d:%d\n",
+                               fmt->iface, fmt->altsetting);
+               subs->interface = fmt->iface;
+               subs->altset_idx = fmt->altset_idx;
+       }
+
        subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
                                                   alts, fmt->endpoint, subs->direction,
                                                   SND_USB_ENDPOINT_TYPE_DATA);
@@ -354,17 +368,21 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                    (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
                     get_endpoint(alts, 1)->bSynchAddress != 0 &&
                     !implicit_fb)) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting);
+                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting,
+                                  get_endpoint(alts, 1)->bmAttributes,
+                                  get_endpoint(alts, 1)->bLength,
+                                  get_endpoint(alts, 1)->bSynchAddress);
                        return -EINVAL;
                }
                ep = get_endpoint(alts, 1)->bEndpointAddress;
-               if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+               if (!implicit_fb &&
+                   get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
                    (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
-                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)) ||
-                    ( is_playback && !implicit_fb))) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting);
+                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
+                                  dev->devnum, fmt->iface, fmt->altsetting,
+                                  is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
                        return -EINVAL;
                }
 
@@ -383,7 +401,7 @@ add_sync_ep:
                subs->data_endpoint->sync_master = subs->sync_endpoint;
        }
 
-       if ((err = snd_usb_init_pitch(subs->stream->chip, subs->interface, alts, fmt)) < 0)
+       if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
                return err;
 
        subs->cur_audiofmt = fmt;
@@ -446,7 +464,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                struct usb_interface *iface;
                iface = usb_ifnum_to_if(subs->dev, fmt->iface);
                alts = &iface->altsetting[fmt->altset_idx];
-               ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+               ret = snd_usb_init_sample_rate(subs->stream->chip, fmt->iface, alts, fmt, rate);
                if (ret < 0)
                        return ret;
                subs->cur_rate = rate;
@@ -456,12 +474,6 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                mutex_lock(&subs->stream->chip->shutdown_mutex);
                /* format changed */
                stop_endpoints(subs, 0, 0, 0);
-               deactivate_endpoints(subs);
-
-               ret = activate_endpoints(subs);
-               if (ret < 0)
-                       goto unlock;
-
                ret = snd_usb_endpoint_set_params(subs->data_endpoint, hw_params, fmt,
                                                  subs->sync_endpoint);
                if (ret < 0)
@@ -496,6 +508,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        subs->period_bytes = 0;
        mutex_lock(&subs->stream->chip->shutdown_mutex);
        stop_endpoints(subs, 0, 1, 1);
+       deactivate_endpoints(subs);
        mutex_unlock(&subs->stream->chip->shutdown_mutex);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
@@ -934,16 +947,20 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
 {
-       int ret;
        struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
        struct snd_usb_substream *subs = &as->substream[direction];
 
        stop_endpoints(subs, 0, 0, 0);
-       ret = deactivate_endpoints(subs);
+
+       if (!as->chip->shutdown && subs->interface >= 0) {
+               usb_set_interface(subs->dev, subs->interface, 0);
+               subs->interface = -1;
+       }
+
        subs->pcm_substream = NULL;
        snd_usb_autosuspend(subs->stream->chip);
 
-       return ret;
+       return 0;
 }
 
 /* Since a URB can handle only a single linear buffer, we must use double
@@ -1147,7 +1164,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
        return -EINVAL;
 }
 
-int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream,
+                                            int cmd)
 {
        int err;
        struct snd_usb_substream *subs = substream->runtime->private_data;
index d89ab4c7d44b28bc0c52f9e4ec817030059b0692..79780fa57a431345e28896730d12a9fbedf11426 100644 (file)
@@ -1831,6 +1831,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       USB_DEVICE(0x0582, 0x014d),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               /* .vendor_name = "BOSS", */
+               /* .product_name = "GT-100", */
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = & (const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 /* Guillemot devices */
 {
index 6b7d7a2b7baa3a83c68e35917d95515a3caa482a..083ed81160e58b094ee8f4fca637371aa5b5fff4 100644 (file)
@@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
        subs->formats |= fp->formats;
        subs->num_formats++;
        subs->fmt_type = fp->fmt_type;
+       subs->ep_num = fp->endpoint;
 }
 
 /*
@@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                if (as->fmt_type != fp->fmt_type)
                        continue;
                subs = &as->substream[stream];
-               if (!subs->data_endpoint)
-                       continue;
-               if (subs->data_endpoint->ep_num == fp->endpoint) {
+               if (subs->ep_num == fp->endpoint) {
                        list_add_tail(&fp->list, &subs->fmt_list);
                        subs->num_formats++;
                        subs->formats |= fp->formats;
@@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
                if (as->fmt_type != fp->fmt_type)
                        continue;
                subs = &as->substream[stream];
-               if (subs->data_endpoint)
+               if (subs->ep_num)
                        continue;
                err = snd_pcm_new_stream(as->pcm, stream, 1);
                if (err < 0)
index 146fd6147e84be5cde2a66009f331f1b6ee2b805..d9834b36294373f88d29731350ccc9d384b41788 100644 (file)
@@ -701,14 +701,18 @@ int main(void)
        pfd.fd = fd;
 
        while (1) {
+               struct sockaddr *addr_p = (struct sockaddr *) &addr;
+               socklen_t addr_l = sizeof(addr);
                pfd.events = POLLIN;
                pfd.revents = 0;
                poll(&pfd, 1, -1);
 
-               len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
+               len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+                               addr_p, &addr_l);
 
-               if (len < 0) {
-                       syslog(LOG_ERR, "recv failed; error:%d", len);
+               if (len < 0 || addr.nl_pid) {
+                       syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+                                       addr.nl_pid, errno, strerror(errno));
                        close(fd);
                        return -1;
                }
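
The hunk above switches the KVP daemon from recv() to recvfrom() so it can verify the netlink sender: an nl_pid of 0 identifies the kernel, anything else is another user-space process and the datagram is rejected. A minimal sketch of the same check, assuming a netlink socket that is already bound; the helper name is made up:

#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* read one datagram and accept it only if it came from the kernel
 * (nl_pid == 0), mirroring the check added above */
static ssize_t recv_from_kernel(int fd, void *buf, size_t len)
{
        struct sockaddr_nl addr;
        socklen_t addr_len = sizeof(addr);
        ssize_t n;

        n = recvfrom(fd, buf, len, 0, (struct sockaddr *)&addr, &addr_len);
        if (n < 0)
                return -1;

        if (addr.nl_pid != 0) {
                /* spoofed message from another user-space process */
                errno = EPERM;
                return -1;
        }

        return n;
}
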
index 5476bc0a1eac8712587b0f1bb3617c8067c76f94..b4b572e8c100af50d9c836905368a8dc4fed8205 100644 (file)
@@ -1,4 +1,6 @@
 tools/perf
+tools/scripts
+tools/lib/traceevent
 include/linux/const.h
 include/linux/perf_event.h
 include/linux/rbtree.h
index 8c767c6bca91b7490cb1564da230c7c9021422e6..25249f76329daf851f925240f8c295c0c3fc5bdc 100644 (file)
@@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 
        if (symbol_conf.use_callchain) {
                err = callchain_append(he->callchain,
-                                      &evsel->hists.callchain_cursor,
+                                      &callchain_cursor,
                                       sample->period);
                if (err)
                        return err;
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
         * so we don't allocate the extra space needed because the stdio
         * code will not use it.
         */
-       if (al->sym != NULL && use_browser > 0) {
+       if (he->ms.sym != NULL && use_browser > 0) {
                struct annotation *notes = symbol__annotation(he->ms.sym);
 
                assert(evsel != NULL);
index 62ae30d34fa6c14734a5f171b52e7b3bb0979257..07b5c7703dd10677e12a5b6e36c8603f18fd0f7e 100644 (file)
@@ -1129,7 +1129,7 @@ static int add_default_attributes(void)
                return 0;
 
        if (!evsel_list->nr_entries) {
-               if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0)
+               if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
                        return -1;
        }
 
@@ -1139,21 +1139,21 @@ static int add_default_attributes(void)
                return 0;
 
        /* Append detailed run extra attributes: */
-       if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0)
+       if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
                return -1;
 
        if (detailed_run < 2)
                return 0;
 
        /* Append very detailed run extra attributes: */
-       if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0)
+       if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
                return -1;
 
        if (detailed_run < 3)
                return 0;
 
        /* Append very, very detailed run extra attributes: */
-       return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs);
+       return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
 }
 
 int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1179,6 +1179,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
                fprintf(stderr, "cannot use both --output and --log-fd\n");
                usage_with_options(stat_usage, options);
        }
+
+       if (output_fd < 0) {
+               fprintf(stderr, "argument to --log-fd must be > 0\n");
+               usage_with_options(stat_usage, options);
+       }
+
        if (!output) {
                struct timespec tm;
                mode = append_file ? "a" : "w";
@@ -1190,7 +1196,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
                }
                clock_gettime(CLOCK_REALTIME, &tm);
                fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
-       } else if (output_fd != 2) {
+       } else if (output_fd > 0) {
                mode = append_file ? "a" : "w";
                output = fdopen(output_fd, mode);
                if (!output) {
index 871b540293e132610bb6a50bb384289d89aca75c..6bb0277b7dfecdbf63e727592995480a409f16f5 100644 (file)
@@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
                }
 
                if (symbol_conf.use_callchain) {
-                       err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
+                       err = callchain_append(he->callchain, &callchain_cursor,
                                               sample->period);
                        if (err)
                                return;
index bd0bb1b1279b8ec258cc40a231fc4a9d289abd96..67e5d0cace85aad7c70c7ba3ac8bbcf95c4d6e10 100644 (file)
@@ -409,14 +409,15 @@ Counters can be enabled and disabled in two ways: via ioctl and via
 prctl.  When a counter is disabled, it doesn't count or generate
 events but does continue to exist and maintain its count value.
 
-An individual counter or counter group can be enabled with
+An individual counter can be enabled with
 
-       ioctl(fd, PERF_EVENT_IOC_ENABLE);
+       ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 
 or disabled with
 
-       ioctl(fd, PERF_EVENT_IOC_DISABLE);
+       ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 
+For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument.
 Enabling or disabling the leader of a group enables or disables the
 whole group; that is, while the group leader is disabled, none of the
 counters in the group will count.  Enabling or disabling a member of a
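
To make the updated text concrete, here is a small example of the ioctls it describes; it assumes fd and group_fd are descriptors returned by perf_event_open(2), which is not shown:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* fd: an individual counter; group_fd: the descriptor of a group leader */
static void toggle_counters(int fd, int group_fd)
{
        /* enable/disable a single counter */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        /* enable/disable every counter in the leader's group at once */
        ioctl(group_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
        ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
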
index 4deea6aaf9274f65887997fcd175fe9fd3bfc459..34b1c46eaf42c63d36ed5c36cbaceea38c2347a9 100644 (file)
@@ -668,7 +668,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
                "q/ESC/CTRL+C  Exit\n\n"
                "->            Go to target\n"
                "<-            Exit\n"
-               "h             Cycle thru hottest instructions\n"
+               "H             Cycle thru hottest instructions\n"
                "j             Toggle showing jump to target arrows\n"
                "J             Toggle showing number of jump sources on targets\n"
                "n             Search next string\n"
index ad73300f7bac6d1b11e4db0f1e121e0e26b0fb0c..95264f30417903af6f9a91aab42d757bfdf42d5f 100755 (executable)
@@ -12,7 +12,7 @@ LF='
 # First check if there is a .git to get the version from git describe
 # otherwise try to get the version from the kernel makefile
 if test -d ../../.git -o -f ../../.git &&
-       VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+       VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) &&
        case "$VN" in
        *$LF*) (exit 1) ;;
        v[0-9]*)
index 9f7106a8d9a48cb6f9600beac10bfa3a38bd9be5..3a6bff47614f788eae96a6ea7c7ed64759ec465a 100644 (file)
@@ -18,6 +18,8 @@
 #include "util.h"
 #include "callchain.h"
 
+__thread struct callchain_cursor callchain_cursor;
+
 bool ip_callchain__valid(struct ip_callchain *chain,
                         const union perf_event *event)
 {
index 7f9c0f1ae3a9aad6f6a845299f74d1340ece754f..3bdb407f9cd9f8b9ed9e6340316c4e8237942f5c 100644 (file)
@@ -76,6 +76,8 @@ struct callchain_cursor {
        struct callchain_cursor_node    *curr;
 };
 
+extern __thread struct callchain_cursor callchain_cursor;
+
 static inline void callchain_init(struct callchain_root *root)
 {
        INIT_LIST_HEAD(&root->node.siblings);
index 4ac5f5ae4ce903a9284d6b4f105493da2bc454c5..7400fb3fc50c91910a51eb3dc6a679ca6e4a05de 100644 (file)
@@ -159,6 +159,17 @@ out_delete_partial_list:
        return -1;
 }
 
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+                                    struct perf_event_attr *attrs, size_t nr_attrs)
+{
+       size_t i;
+
+       for (i = 0; i < nr_attrs; i++)
+               event_attr_init(attrs + i);
+
+       return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+}
+
 static int trace_event__id(const char *evname)
 {
        char *filename, *colon;
@@ -263,7 +274,8 @@ void perf_evlist__disable(struct perf_evlist *evlist)
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
-                               ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
+                               ioctl(FD(pos, cpu, thread),
+                                     PERF_EVENT_IOC_DISABLE, 0);
                }
        }
 }
@@ -276,7 +288,8 @@ void perf_evlist__enable(struct perf_evlist *evlist)
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        for (thread = 0; thread < evlist->threads->nr; thread++)
-                               ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+                               ioctl(FD(pos, cpu, thread),
+                                     PERF_EVENT_IOC_ENABLE, 0);
                }
        }
 }
index 58abb63ac13a42ac14f3014ba5977f4635797592..989bee9624c23c66e002b7a245c89c7114f3b219 100644 (file)
@@ -54,6 +54,8 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
 int perf_evlist__add_default(struct perf_evlist *evlist);
 int perf_evlist__add_attrs(struct perf_evlist *evlist,
                           struct perf_event_attr *attrs, size_t nr_attrs);
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+                                    struct perf_event_attr *attrs, size_t nr_attrs);
 int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
                                 const char *tracepoints[], size_t nr_tracepoints);
 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
@@ -62,6 +64,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
 
 #define perf_evlist__add_attrs_array(evlist, array) \
        perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+#define perf_evlist__add_default_attrs(evlist, array) \
+       __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
 
 #define perf_evlist__add_tracepoints_array(evlist, array) \
        perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
index 91d19138f3ec3891620b755fe0faf470d61d2319..9f6cebd798eed5ee44ea4510305b93b4f2e3fd6b 100644 (file)
@@ -494,16 +494,24 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
-                                      struct perf_sample *sample)
+                                      struct perf_sample *sample,
+                                      bool swapped)
 {
        const u64 *array = event->sample.array;
+       union u64_swap u;
 
        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;
 
        if (type & PERF_SAMPLE_CPU) {
-               u32 *p = (u32 *)array;
-               sample->cpu = *p;
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+               }
+
+               sample->cpu = u.val32[0];
                array--;
        }
 
@@ -523,9 +531,16 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
        }
 
        if (type & PERF_SAMPLE_TID) {
-               u32 *p = (u32 *)array;
-               sample->pid = p[0];
-               sample->tid = p[1];
+               u.val64 = *array;
+               if (swapped) {
+                       /* undo swap of u64, then swap on individual u32s */
+                       u.val64 = bswap_64(u.val64);
+                       u.val32[0] = bswap_32(u.val32[0]);
+                       u.val32[1] = bswap_32(u.val32[1]);
+               }
+
+               sample->pid = u.val32[0];
+               sample->tid = u.val32[1];
        }
 
        return 0;
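
The hunk above copes with cross-endian data files: two u32 fields packed into one u64 first need the blanket 64-bit swap undone, then each 32-bit half swapped on its own. A standalone sketch of that union trick; the names are illustrative, not the perf API:

#include <stdint.h>
#include <byteswap.h>

union u64_swap {
        uint64_t val64;
        uint32_t val32[2];
};

/* 'raw' was byte-swapped as one u64; recover its two u32 fields */
static void unswap_u32_pair(uint64_t raw, uint32_t *lo, uint32_t *hi)
{
        union u64_swap u;

        u.val64 = bswap_64(raw);           /* undo the swap of the u64 */
        u.val32[0] = bswap_32(u.val32[0]); /* then swap each u32 half  */
        u.val32[1] = bswap_32(u.val32[1]);

        *lo = u.val32[0];
        *hi = u.val32[1];
}
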
@@ -562,7 +577,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!sample_id_all)
                        return 0;
-               return perf_event__parse_id_sample(event, type, data);
+               return perf_event__parse_id_sample(event, type, data, swapped);
        }
 
        array = event->sample.array;
index 2dd5edf161b731ae1be22b9530e25e59fd444f47..e909d43cf5422333e4d47d2b412c1f3fa0b636af 100644 (file)
@@ -1942,7 +1942,6 @@ int perf_file_header__read(struct perf_file_header *header,
                else
                        return -1;
        } else if (ph->needs_swap) {
-               unsigned int i;
                /*
                 * feature bitmap is declared as an array of unsigned longs --
                 * not good since its size can differ between the host that
@@ -1958,14 +1957,17 @@ int perf_file_header__read(struct perf_file_header *header,
                 * file), punt and fallback to the original behavior --
                 * clearing all feature bits and setting buildid.
                 */
-               for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
-                       header->adds_features[i] = bswap_64(header->adds_features[i]);
+               mem_bswap_64(&header->adds_features,
+                           BITS_TO_U64(HEADER_FEAT_BITS));
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
-                       for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
-                               header->adds_features[i] = bswap_64(header->adds_features[i]);
-                               header->adds_features[i] = bswap_32(header->adds_features[i]);
-                       }
+                       /* unswap as u64 */
+                       mem_bswap_64(&header->adds_features,
+                                   BITS_TO_U64(HEADER_FEAT_BITS));
+
+                       /* unswap as u32 */
+                       mem_bswap_32(&header->adds_features,
+                                   BITS_TO_U32(HEADER_FEAT_BITS));
                }
 
                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
@@ -2091,6 +2093,35 @@ static int read_attr(int fd, struct perf_header *ph,
        return ret <= 0 ? -1 : 0;
 }
 
+static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
+{
+       struct event_format *event = trace_find_event(evsel->attr.config);
+       char bf[128];
+
+       if (event == NULL)
+               return -1;
+
+       snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+       evsel->name = strdup(bf);
+       if (evsel->name == NULL)
+               return -1;
+
+       return 0;
+}
+
+static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)
+{
+       struct perf_evsel *pos;
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
+                   perf_evsel__set_tracepoint_name(pos))
+                       return -1;
+       }
+
+       return 0;
+}
+
 int perf_session__read_header(struct perf_session *session, int fd)
 {
        struct perf_header *header = &session->header;
@@ -2172,6 +2203,9 @@ int perf_session__read_header(struct perf_session *session, int fd)
 
        lseek(fd, header->data_offset, SEEK_SET);
 
+       if (perf_evlist__set_tracepoint_names(session->evlist))
+               goto out_delete_evlist;
+
        header->frozen = 1;
        return 0;
 out_errno:
index 1293b5ebea4dd31327a9e36402577779ec41626b..514e2a4b367d6d53838d9ac726a063b0ba3bf5e8 100644 (file)
@@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he)
  * collapse the histogram
  */
 
-static bool hists__collapse_insert_entry(struct hists *hists,
+static bool hists__collapse_insert_entry(struct hists *hists __used,
                                         struct rb_root *root,
                                         struct hist_entry *he)
 {
@@ -397,8 +397,9 @@ static bool hists__collapse_insert_entry(struct hists *hists,
                        iter->period += he->period;
                        iter->nr_events += he->nr_events;
                        if (symbol_conf.use_callchain) {
-                               callchain_cursor_reset(&hists->callchain_cursor);
-                               callchain_merge(&hists->callchain_cursor, iter->callchain,
+                               callchain_cursor_reset(&callchain_cursor);
+                               callchain_merge(&callchain_cursor,
+                                               iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
index cfc64e293f90b6f3e065d42379da8bb00302616b..34bb556d62191a1300b295011be6210f9633fcf7 100644 (file)
@@ -67,8 +67,6 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
-       /* Best would be to reuse the session callchain cursor */
-       struct callchain_cursor callchain_cursor;
 };
 
 struct hist_entry *__hists__add_entry(struct hists *self,
index f1584833bd2296aa2befe02950e5d49c7a93741f..587a230d2075a3ab1451cb46d34f651e20ffc486 100644 (file)
@@ -8,6 +8,8 @@
 #define BITS_PER_LONG __WORDSIZE
 #define BITS_PER_BYTE           8
 #define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
 
 #define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
index 35ae56864e4f59625369941886b196438f107c99..a1f4e3669142630aa641d1a34b0f53673b6b67b0 100644 (file)
@@ -669,25 +669,26 @@ struct machine *machines__find(struct rb_root *self, pid_t pid)
 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
 {
        char path[PATH_MAX];
-       const char *root_dir;
+       const char *root_dir = "";
        struct machine *machine = machines__find(self, pid);
 
-       if (!machine || machine->pid != pid) {
-               if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
-                       root_dir = "";
-               else {
-                       if (!symbol_conf.guestmount)
-                               goto out;
-                       sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
-                       if (access(path, R_OK)) {
-                               pr_err("Can't access file %s\n", path);
-                               goto out;
-                       }
-                       root_dir = path;
+       if (machine && (machine->pid == pid))
+               goto out;
+
+       if ((pid != HOST_KERNEL_ID) &&
+           (pid != DEFAULT_GUEST_KERNEL_ID) &&
+           (symbol_conf.guestmount)) {
+               sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+               if (access(path, R_OK)) {
+                       pr_err("Can't access file %s\n", path);
+                       machine = NULL;
+                       goto out;
                }
-               machine = machines__add(self, pid, root_dir);
+               root_dir = path;
        }
 
+       machine = machines__add(self, pid, root_dir);
+
 out:
        return machine;
 }
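
The rewritten machines__findnew() above is a find-or-create lookup: reuse the machine registered for the pid, otherwise validate the guestmount path (when one applies) and add a new entry. A generic, self-contained sketch of that pattern with purely hypothetical types:

#include <stdlib.h>

struct item {
        int key;
        struct item *next;
};

static struct item *items;      /* hypothetical list head */

/* return the existing entry for 'key', or create and register one */
static struct item *item__findnew(int key)
{
        struct item *it;

        for (it = items; it; it = it->next)
                if (it->key == key)
                        return it;

        it = calloc(1, sizeof(*it));
        if (!it)
                return NULL;            /* allocation failed */

        it->key = key;
        it->next = items;
        items = it;
        return it;
}
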
index 1915de20dcacf28e3dd037ca5b5f462577b561b5..3322b8446e891af7d420f0eb18e2232db51e8621 100644 (file)
@@ -57,6 +57,10 @@ void setup_pager(void)
        }
        if (!pager)
                pager = getenv("PAGER");
+       if (!pager) {
+               if (!access("/usr/bin/pager", X_OK))
+                       pager = "/usr/bin/pager";
+       }
        if (!pager)
                pager = "less";
        else if (!*pager || !strcmp(pager, "cat"))
index 59dccc98b5540284373ead1b43a74eb81a7a856d..0dda25d82d06ff81d55be2eb6529e970e4a3cd07 100644 (file)
@@ -2164,16 +2164,12 @@ int del_perf_probe_events(struct strlist *dellist)
 
 error:
        if (kfd >= 0) {
-               if (namelist)
-                       strlist__delete(namelist);
-
+               strlist__delete(namelist);
                close(kfd);
        }
 
        if (ufd >= 0) {
-               if (unamelist)
-                       strlist__delete(unamelist);
-
+               strlist__delete(unamelist);
                close(ufd);
        }
 
index 93d355d2710989d784f5a796e02f58c2d4938888..56142d0fb8d79bfdc1bd21cd7ac24f0ded4627bf 100644 (file)
@@ -288,7 +288,8 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
        return bi;
 }
 
-int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+int machine__resolve_callchain(struct machine *self,
+                              struct perf_evsel *evsel __used,
                               struct thread *thread,
                               struct ip_callchain *chain,
                               struct symbol **parent)
@@ -297,7 +298,12 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
        unsigned int i;
        int err;
 
-       callchain_cursor_reset(&evsel->hists.callchain_cursor);
+       callchain_cursor_reset(&callchain_cursor);
+
+       if (chain->nr > PERF_MAX_STACK_DEPTH) {
+               pr_warning("corrupted callchain. skipping...\n");
+               return 0;
+       }
 
        for (i = 0; i < chain->nr; i++) {
                u64 ip;
@@ -317,7 +323,14 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;        break;
                        default:
-                               break;
+                               pr_debug("invalid callchain context: "
+                                        "%"PRId64"\n", (s64) ip);
+                               /*
+                                * It seems the callchain is corrupted.
+                                * Discard all.
+                                */
+                               callchain_cursor_reset(&callchain_cursor);
+                               return 0;
                        }
                        continue;
                }
@@ -333,7 +346,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
                                break;
                }
 
-               err = callchain_cursor_append(&evsel->hists.callchain_cursor,
+               err = callchain_cursor_append(&callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
@@ -429,6 +442,16 @@ static void perf_tool__fill_defaults(struct perf_tool *tool)
                        tool->finished_round = process_finished_round_stub;
        }
 }
+void mem_bswap_32(void *src, int byte_size)
+{
+       u32 *m = src;
+       while (byte_size > 0) {
+               *m = bswap_32(*m);
+               byte_size -= sizeof(u32);
+               ++m;
+       }
+}
 
 void mem_bswap_64(void *src, int byte_size)
 {
@@ -441,37 +464,65 @@ void mem_bswap_64(void *src, int byte_size)
        }
 }
 
-static void perf_event__all64_swap(union perf_event *event)
+static void swap_sample_id_all(union perf_event *event, void *data)
+{
+       void *end = (void *) event + event->header.size;
+       int size = end - data;
+
+       BUG_ON(size % sizeof(u64));
+       mem_bswap_64(data, size);
+}
+
+static void perf_event__all64_swap(union perf_event *event,
+                                  bool sample_id_all __used)
 {
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
 }
 
-static void perf_event__comm_swap(union perf_event *event)
+static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
 {
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);
+
+       if (sample_id_all) {
+               void *data = &event->comm.comm;
+
+               data += ALIGN(strlen(data) + 1, sizeof(u64));
+               swap_sample_id_all(event, data);
+       }
 }
 
-static void perf_event__mmap_swap(union perf_event *event)
+static void perf_event__mmap_swap(union perf_event *event,
+                                 bool sample_id_all)
 {
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);
+
+       if (sample_id_all) {
+               void *data = &event->mmap.filename;
+
+               data += ALIGN(strlen(data) + 1, sizeof(u64));
+               swap_sample_id_all(event, data);
+       }
 }
 
-static void perf_event__task_swap(union perf_event *event)
+static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
 {
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);
+
+       if (sample_id_all)
+               swap_sample_id_all(event, &event->fork + 1);
 }
 
-static void perf_event__read_swap(union perf_event *event)
+static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
 {
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
@@ -479,6 +530,9 @@ static void perf_event__read_swap(union perf_event *event)
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);
+
+       if (sample_id_all)
+               swap_sample_id_all(event, &event->read + 1);
 }
 
 static u8 revbyte(u8 b)
@@ -530,7 +584,8 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
        swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
 }
 
-static void perf_event__hdr_attr_swap(union perf_event *event)
+static void perf_event__hdr_attr_swap(union perf_event *event,
+                                     bool sample_id_all __used)
 {
        size_t size;
 
@@ -541,18 +596,21 @@ static void perf_event__hdr_attr_swap(union perf_event *event)
        mem_bswap_64(event->attr.id, size);
 }
 
-static void perf_event__event_type_swap(union perf_event *event)
+static void perf_event__event_type_swap(union perf_event *event,
+                                       bool sample_id_all __used)
 {
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
 }
 
-static void perf_event__tracing_data_swap(union perf_event *event)
+static void perf_event__tracing_data_swap(union perf_event *event,
+                                         bool sample_id_all __used)
 {
        event->tracing_data.size = bswap_32(event->tracing_data.size);
 }
 
-typedef void (*perf_event__swap_op)(union perf_event *event);
+typedef void (*perf_event__swap_op)(union perf_event *event,
+                                   bool sample_id_all);
 
 static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
@@ -868,7 +926,7 @@ static struct machine *
                else
                        pid = event->ip.pid;
 
-               return perf_session__find_machine(session, pid);
+               return perf_session__findnew_machine(session, pid);
        }
 
        return perf_session__find_host_machine(session);
@@ -986,6 +1044,15 @@ static int perf_session__process_user_event(struct perf_session *session, union
        }
 }
 
+static void event_swap(union perf_event *event, bool sample_id_all)
+{
+       perf_event__swap_op swap;
+
+       swap = perf_event__swap_ops[event->header.type];
+       if (swap)
+               swap(event, sample_id_all);
+}
+
 static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
@@ -994,9 +1061,8 @@ static int perf_session__process_event(struct perf_session *session,
        struct perf_sample sample;
        int ret;
 
-       if (session->header.needs_swap &&
-           perf_event__swap_ops[event->header.type])
-               perf_event__swap_ops[event->header.type](event);
+       if (session->header.needs_swap)
+               event_swap(event, session->sample_id_all);
 
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;
@@ -1428,7 +1494,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                          int print_sym, int print_dso, int print_symoffset)
 {
        struct addr_location al;
-       struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
        struct callchain_cursor_node *node;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample,
@@ -1446,10 +1511,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                                error("Failed to resolve callchain. Skipping\n");
                        return;
                }
-               callchain_cursor_commit(cursor);
+               callchain_cursor_commit(&callchain_cursor);
 
                while (1) {
-                       node = callchain_cursor_current(cursor);
+                       node = callchain_cursor_current(&callchain_cursor);
                        if (!node)
                                break;
 
@@ -1460,12 +1525,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
                        }
                        if (print_dso) {
                                printf(" (");
-                               map__fprintf_dsoname(al.map, stdout);
+                               map__fprintf_dsoname(node->map, stdout);
                                printf(")");
                        }
                        printf("\n");
 
-                       callchain_cursor_advance(cursor);
+                       callchain_cursor_advance(&callchain_cursor);
                }
 
        } else {
index 7a5434c005653a4736cabdc2921ad907f74f2c4c..0c702e3f0a364272a9d979b04786403c4e079bbe 100644 (file)
@@ -80,6 +80,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
 bool perf_session__has_traces(struct perf_session *self, const char *msg);
 
 void mem_bswap_64(void *src, int byte_size);
+void mem_bswap_32(void *src, int byte_size);
 void perf_event__attr_swap(struct perf_event_attr *attr);
 
 int perf_session__create_kernel_maps(struct perf_session *self);
index e2ba8858f3e105b60eb0476b786c33d4aa36e863..3e2e5ea0f03f692c41f62e8aeb1f5d3382c86ce1 100644 (file)
@@ -323,6 +323,7 @@ struct dso *dso__new(const char *name)
                dso->sorted_by_name = 0;
                dso->has_build_id = 0;
                dso->kernel = DSO_TYPE_USER;
+               dso->needs_swap = DSO_SWAP__UNSET;
                INIT_LIST_HEAD(&dso->node);
        }
 
@@ -1156,6 +1157,33 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
        return -1;
 }
 
+static int dso__swap_init(struct dso *dso, unsigned char eidata)
+{
+       static unsigned int const endian = 1;
+
+       dso->needs_swap = DSO_SWAP__NO;
+
+       switch (eidata) {
+       case ELFDATA2LSB:
+               /* We are big endian, DSO is little endian. */
+               if (*(unsigned char const *)&endian != 1)
+                       dso->needs_swap = DSO_SWAP__YES;
+               break;
+
+       case ELFDATA2MSB:
+               /* We are little endian, DSO is big endian. */
+               if (*(unsigned char const *)&endian != 0)
+                       dso->needs_swap = DSO_SWAP__YES;
+               break;
+
+       default:
+               pr_err("unrecognized DSO data encoding %d\n", eidata);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
                         int fd, symbol_filter_t filter, int kmodule,
                         int want_symtab)
@@ -1187,6 +1215,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
                goto out_elf_end;
        }
 
+       if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
+               goto out_elf_end;
+
        /* Always reject images with a mismatched build-id: */
        if (dso->has_build_id) {
                u8 build_id[BUILD_ID_SIZE];
@@ -1272,7 +1303,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
                if (opdsec && sym.st_shndx == opdidx) {
                        u32 offset = sym.st_value - opdshdr.sh_addr;
                        u64 *opd = opddata->d_buf + offset;
-                       sym.st_value = *opd;
+                       sym.st_value = DSO__SWAP(dso, u64, *opd);
                        sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
                }
 
@@ -2786,8 +2817,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
 
 struct map *dso__new_map(const char *name)
 {
+       struct map *map = NULL;
        struct dso *dso = dso__new(name);
-       struct map *map = map__new2(0, dso, MAP__FUNCTION);
+
+       if (dso)
+               map = map__new2(0, dso, MAP__FUNCTION);
 
        return map;
 }
index 5649d63798cbfc0a24f3892d5a83b0d5809512ee..af0752b1aca1a9097b1eb552da416683bf3394c7 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <stdio.h>
+#include <byteswap.h>
 
 #ifdef HAVE_CPLUS_DEMANGLE
 extern char *cplus_demangle(const char *, int);
@@ -160,11 +161,18 @@ enum dso_kernel_type {
        DSO_TYPE_GUEST_KERNEL
 };
 
+enum dso_swap_type {
+       DSO_SWAP__UNSET,
+       DSO_SWAP__NO,
+       DSO_SWAP__YES,
+};
+
 struct dso {
        struct list_head node;
        struct rb_root   symbols[MAP__NR_TYPES];
        struct rb_root   symbol_names[MAP__NR_TYPES];
        enum dso_kernel_type    kernel;
+       enum dso_swap_type      needs_swap;
        u8               adjust_symbols:1;
        u8               has_build_id:1;
        u8               hit:1;
@@ -182,6 +190,28 @@ struct dso {
        char             name[0];
 };
 
+#define DSO__SWAP(dso, type, val)                      \
+({                                                     \
+       type ____r = val;                               \
+       BUG_ON(dso->needs_swap == DSO_SWAP__UNSET);     \
+       if (dso->needs_swap == DSO_SWAP__YES) {         \
+               switch (sizeof(____r)) {                \
+               case 2:                                 \
+                       ____r = bswap_16(val);          \
+                       break;                          \
+               case 4:                                 \
+                       ____r = bswap_32(val);          \
+                       break;                          \
+               case 8:                                 \
+                       ____r = bswap_64(val);          \
+                       break;                          \
+               default:                                \
+                       BUG_ON(1);                      \
+               }                                       \
+       }                                               \
+       ____r;                                          \
+})
+
 struct dso *dso__new(const char *name);
 void dso__delete(struct dso *dso);
 
index df2fddbf0cd2f46d376d691786295526fa1deab0..5dd3b5ec8411191c41d76d19345496b3923f9a6f 100644 (file)
@@ -198,9 +198,8 @@ void print_trace_event(int cpu, void *data, int size)
        record.data = data;
 
        trace_seq_init(&s);
-       pevent_print_event(pevent, &s, &record);
+       pevent_event_info(&s, event, &record);
        trace_seq_do_printf(&s);
-       printf("\n");
 }
 
 void print_event(int cpu, void *data, int size, unsigned long long nsecs,
index fd8e1f1297aa36c117de779993f621860584cc76..f8564955419126babec85631c686df9c38c0e1bb 100644 (file)
@@ -1,4 +1,5 @@
 turbostat : turbostat.c
+CFLAGS +=      -Wall
 
 clean :
        rm -f turbostat
index adf175f614961cebcb6d1bf89fbe7810471da68d..74e44507dfe9ed9b4c376b075a7dceadb0a0959f 100644 (file)
@@ -27,7 +27,11 @@ supports an "invariant" TSC, plus the APERF and MPERF MSRs.
 on processors that additionally support C-state residency counters.
 
 .SS Options
-The \fB-s\fP option prints only a 1-line summary for each sample interval.
+The \fB-s\fP option limits output to a 1-line system summary for each interval.
+.PP
+The \fB-c\fP option limits output to the 1st thread in each core.
+.PP
+The \fB-p\fP option limits output to the 1st thread in each package.
 .PP
 The \fB-v\fP option increases verbosity.
 .PP
@@ -65,19 +69,19 @@ Subsequent rows show per-CPU statistics.
 .nf
 [root@x980]# ./turbostat
 cor CPU    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
-          0.60 1.63 3.38   2.91   0.00  96.49   0.00  76.64
-  0   0   0.59 1.62 3.38   4.51   0.00  94.90   0.00  76.64
-  0   6   1.13 1.64 3.38   3.97   0.00  94.90   0.00  76.64
-  1   2   0.08 1.62 3.38   0.07   0.00  99.85   0.00  76.64
-  1   8   0.03 1.62 3.38   0.12   0.00  99.85   0.00  76.64
-  2   4   0.01 1.62 3.38   0.06   0.00  99.93   0.00  76.64
-  2  10   0.04 1.62 3.38   0.02   0.00  99.93   0.00  76.64
-  8   1   2.85 1.62 3.38  11.71   0.00  85.44   0.00  76.64
-  8   7   1.98 1.62 3.38  12.58   0.00  85.44   0.00  76.64
-  9   3   0.36 1.62 3.38   0.71   0.00  98.93   0.00  76.64
-  9   9   0.09 1.62 3.38   0.98   0.00  98.93   0.00  76.64
- 10   5   0.03 1.62 3.38   0.09   0.00  99.87   0.00  76.64
- 10  11   0.07 1.62 3.38   0.06   0.00  99.87   0.00  76.64
+          0.09 1.62 3.38   1.83   0.32  97.76   1.26  83.61
+  0   0   0.15 1.62 3.38  10.23   0.05  89.56   1.26  83.61
+  0   6   0.05 1.62 3.38  10.34
+  1   2   0.03 1.62 3.38   0.07   0.05  99.86
+  1   8   0.03 1.62 3.38   0.06
+  2   4   0.21 1.62 3.38   0.10   1.49  98.21
+  2  10   0.02 1.62 3.38   0.29
+  8   1   0.04 1.62 3.38   0.04   0.08  99.84
+  8   7   0.01 1.62 3.38   0.06
+  9   3   0.53 1.62 3.38   0.10   0.20  99.17
+  9   9   0.02 1.62 3.38   0.60
+ 10   5   0.01 1.62 3.38   0.02   0.04  99.92
+ 10  11   0.02 1.62 3.38   0.02
 .fi
 .SH SUMMARY EXAMPLE
 The "-s" option prints the column headers just once,
@@ -86,9 +90,10 @@ and then the one line system summary for each sample interval.
 .nf
 [root@x980]# ./turbostat -s
    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
-  0.61 1.89 3.38   5.95   0.00  93.44   0.00  66.33
-  0.52 1.62 3.38   6.83   0.00  92.65   0.00  61.11
-  0.62 1.92 3.38   5.47   0.00  93.91   0.00  67.31
+  0.23 1.67 3.38   2.00   0.30  97.47   1.07  82.12
+  0.10 1.62 3.38   1.87   2.25  95.77  12.02  72.60
+  0.20 1.64 3.38   1.98   0.11  97.72   0.30  83.36
+  0.11 1.70 3.38   1.86   1.81  96.22   9.71  74.90
 .fi
 .SH VERBOSE EXAMPLE
 The "-v" option adds verbosity to the output:
@@ -120,30 +125,28 @@ until ^C while the other CPUs are mostly idle:
 [root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
 ^C
 cor CPU    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
-          8.63 3.64 3.38  14.46   0.49  76.42   0.00   0.00
-  0   0   0.34 3.36 3.38  99.66   0.00   0.00   0.00   0.00
-  0   6  99.96 3.64 3.38   0.04   0.00   0.00   0.00   0.00
-  1   2   0.14 3.50 3.38   1.75   2.04  96.07   0.00   0.00
-  1   8   0.38 3.57 3.38   1.51   2.04  96.07   0.00   0.00
-  2   4   0.01 2.65 3.38   0.06   0.00  99.93   0.00   0.00
-  2  10   0.03 2.12 3.38   0.04   0.00  99.93   0.00   0.00
-  8   1   0.91 3.59 3.38  35.27   0.92  62.90   0.00   0.00
-  8   7   1.61 3.63 3.38  34.57   0.92  62.90   0.00   0.00
-  9   3   0.04 3.38 3.38   0.20   0.00  99.76   0.00   0.00
-  9   9   0.04 3.29 3.38   0.20   0.00  99.76   0.00   0.00
- 10   5   0.03 3.08 3.38   0.12   0.00  99.85   0.00   0.00
- 10  11   0.05 3.07 3.38   0.10   0.00  99.85   0.00   0.00
-4.907015 sec
-
+          8.86 3.61 3.38  15.06  31.19  44.89   0.00   0.00
+  0   0   1.46 3.22 3.38  16.84  29.48  52.22   0.00   0.00
+  0   6   0.21 3.06 3.38  18.09
+  1   2   0.53 3.33 3.38   2.80  46.40  50.27
+  1   8   0.89 3.47 3.38   2.44
+  2   4   1.36 3.43 3.38   9.04  23.71  65.89
+  2  10   0.18 2.86 3.38  10.22
+  8   1   0.04 2.87 3.38  99.96   0.01   0.00
+  8   7  99.72 3.63 3.38   0.27
+  9   3   0.31 3.21 3.38   7.64  56.55  35.50
+  9   9   0.08 2.95 3.38   7.88
+ 10   5   1.42 3.43 3.38   2.14  30.99  65.44
+ 10  11   0.16 2.88 3.38   3.40
 .fi
-Above the cycle soaker drives cpu6 up 3.6 Ghz turbo limit
+Above, the cycle soaker drives cpu7 up to its 3.6 GHz turbo limit
 while the other processors are generally in various states of idle.
 
-Note that cpu0 is an HT sibling sharing core0
-with cpu6, and thus it is unable to get to an idle state
-deeper than c1 while cpu6 is busy.
+Note that cpu1 and cpu7 are HT siblings within core8.
+As cpu7 is very busy, it prevents its sibling, cpu1,
+from entering a c-state deeper than c1.
 
-Note that turbostat reports average GHz of 3.64, while
+Note that turbostat reports average GHz of 3.63, while
 the arithmetic average of the GHz column above is lower.
 This is a weighted average, where the weight is %c0; i.e., it is the total number of
 un-halted cycles elapsed per unit time divided by the number of CPUs.
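
As a rough illustration of that weighted average, the snippet below uses made-up %c0 and GHz figures loosely following the example table; it is not turbostat code:

#include <stdio.h>

int main(void)
{
        /* hypothetical per-CPU samples: %c0 (un-halted share) and GHz */
        double c0[]  = { 99.96, 0.34, 0.38, 0.14, 0.01 };
        double ghz[] = {  3.64, 3.36, 3.57, 3.50, 2.65 };
        double num = 0.0, den = 0.0;
        unsigned int i;

        for (i = 0; i < sizeof(c0) / sizeof(c0[0]); i++) {
                num += c0[i] * ghz[i];
                den += c0[i];
        }

        /* the busy CPU dominates, so the result stays near its 3.64 GHz
         * even though the plain mean of the GHz column is much lower */
        printf("weighted average: %.2f GHz\n", num / den);
        return 0;
}
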
index ab2f682fd44c6cbc402b4a1ab8cbfd0fa9ce0f2e..861d771902065877ee0e09deb50adcde31b534fc 100644 (file)
@@ -67,71 +67,119 @@ double bclk;
 unsigned int show_pkg;
 unsigned int show_core;
 unsigned int show_cpu;
+unsigned int show_pkg_only;
+unsigned int show_core_only;
+char *output_buffer, *outp;
 
 int aperf_mperf_unstable;
 int backwards_count;
 char *progname;
 
-int num_cpus;
-cpu_set_t *cpu_mask;
-size_t cpu_mask_size;
-
-struct counters {
-       unsigned long long tsc;         /* per thread */
-       unsigned long long aperf;       /* per thread */
-       unsigned long long mperf;       /* per thread */
-       unsigned long long c1;  /* per thread (calculated) */
-       unsigned long long c3;  /* per core */
-       unsigned long long c6;  /* per core */
-       unsigned long long c7;  /* per core */
-       unsigned long long pc2; /* per package */
-       unsigned long long pc3; /* per package */
-       unsigned long long pc6; /* per package */
-       unsigned long long pc7; /* per package */
-       unsigned long long extra_msr;   /* per thread */
-       int pkg;
-       int core;
-       int cpu;
-       struct counters *next;
-};
-
-struct counters *cnt_even;
-struct counters *cnt_odd;
-struct counters *cnt_delta;
-struct counters *cnt_average;
-struct timeval tv_even;
-struct timeval tv_odd;
-struct timeval tv_delta;
-
-/*
- * cpu_mask_init(ncpus)
- *
- * allocate and clear cpu_mask
- * set cpu_mask_size
- */
-void cpu_mask_init(int ncpus)
+cpu_set_t *cpu_present_set, *cpu_affinity_set;
+size_t cpu_present_setsize, cpu_affinity_setsize;
+
+struct thread_data {
+       unsigned long long tsc;
+       unsigned long long aperf;
+       unsigned long long mperf;
+       unsigned long long c1;  /* derived */
+       unsigned long long extra_msr;
+       unsigned int cpu_id;
+       unsigned int flags;
+#define CPU_IS_FIRST_THREAD_IN_CORE    0x2
+#define CPU_IS_FIRST_CORE_IN_PACKAGE   0x4
+} *thread_even, *thread_odd;
+
+struct core_data {
+       unsigned long long c3;
+       unsigned long long c6;
+       unsigned long long c7;
+       unsigned int core_id;
+} *core_even, *core_odd;
+
+struct pkg_data {
+       unsigned long long pc2;
+       unsigned long long pc3;
+       unsigned long long pc6;
+       unsigned long long pc7;
+       unsigned int package_id;
+} *package_even, *package_odd;
+
+#define ODD_COUNTERS thread_odd, core_odd, package_odd
+#define EVEN_COUNTERS thread_even, core_even, package_even
+
+#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
+       (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
+               topo.num_threads_per_core + \
+               (core_no) * topo.num_threads_per_core + (thread_no))
+#define GET_CORE(core_base, core_no, pkg_no) \
+       (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
+#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
+
+struct system_summary {
+       struct thread_data threads;
+       struct core_data cores;
+       struct pkg_data packages;
+} sum, average;
+
+
+struct topo_params {
+       int num_packages;
+       int num_cpus;
+       int num_cores;
+       int max_cpu_num;
+       int num_cores_per_pkg;
+       int num_threads_per_core;
+} topo;
+
+struct timeval tv_even, tv_odd, tv_delta;
+
+void setup_all_buffers(void);
+
+int cpu_is_not_present(int cpu)
 {
-       cpu_mask = CPU_ALLOC(ncpus);
-       if (cpu_mask == NULL) {
-               perror("CPU_ALLOC");
-               exit(3);
-       }
-       cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
-       CPU_ZERO_S(cpu_mask_size, cpu_mask);
+       return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
 }
+/*
+ * run func(thread, core, package) in topology order
+ * skip non-present cpus
+ */
 
-void cpu_mask_uninit()
+int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
+       struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
 {
-       CPU_FREE(cpu_mask);
-       cpu_mask = NULL;
-       cpu_mask_size = 0;
+       int retval, pkg_no, core_no, thread_no;
+
+       for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
+               for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
+                       for (thread_no = 0; thread_no <
+                               topo.num_threads_per_core; ++thread_no) {
+                               struct thread_data *t;
+                               struct core_data *c;
+                               struct pkg_data *p;
+
+                               t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
+
+                               if (cpu_is_not_present(t->cpu_id))
+                                       continue;
+
+                               c = GET_CORE(core_base, core_no, pkg_no);
+                               p = GET_PKG(pkg_base, pkg_no);
+
+                               retval = func(t, c, p);
+                               if (retval)
+                                       return retval;
+                       }
+               }
+       }
+       return 0;
 }
 
 int cpu_migrate(int cpu)
 {
-       CPU_ZERO_S(cpu_mask_size, cpu_mask);
-       CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
-       if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
+       CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+       CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
+       if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
                return -1;
        else
                return 0;
@@ -160,67 +208,72 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
 void print_header(void)
 {
        if (show_pkg)
-               fprintf(stderr, "pk");
+               outp += sprintf(outp, "pk");
        if (show_pkg)
-               fprintf(stderr, " ");
+               outp += sprintf(outp, " ");
        if (show_core)
-               fprintf(stderr, "cor");
+               outp += sprintf(outp, "cor");
        if (show_cpu)
-               fprintf(stderr, " CPU");
+               outp += sprintf(outp, " CPU");
        if (show_pkg || show_core || show_cpu)
-               fprintf(stderr, " ");
+               outp += sprintf(outp, " ");
        if (do_nhm_cstates)
-               fprintf(stderr, "   %%c0");
+               outp += sprintf(outp, "   %%c0");
        if (has_aperf)
-               fprintf(stderr, "  GHz");
-       fprintf(stderr, "  TSC");
+               outp += sprintf(outp, "  GHz");
+       outp += sprintf(outp, "  TSC");
        if (do_nhm_cstates)
-               fprintf(stderr, "    %%c1");
+               outp += sprintf(outp, "    %%c1");
        if (do_nhm_cstates)
-               fprintf(stderr, "    %%c3");
+               outp += sprintf(outp, "    %%c3");
        if (do_nhm_cstates)
-               fprintf(stderr, "    %%c6");
+               outp += sprintf(outp, "    %%c6");
        if (do_snb_cstates)
-               fprintf(stderr, "    %%c7");
+               outp += sprintf(outp, "    %%c7");
        if (do_snb_cstates)
-               fprintf(stderr, "   %%pc2");
+               outp += sprintf(outp, "   %%pc2");
        if (do_nhm_cstates)
-               fprintf(stderr, "   %%pc3");
+               outp += sprintf(outp, "   %%pc3");
        if (do_nhm_cstates)
-               fprintf(stderr, "   %%pc6");
+               outp += sprintf(outp, "   %%pc6");
        if (do_snb_cstates)
-               fprintf(stderr, "   %%pc7");
+               outp += sprintf(outp, "   %%pc7");
        if (extra_msr_offset)
-               fprintf(stderr, "        MSR 0x%x ", extra_msr_offset);
+               outp += sprintf(outp, "        MSR 0x%x ", extra_msr_offset);
 
-       putc('\n', stderr);
+       outp += sprintf(outp, "\n");
 }
 
-void dump_cnt(struct counters *cnt)
+int dump_counters(struct thread_data *t, struct core_data *c,
+       struct pkg_data *p)
 {
-       if (!cnt)
-               return;
-       if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
-       if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
-       if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
-       if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
-       if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
-       if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
-       if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
-       if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
-       if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
-       if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
-       if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
-       if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
-       if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
-}
+       fprintf(stderr, "t %p, c %p, p %p\n", t, c, p);
+
+       if (t) {
+               fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags);
+               fprintf(stderr, "TSC: %016llX\n", t->tsc);
+               fprintf(stderr, "aperf: %016llX\n", t->aperf);
+               fprintf(stderr, "mperf: %016llX\n", t->mperf);
+               fprintf(stderr, "c1: %016llX\n", t->c1);
+               fprintf(stderr, "msr0x%x: %016llX\n",
+                       extra_msr_offset, t->extra_msr);
+       }
 
-void dump_list(struct counters *cnt)
-{
-       printf("dump_list 0x%p\n", cnt);
+       if (c) {
+               fprintf(stderr, "core: %d\n", c->core_id);
+               fprintf(stderr, "c3: %016llX\n", c->c3);
+               fprintf(stderr, "c6: %016llX\n", c->c6);
+               fprintf(stderr, "c7: %016llX\n", c->c7);
+       }
 
-       for (; cnt; cnt = cnt->next)
-               dump_cnt(cnt);
+       if (p) {
+               fprintf(stderr, "package: %d\n", p->package_id);
+               fprintf(stderr, "pc2: %016llX\n", p->pc2);
+               fprintf(stderr, "pc3: %016llX\n", p->pc3);
+               fprintf(stderr, "pc6: %016llX\n", p->pc6);
+               fprintf(stderr, "pc7: %016llX\n", p->pc7);
+       }
+       return 0;
 }
 
 /*
@@ -232,321 +285,389 @@ void dump_list(struct counters *cnt)
  * TSC: "TSC" 3 columns %3.2
  * percentage " %pc3" %6.2
  */
-void print_cnt(struct counters *p)
+int format_counters(struct thread_data *t, struct core_data *c,
+       struct pkg_data *p)
 {
        double interval_float;
 
+        /* if showing only 1st thread in core and this isn't one, bail out */
+       if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+               return 0;
+
+        /* if showing only 1st thread in pkg and this isn't one, bail out */
+       if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+               return 0;
+
        interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
 
-       /* topology columns, print blanks on 1st (average) line */
-       if (p == cnt_average) {
+       /* topo columns, print blanks on 1st (average) line */
+       if (t == &average.threads) {
                if (show_pkg)
-                       fprintf(stderr, "  ");
+                       outp += sprintf(outp, "  ");
                if (show_pkg && show_core)
-                       fprintf(stderr, " ");
+                       outp += sprintf(outp, " ");
                if (show_core)
-                       fprintf(stderr, "   ");
+                       outp += sprintf(outp, "   ");
                if (show_cpu)
-                       fprintf(stderr, " " "   ");
+                       outp += sprintf(outp, " " "   ");
        } else {
-               if (show_pkg)
-                       fprintf(stderr, "%2d", p->pkg);
+               if (show_pkg) {
+                       if (p)
+                               outp += sprintf(outp, "%2d", p->package_id);
+                       else
+                               outp += sprintf(outp, "  ");
+               }
                if (show_pkg && show_core)
-                       fprintf(stderr, " ");
-               if (show_core)
-                       fprintf(stderr, "%3d", p->core);
+                       outp += sprintf(outp, " ");
+               if (show_core) {
+                       if (c)
+                               outp += sprintf(outp, "%3d", c->core_id);
+                       else
+                               outp += sprintf(outp, "   ");
+               }
                if (show_cpu)
-                       fprintf(stderr, " %3d", p->cpu);
+                       outp += sprintf(outp, " %3d", t->cpu_id);
        }
 
        /* %c0 */
        if (do_nhm_cstates) {
                if (show_pkg || show_core || show_cpu)
-                       fprintf(stderr, " ");
+                       outp += sprintf(outp, " ");
                if (!skip_c0)
-                       fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
+                       outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
                else
-                       fprintf(stderr, "  ****");
+                       outp += sprintf(outp, "  ****");
        }
 
        /* GHz */
        if (has_aperf) {
                if (!aperf_mperf_unstable) {
-                       fprintf(stderr, " %3.2f",
-                               1.0 * p->tsc / units * p->aperf /
-                               p->mperf / interval_float);
+                       outp += sprintf(outp, " %3.2f",
+                               1.0 * t->tsc / units * t->aperf /
+                               t->mperf / interval_float);
                } else {
-                       if (p->aperf > p->tsc || p->mperf > p->tsc) {
-                               fprintf(stderr, " ***");
+                       if (t->aperf > t->tsc || t->mperf > t->tsc) {
+                               outp += sprintf(outp, " ***");
                        } else {
-                               fprintf(stderr, "%3.1f*",
-                                       1.0 * p->tsc /
-                                       units * p->aperf /
-                                       p->mperf / interval_float);
+                               outp += sprintf(outp, "%3.1f*",
+                                       1.0 * t->tsc /
+                                       units * t->aperf /
+                                       t->mperf / interval_float);
                        }
                }
        }
 
        /* TSC */
-       fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
+       outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
 
        if (do_nhm_cstates) {
                if (!skip_c1)
-                       fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
+                       outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
                else
-                       fprintf(stderr, "  ****");
+                       outp += sprintf(outp, "  ****");
        }
+
+       /* print per-core data only for 1st thread in core */
+       if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+               goto done;
+
        if (do_nhm_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->c3/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->c6/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
        if (do_snb_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
+
+       /* print per-package data only for 1st core in package */
+       if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+               goto done;
+
        if (do_snb_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
        if (do_nhm_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
        if (do_nhm_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
-               fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+done:
        if (extra_msr_offset)
-               fprintf(stderr, "  0x%016llx", p->extra_msr);
-       putc('\n', stderr);
+               outp += sprintf(outp, "  0x%016llx", t->extra_msr);
+       outp += sprintf(outp, "\n");
+
+       return 0;
 }
 
-void print_counters(struct counters *counters)
+void flush_stdout()
+{
+       fputs(output_buffer, stdout);
+       outp = output_buffer;
+}
+void flush_stderr()
+{
+       fputs(output_buffer, stderr);
+       outp = output_buffer;
+}
+void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
-       struct counters *cnt;
        static int printed;
 
-
        if (!printed || !summary_only)
                print_header();
 
-       if (num_cpus > 1)
-               print_cnt(cnt_average);
+       if (topo.num_cpus > 1)
+               format_counters(&average.threads, &average.cores,
+                       &average.packages);
 
        printed = 1;
 
        if (summary_only)
                return;
 
-       for (cnt = counters; cnt != NULL; cnt = cnt->next)
-               print_cnt(cnt);
-
+       for_all_cpus(format_counters, t, c, p);
 }
 
-#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
+void
+delta_package(struct pkg_data *new, struct pkg_data *old)
+{
+       old->pc2 = new->pc2 - old->pc2;
+       old->pc3 = new->pc3 - old->pc3;
+       old->pc6 = new->pc6 - old->pc6;
+       old->pc7 = new->pc7 - old->pc7;
+}
 
-int compute_delta(struct counters *after,
-       struct counters *before, struct counters *delta)
+void
+delta_core(struct core_data *new, struct core_data *old)
 {
-       int errors = 0;
-       int perf_err = 0;
+       old->c3 = new->c3 - old->c3;
+       old->c6 = new->c6 - old->c6;
+       old->c7 = new->c7 - old->c7;
+}
 
-       skip_c0 = skip_c1 = 0;
+/*
+ * old = new - old
+ */
+void
+delta_thread(struct thread_data *new, struct thread_data *old,
+       struct core_data *core_delta)
+{
+       old->tsc = new->tsc - old->tsc;
+
+       /* check for TSC < 1 Mcycles over interval */
+       if (old->tsc < (1000 * 1000)) {
+               fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n");
+               fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n");
+               fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n");
+               exit(-3);
+       }
 
-       for ( ; after && before && delta;
-               after = after->next, before = before->next, delta = delta->next) {
-               if (before->cpu != after->cpu) {
-                       printf("cpu configuration changed: %d != %d\n",
-                               before->cpu, after->cpu);
-                       return -1;
-               }
+       old->c1 = new->c1 - old->c1;
 
-               if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
-                       fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
-                               before->cpu, before->tsc, after->tsc);
-                       errors++;
-               }
-               /* check for TSC < 1 Mcycles over interval */
-               if (delta->tsc < (1000 * 1000)) {
-                       fprintf(stderr, "Insanely slow TSC rate,"
-                               " TSC stops in idle?\n");
-                       fprintf(stderr, "You can disable all c-states"
-                               " by booting with \"idle=poll\"\n");
-                       fprintf(stderr, "or just the deep ones with"
-                               " \"processor.max_cstate=1\"\n");
-                       exit(-3);
-               }
-               if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
-                       fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
-                               before->cpu, before->c3, after->c3);
-                       errors++;
-               }
-               if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
-                       fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
-                               before->cpu, before->c6, after->c6);
-                       errors++;
-               }
-               if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
-                       fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
-                               before->cpu, before->c7, after->c7);
-                       errors++;
-               }
-               if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
-                       fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
-                               before->cpu, before->pc2, after->pc2);
-                       errors++;
-               }
-               if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
-                       fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
-                               before->cpu, before->pc3, after->pc3);
-                       errors++;
-               }
-               if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
-                       fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
-                               before->cpu, before->pc6, after->pc6);
-                       errors++;
-               }
-               if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
-                       fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
-                               before->cpu, before->pc7, after->pc7);
-                       errors++;
-               }
+       if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
+               old->aperf = new->aperf - old->aperf;
+               old->mperf = new->mperf - old->mperf;
+       } else {
 
-               perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
-               if (perf_err) {
-                       fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
-                               before->cpu, before->aperf, after->aperf);
-               }
-               perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
-               if (perf_err) {
-                       fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
-                               before->cpu, before->mperf, after->mperf);
-               }
-               if (perf_err) {
-                       if (!aperf_mperf_unstable) {
-                               fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
-                               fprintf(stderr, "* Frequency results do not cover entire interval *\n");
-                               fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+               if (!aperf_mperf_unstable) {
+                       fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+                       fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+                       fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
 
-                               aperf_mperf_unstable = 1;
-                       }
-                       /*
-                        * mperf delta is likely a huge "positive" number
-                        * can not use it for calculating c0 time
-                        */
-                       skip_c0 = 1;
-                       skip_c1 = 1;
+                       aperf_mperf_unstable = 1;
                }
-
                /*
-                * As mperf and tsc collection are not atomic,
-                * it is possible for mperf's non-halted cycles
-                * to exceed TSC's all cycles: show c1 = 0% in that case.
+                * mperf delta is likely a huge "positive" number
+                * can not use it for calculating c0 time
                 */
-               if (delta->mperf > delta->tsc)
-                       delta->c1 = 0;
-               else /* normal case, derive c1 */
-                       delta->c1 = delta->tsc - delta->mperf
-                               - delta->c3 - delta->c6 - delta->c7;
+               skip_c0 = 1;
+               skip_c1 = 1;
+       }
 
-               if (delta->mperf == 0)
-                       delta->mperf = 1;       /* divide by 0 protection */
 
-               /*
-                * for "extra msr", just copy the latest w/o subtracting
-                */
-               delta->extra_msr = after->extra_msr;
-               if (errors) {
-                       fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
-                       dump_cnt(before);
-                       fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
-                       dump_cnt(after);
-                       errors = 0;
-               }
+       /*
+        * As counter collection is not atomic,
+        * it is possible for mperf's non-halted cycles + idle states
+        * to exceed TSC's all cycles: show c1 = 0% in that case.
+        */
+       if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+               old->c1 = 0;
+       else {
+               /* normal case, derive c1 */
+               old->c1 = old->tsc - old->mperf - core_delta->c3
+                               - core_delta->c6 - core_delta->c7;
+       }
+
+       if (old->mperf == 0) {
+               if (verbose > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
+               old->mperf = 1; /* divide by 0 protection */
        }
+
+       /*
+        * for "extra msr", just copy the latest w/o subtracting
+        */
+       old->extra_msr = new->extra_msr;
+}
+
+int delta_cpu(struct thread_data *t, struct core_data *c,
+       struct pkg_data *p, struct thread_data *t2,
+       struct core_data *c2, struct pkg_data *p2)
+{
+       /* calculate core delta only for 1st thread in core */
+       if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
+               delta_core(c, c2);
+
+       /* always calculate thread delta */
+       delta_thread(t, t2, c2);        /* c2 is core delta */
+
+       /* calculate package delta only for 1st core in package */
+       if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
+               delta_package(p, p2);
+
        return 0;
 }
 
-void compute_average(struct counters *delta, struct counters *avg)
+void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+       t->tsc = 0;
+       t->aperf = 0;
+       t->mperf = 0;
+       t->c1 = 0;
+
+       /* tells format_counters to dump all fields from this set */
+       t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
+
+       c->c3 = 0;
+       c->c6 = 0;
+       c->c7 = 0;
+
+       p->pc2 = 0;
+       p->pc3 = 0;
+       p->pc6 = 0;
+       p->pc7 = 0;
+}
+int sum_counters(struct thread_data *t, struct core_data *c,
+       struct pkg_data *p)
 {
-       struct counters *sum;
+       average.threads.tsc += t->tsc;
+       average.threads.aperf += t->aperf;
+       average.threads.mperf += t->mperf;
+       average.threads.c1 += t->c1;
 
-       sum = calloc(1, sizeof(struct counters));
-       if (sum == NULL) {
-               perror("calloc sum");
-               exit(1);
-       }
+       /* sum per-core values only for 1st thread in core */
+       if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+               return 0;
 
-       for (; delta; delta = delta->next) {
-               sum->tsc += delta->tsc;
-               sum->c1 += delta->c1;
-               sum->c3 += delta->c3;
-               sum->c6 += delta->c6;
-               sum->c7 += delta->c7;
-               sum->aperf += delta->aperf;
-               sum->mperf += delta->mperf;
-               sum->pc2 += delta->pc2;
-               sum->pc3 += delta->pc3;
-               sum->pc6 += delta->pc6;
-               sum->pc7 += delta->pc7;
-       }
-       avg->tsc = sum->tsc/num_cpus;
-       avg->c1 = sum->c1/num_cpus;
-       avg->c3 = sum->c3/num_cpus;
-       avg->c6 = sum->c6/num_cpus;
-       avg->c7 = sum->c7/num_cpus;
-       avg->aperf = sum->aperf/num_cpus;
-       avg->mperf = sum->mperf/num_cpus;
-       avg->pc2 = sum->pc2/num_cpus;
-       avg->pc3 = sum->pc3/num_cpus;
-       avg->pc6 = sum->pc6/num_cpus;
-       avg->pc7 = sum->pc7/num_cpus;
-
-       free(sum);
+       average.cores.c3 += c->c3;
+       average.cores.c6 += c->c6;
+       average.cores.c7 += c->c7;
+
+       /* sum per-pkg values only for 1st core in pkg */
+       if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+               return 0;
+
+       average.packages.pc2 += p->pc2;
+       average.packages.pc3 += p->pc3;
+       average.packages.pc6 += p->pc6;
+       average.packages.pc7 += p->pc7;
+
+       return 0;
 }
+/*
+ * sum the counters for all cpus in the system
+ * compute the weighted average
+ */
+void compute_average(struct thread_data *t, struct core_data *c,
+       struct pkg_data *p)
+{
+       clear_counters(&average.threads, &average.cores, &average.packages);
+
+       for_all_cpus(sum_counters, t, c, p);
+
+       average.threads.tsc /= topo.num_cpus;
+       average.threads.aperf /= topo.num_cpus;
+       average.threads.mperf /= topo.num_cpus;
+       average.threads.c1 /= topo.num_cpus;
+
+       average.cores.c3 /= topo.num_cores;
+       average.cores.c6 /= topo.num_cores;
+       average.cores.c7 /= topo.num_cores;
 
-int get_counters(struct counters *cnt)
+       average.packages.pc2 /= topo.num_packages;
+       average.packages.pc3 /= topo.num_packages;
+       average.packages.pc6 /= topo.num_packages;
+       average.packages.pc7 /= topo.num_packages;
+}
+
+static unsigned long long rdtsc(void)
 {
-       for ( ; cnt; cnt = cnt->next) {
+       unsigned int low, high;
 
-               if (cpu_migrate(cnt->cpu))
-                       return -1;
+       asm volatile("rdtsc" : "=a" (low), "=d" (high));
 
-               if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
-                       return -1;
+       return low | ((unsigned long long)high) << 32;
+}
 
-               if (has_aperf) {
-                       if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
-                               return -1;
-                       if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
-                               return -1;
-               }
 
-               if (do_nhm_cstates) {
-                       if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
-                               return -1;
-                       if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
-                               return -1;
-               }
+/*
+ * get_counters(...)
+ * migrate to cpu
+ * acquire and record local counters for that cpu
+ */
+int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+       int cpu = t->cpu_id;
 
-               if (do_snb_cstates)
-                       if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
-                               return -1;
+       if (cpu_migrate(cpu))
+               return -1;
 
-               if (do_nhm_cstates) {
-                       if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
-                               return -1;
-                       if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
-                               return -1;
-               }
-               if (do_snb_cstates) {
-                       if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
-                               return -1;
-                       if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
-                               return -1;
-               }
-               if (extra_msr_offset)
-                       if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
-                               return -1;
+       t->tsc = rdtsc();       /* we are running on local CPU of interest */
+
+       if (has_aperf) {
+               if (get_msr(cpu, MSR_APERF, &t->aperf))
+                       return -3;
+               if (get_msr(cpu, MSR_MPERF, &t->mperf))
+                       return -4;
+       }
+
+       if (extra_msr_offset)
+               if (get_msr(cpu, extra_msr_offset, &t->extra_msr))
+                       return -5;
+
+       /* collect core counters only for 1st thread in core */
+       if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+               return 0;
+
+       if (do_nhm_cstates) {
+               if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
+                       return -6;
+               if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+                       return -7;
+       }
+
+       if (do_snb_cstates)
+               if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
+                       return -8;
+
+       /* collect package counters only for 1st core in package */
+       if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+               return 0;
+
+       if (do_nhm_cstates) {
+               if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
+                       return -9;
+               if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
+                       return -10;
+       }
+       if (do_snb_cstates) {
+               if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
+                       return -11;
+               if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
+                       return -12;
        }
        return 0;
 }
 
-void print_nehalem_info(void)
+void print_verbose_header(void)
 {
        unsigned long long msr;
        unsigned int ratio;
@@ -594,143 +715,82 @@ void print_nehalem_info(void)
 
 }
 
-void free_counter_list(struct counters *list)
+void free_all_buffers(void)
 {
-       struct counters *p;
+       CPU_FREE(cpu_present_set);
+       cpu_present_set = NULL;
+       cpu_present_setsize = 0;
 
-       for (p = list; p; ) {
-               struct counters *free_me;
+       CPU_FREE(cpu_affinity_set);
+       cpu_affinity_set = NULL;
+       cpu_affinity_setsize = 0;
 
-               free_me = p;
-               p = p->next;
-               free(free_me);
-       }
-}
+       free(thread_even);
+       free(core_even);
+       free(package_even);
 
-void free_all_counters(void)
-{
-       free_counter_list(cnt_even);
-       cnt_even = NULL;
+       thread_even = NULL;
+       core_even = NULL;
+       package_even = NULL;
 
-       free_counter_list(cnt_odd);
-       cnt_odd = NULL;
+       free(thread_odd);
+       free(core_odd);
+       free(package_odd);
 
-       free_counter_list(cnt_delta);
-       cnt_delta = NULL;
+       thread_odd = NULL;
+       core_odd = NULL;
+       package_odd = NULL;
 
-       free_counter_list(cnt_average);
-       cnt_average = NULL;
+       free(output_buffer);
+       output_buffer = NULL;
+       outp = NULL;
 }
 
-void insert_counters(struct counters **list,
-       struct counters *new)
+/*
+ * cpu_is_first_sibling_in_core(cpu)
+ * return 1 if given CPU is 1st HT sibling in the core
+ */
+int cpu_is_first_sibling_in_core(int cpu)
 {
-       struct counters *prev;
-
-       /*
-        * list was empty
-        */
-       if (*list == NULL) {
-               new->next = *list;
-               *list = new;
-               return;
-       }
-
-       if (!summary_only)
-               show_cpu = 1;   /* there is more than one CPU */
-
-       /*
-        * insert on front of list.
-        * It is sorted by ascending package#, core#, cpu#
-        */
-       if (((*list)->pkg > new->pkg) ||
-           (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
-           (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
-               new->next = *list;
-               *list = new;
-               return;
-       }
-
-       prev = *list;
-
-       while (prev->next && (prev->next->pkg < new->pkg)) {
-               prev = prev->next;
-               if (!summary_only)
-                       show_pkg = 1;   /* there is more than 1 package */
-       }
-
-       while (prev->next && (prev->next->pkg == new->pkg)
-               && (prev->next->core < new->core)) {
-               prev = prev->next;
-               if (!summary_only)
-                       show_core = 1;  /* there is more than 1 core */
-       }
+       char path[64];
+       FILE *filep;
+       int first_cpu;
 
-       while (prev->next && (prev->next->pkg == new->pkg)
-               && (prev->next->core == new->core)
-               && (prev->next->cpu < new->cpu)) {
-               prev = prev->next;
+       sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+       filep = fopen(path, "r");
+       if (filep == NULL) {
+               perror(path);
+               exit(1);
        }
-
-       /*
-        * insert after "prev"
-        */
-       new->next = prev->next;
-       prev->next = new;
+       fscanf(filep, "%d", &first_cpu);
+       fclose(filep);
+       return (cpu == first_cpu);
 }
 
-void alloc_new_counters(int pkg, int core, int cpu)
+/*
+ * cpu_is_first_core_in_package(cpu)
+ * return 1 if given CPU is 1st core in package
+ */
+int cpu_is_first_core_in_package(int cpu)
 {
-       struct counters *new;
-
-       if (verbose > 1)
-               printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
-
-       new = (struct counters *)calloc(1, sizeof(struct counters));
-       if (new == NULL) {
-               perror("calloc");
-               exit(1);
-       }
-       new->pkg = pkg;
-       new->core = core;
-       new->cpu = cpu;
-       insert_counters(&cnt_odd, new);
-
-       new = (struct counters *)calloc(1,
-               sizeof(struct counters));
-       if (new == NULL) {
-               perror("calloc");
-               exit(1);
-       }
-       new->pkg = pkg;
-       new->core = core;
-       new->cpu = cpu;
-       insert_counters(&cnt_even, new);
-
-       new = (struct counters *)calloc(1, sizeof(struct counters));
-       if (new == NULL) {
-               perror("calloc");
-               exit(1);
-       }
-       new->pkg = pkg;
-       new->core = core;
-       new->cpu = cpu;
-       insert_counters(&cnt_delta, new);
+       char path[64];
+       FILE *filep;
+       int first_cpu;
 
-       new = (struct counters *)calloc(1, sizeof(struct counters));
-       if (new == NULL) {
-               perror("calloc");
+       sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
+       filep = fopen(path, "r");
+       if (filep == NULL) {
+               perror(path);
                exit(1);
        }
-       new->pkg = pkg;
-       new->core = core;
-       new->cpu = cpu;
-       cnt_average = new;
+       fscanf(filep, "%d", &first_cpu);
+       fclose(filep);
+       return (cpu == first_cpu);
 }
 
 int get_physical_package_id(int cpu)
 {
-       char path[64];
+       char path[80];
        FILE *filep;
        int pkg;
 
@@ -747,7 +807,7 @@ int get_physical_package_id(int cpu)
 
 int get_core_id(int cpu)
 {
-       char path[64];
+       char path[80];
        FILE *filep;
        int core;
 
@@ -762,14 +822,87 @@ int get_core_id(int cpu)
        return core;
 }
 
+int get_num_ht_siblings(int cpu)
+{
+       char path[80];
+       FILE *filep;
+       int sib1, sib2;
+       int matches;
+       char character;
+
+       sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+       filep = fopen(path, "r");
+       if (filep == NULL) {
+               perror(path);
+               exit(1);
+       }
+       /*
+        * file format:
+        * if a pair of numbers with a character between them: 2 siblings (e.g. 1-2, or 1,4)
+        * otherwise 1 sibling (self).
+        */
+       matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
+
+       fclose(filep);
+
+       if (matches == 3)
+               return 2;
+       else
+               return 1;
+}
+
 /*
- * run func(pkg, core, cpu) on every cpu in /proc/stat
+ * run func(thread, core, package) in topology order
+ * skip non-present cpus
  */
 
-int for_all_cpus(void (func)(int, int, int))
+int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
+       struct pkg_data *, struct thread_data *, struct core_data *,
+       struct pkg_data *), struct thread_data *thread_base,
+       struct core_data *core_base, struct pkg_data *pkg_base,
+       struct thread_data *thread_base2, struct core_data *core_base2,
+       struct pkg_data *pkg_base2)
+{
+       int retval, pkg_no, core_no, thread_no;
+
+       for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
+               for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
+                       for (thread_no = 0; thread_no <
+                               topo.num_threads_per_core; ++thread_no) {
+                               struct thread_data *t, *t2;
+                               struct core_data *c, *c2;
+                               struct pkg_data *p, *p2;
+
+                               t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
+
+                               if (cpu_is_not_present(t->cpu_id))
+                                       continue;
+
+                               t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
+
+                               c = GET_CORE(core_base, core_no, pkg_no);
+                               c2 = GET_CORE(core_base2, core_no, pkg_no);
+
+                               p = GET_PKG(pkg_base, pkg_no);
+                               p2 = GET_PKG(pkg_base2, pkg_no);
+
+                               retval = func(t, c, p, t2, c2, p2);
+                               if (retval)
+                                       return retval;
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+ * run func(cpu) on every cpu in /proc/stat
+ * return 0, or the first non-zero value returned by func()
+ */
+int for_all_proc_cpus(int (func)(int))
 {
        FILE *fp;
-       int cpu_count;
+       int cpu_num;
        int retval;
 
        fp = fopen(proc_stat, "r");
@@ -784,78 +917,88 @@ int for_all_cpus(void (func)(int, int, int))
                exit(1);
        }
 
-       for (cpu_count = 0; ; cpu_count++) {
-               int cpu;
-
-               retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
+       while (1) {
+               retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
                if (retval != 1)
                        break;
 
-               func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
+               retval = func(cpu_num);
+               if (retval) {
+                       fclose(fp);
+                       return(retval);
+               }
        }
        fclose(fp);
-       return cpu_count;
+       return 0;
 }
 
 void re_initialize(void)
 {
-       free_all_counters();
-       num_cpus = for_all_cpus(alloc_new_counters);
-       cpu_mask_uninit();
-       cpu_mask_init(num_cpus);
-       printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
+       free_all_buffers();
+       setup_all_buffers();
+       printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
 }
 
-void dummy(int pkg, int core, int cpu) { return; }
+
 /*
- * check to see if a cpu came on-line
+ * count_cpus()
+ * remember the last one seen, it will be the max
  */
-int verify_num_cpus(void)
+int count_cpus(int cpu)
 {
-       int new_num_cpus;
+       if (topo.max_cpu_num < cpu)
+               topo.max_cpu_num = cpu;
 
-       new_num_cpus = for_all_cpus(dummy);
-
-       if (new_num_cpus != num_cpus) {
-               if (verbose)
-                       printf("num_cpus was %d, is now  %d\n",
-                               num_cpus, new_num_cpus);
-               return -1;
-       }
+       topo.num_cpus += 1;
+       return 0;
+}
+int mark_cpu_present(int cpu)
+{
+       CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
        return 0;
 }
 
 void turbostat_loop()
 {
+       int retval;
+
 restart:
-       get_counters(cnt_even);
+       retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+       if (retval) {
+               re_initialize();
+               goto restart;
+       }
        gettimeofday(&tv_even, (struct timezone *)NULL);
 
        while (1) {
-               if (verify_num_cpus()) {
+               if (for_all_proc_cpus(cpu_is_not_present)) {
                        re_initialize();
                        goto restart;
                }
                sleep(interval_sec);
-               if (get_counters(cnt_odd)) {
+               retval = for_all_cpus(get_counters, ODD_COUNTERS);
+               if (retval) {
                        re_initialize();
                        goto restart;
                }
                gettimeofday(&tv_odd, (struct timezone *)NULL);
-               compute_delta(cnt_odd, cnt_even, cnt_delta);
                timersub(&tv_odd, &tv_even, &tv_delta);
-               compute_average(cnt_delta, cnt_average);
-               print_counters(cnt_delta);
+               for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
+               compute_average(EVEN_COUNTERS);
+               format_all_counters(EVEN_COUNTERS);
+               flush_stdout();
                sleep(interval_sec);
-               if (get_counters(cnt_even)) {
+               retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+               if (retval) {
                        re_initialize();
                        goto restart;
                }
                gettimeofday(&tv_even, (struct timezone *)NULL);
-               compute_delta(cnt_even, cnt_odd, cnt_delta);
                timersub(&tv_even, &tv_odd, &tv_delta);
-               compute_average(cnt_delta, cnt_average);
-               print_counters(cnt_delta);
+               for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
+               compute_average(ODD_COUNTERS);
+               format_all_counters(ODD_COUNTERS);
+               flush_stdout();
        }
 }
 
@@ -912,6 +1055,8 @@ int is_snb(unsigned int family, unsigned int model)
        switch (model) {
        case 0x2A:
        case 0x2D:
+       case 0x3A:      /* IVB */
+       case 0x3D:      /* IVB Xeon */
                return 1;
        }
        return 0;
@@ -1028,6 +1173,208 @@ int open_dev_cpu_msr(int dummy1)
        return 0;
 }
 
+void topology_probe()
+{
+       int i;
+       int max_core_id = 0;
+       int max_package_id = 0;
+       int max_siblings = 0;
+       struct cpu_topology {
+               int core_id;
+               int physical_package_id;
+       } *cpus;
+
+       /* Initialize num_cpus, max_cpu_num */
+       topo.num_cpus = 0;
+       topo.max_cpu_num = 0;
+       for_all_proc_cpus(count_cpus);
+       if (!summary_only && topo.num_cpus > 1)
+               show_cpu = 1;
+
+       if (verbose > 1)
+               fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
+
+       cpus = calloc(1, (topo.max_cpu_num  + 1) * sizeof(struct cpu_topology));
+       if (cpus == NULL) {
+               perror("calloc cpus");
+               exit(1);
+       }
+
+       /*
+        * Allocate and initialize cpu_present_set
+        */
+       cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
+       if (cpu_present_set == NULL) {
+               perror("CPU_ALLOC");
+               exit(3);
+       }
+       cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
+       CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+       for_all_proc_cpus(mark_cpu_present);
+
+       /*
+        * Allocate and initialize cpu_affinity_set
+        */
+       cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
+       if (cpu_affinity_set == NULL) {
+               perror("CPU_ALLOC");
+               exit(3);
+       }
+       cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
+       CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+
+
+       /*
+        * For online cpus
+        * find max_core_id, max_package_id
+        */
+       for (i = 0; i <= topo.max_cpu_num; ++i) {
+               int siblings;
+
+               if (cpu_is_not_present(i)) {
+                       if (verbose > 1)
+                               fprintf(stderr, "cpu%d NOT PRESENT\n", i);
+                       continue;
+               }
+               cpus[i].core_id = get_core_id(i);
+               if (cpus[i].core_id > max_core_id)
+                       max_core_id = cpus[i].core_id;
+
+               cpus[i].physical_package_id = get_physical_package_id(i);
+               if (cpus[i].physical_package_id > max_package_id)
+                       max_package_id = cpus[i].physical_package_id;
+
+               siblings = get_num_ht_siblings(i);
+               if (siblings > max_siblings)
+                       max_siblings = siblings;
+               if (verbose > 1)
+                       fprintf(stderr, "cpu %d pkg %d core %d\n",
+                               i, cpus[i].physical_package_id, cpus[i].core_id);
+       }
+       topo.num_cores_per_pkg = max_core_id + 1;
+       if (verbose > 1)
+               fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
+                       max_core_id, topo.num_cores_per_pkg);
+       if (!summary_only && topo.num_cores_per_pkg > 1)
+               show_core = 1;
+
+       topo.num_packages = max_package_id + 1;
+       if (verbose > 1)
+               fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
+                       max_package_id, topo.num_packages);
+       if (!summary_only && topo.num_packages > 1)
+               show_pkg = 1;
+
+       topo.num_threads_per_core = max_siblings;
+       if (verbose > 1)
+               fprintf(stderr, "max_siblings %d\n", max_siblings);
+
+       free(cpus);
+}
+
+void
+allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
+{
+       int i;
+
+       *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
+               topo.num_packages, sizeof(struct thread_data));
+       if (*t == NULL)
+               goto error;
+
+       for (i = 0; i < topo.num_threads_per_core *
+               topo.num_cores_per_pkg * topo.num_packages; i++)
+               (*t)[i].cpu_id = -1;
+
+       *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
+               sizeof(struct core_data));
+       if (*c == NULL)
+               goto error;
+
+       for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
+               (*c)[i].core_id = -1;
+
+       *p = calloc(topo.num_packages, sizeof(struct pkg_data));
+       if (*p == NULL)
+               goto error;
+
+       for (i = 0; i < topo.num_packages; i++)
+               (*p)[i].package_id = i;
+
+       return;
+error:
+       perror("calloc counters");
+       exit(1);
+}
+/*
+ * init_counter()
+ *
+ * set cpu_id, core_num, pkg_num
+ * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
+ *
+ * increment topo.num_cores when 1st core in pkg seen
+ */
+void init_counter(struct thread_data *thread_base, struct core_data *core_base,
+       struct pkg_data *pkg_base, int thread_num, int core_num,
+       int pkg_num, int cpu_id)
+{
+       struct thread_data *t;
+       struct core_data *c;
+       struct pkg_data *p;
+
+       t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
+       c = GET_CORE(core_base, core_num, pkg_num);
+       p = GET_PKG(pkg_base, pkg_num);
+
+       t->cpu_id = cpu_id;
+       if (thread_num == 0) {
+               t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
+               if (cpu_is_first_core_in_package(cpu_id))
+                       t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
+       }
+
+       c->core_id = core_num;
+       p->package_id = pkg_num;
+}
+
+
+int initialize_counters(int cpu_id)
+{
+       int my_thread_id, my_core_id, my_package_id;
+
+       my_package_id = get_physical_package_id(cpu_id);
+       my_core_id = get_core_id(cpu_id);
+
+       if (cpu_is_first_sibling_in_core(cpu_id)) {
+               my_thread_id = 0;
+               topo.num_cores++;
+       } else {
+               my_thread_id = 1;
+       }
+
+       init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
+       init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
+       return 0;
+}
+
+void allocate_output_buffer()
+{
+       output_buffer = calloc(1, (1 + topo.num_cpus) * 128);
+       outp = output_buffer;
+       if (outp == NULL) {
+               perror("calloc");
+               exit(-1);
+       }
+}
+
+void setup_all_buffers(void)
+{
+       topology_probe();
+       allocate_counters(&thread_even, &core_even, &package_even);
+       allocate_counters(&thread_odd, &core_odd, &package_odd);
+       allocate_output_buffer();
+       for_all_proc_cpus(initialize_counters);
+}
 void turbostat_init()
 {
        check_cpuid();
@@ -1035,18 +1382,19 @@ void turbostat_init()
        check_dev_msr();
        check_super_user();
 
-       num_cpus = for_all_cpus(alloc_new_counters);
-       cpu_mask_init(num_cpus);
+       setup_all_buffers();
 
        if (verbose)
-               print_nehalem_info();
+               print_verbose_header();
 }
 
 int fork_it(char **argv)
 {
-       int retval;
        pid_t child_pid;
-       get_counters(cnt_even);
+
+       for_all_cpus(get_counters, EVEN_COUNTERS);
+       /* clear affinity side-effect of get_counters() */
+       sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
        gettimeofday(&tv_even, (struct timezone *)NULL);
 
        child_pid = fork();
@@ -1069,14 +1417,17 @@ int fork_it(char **argv)
                        exit(1);
                }
        }
-       get_counters(cnt_odd);
+       /*
+        * n.b. fork_it() does not check for errors from for_all_cpus()
+        * because re-starting is problematic when forking
+        */
+       for_all_cpus(get_counters, ODD_COUNTERS);
        gettimeofday(&tv_odd, (struct timezone *)NULL);
-       retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
-
        timersub(&tv_odd, &tv_even, &tv_delta);
-       compute_average(cnt_delta, cnt_average);
-       if (!retval)
-               print_counters(cnt_delta);
+       for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
+       compute_average(EVEN_COUNTERS);
+       format_all_counters(EVEN_COUNTERS);
+       flush_stderr();
 
        fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
 
@@ -1089,8 +1440,14 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
+       while ((opt = getopt(argc, argv, "+cpsvi:M:")) != -1) {
                switch (opt) {
+               case 'c':
+                       show_core_only++;
+                       break;
+               case 'p':
+                       show_pkg_only++;
+                       break;
                case 's':
                        summary_only++;
                        break;
@@ -1116,10 +1473,8 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose > 1)
-               fprintf(stderr, "turbostat Dec 6, 2010"
+               fprintf(stderr, "turbostat v2.0 May 16, 2012"
                        " - Len Brown <lenb@kernel.org>\n");
-       if (verbose > 1)
-               fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
 
        turbostat_init();
 
index 01f572c10c71c1ba2fb5775320600b61829e73c6..23a41a9f8db999f4b70fd4c0cd4b6ae8b26ee3c2 100644 (file)
@@ -334,6 +334,11 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 }
 
 #ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
 {
@@ -346,7 +351,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
        }
 
        dev->host_irq = dev->dev->irq;
-       if (request_threaded_irq(dev->host_irq, NULL,
+       if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
                                 kvm_assigned_dev_thread_msi, 0,
                                 dev->irq_name, dev)) {
                pci_disable_msi(dev->dev);
@@ -358,6 +363,11 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 #endif
 
 #ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
 {
@@ -374,7 +384,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 
        for (i = 0; i < dev->entries_nr; i++) {
                r = request_threaded_irq(dev->host_msix_entries[i].vector,
-                                        NULL, kvm_assigned_dev_thread_msix,
+                                        kvm_assigned_dev_msix,
+                                        kvm_assigned_dev_thread_msix,
                                         0, dev->irq_name, dev);
                if (r)
                        goto err;
@@ -635,7 +646,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
-       u8 header_type;
 
        if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
                return -EINVAL;
@@ -668,8 +678,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        }
 
        /* Don't allow bridges to be assigned */
-       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-       if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+       if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
                r = -EPERM;
                goto out_put;
        }
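
For illustration (not part of this patch): the MSI/MSI-X changes above replace a
NULL primary handler with a stub that returns IRQ_WAKE_THREAD, so
request_threaded_irq() accepts the request without IRQF_ONESHOT while the real
work is still deferred to the threaded handler.  A generic sketch of that
pattern (all names hypothetical):

	#include <linux/interrupt.h>

	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		/* no work in hard-irq context; just wake the handler thread */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/* the real handling runs here, in process context */
		return IRQ_HANDLED;
	}

	static int demo_request(unsigned int irq, void *dev)
	{
		return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
					    0, "demo-irq", dev);
	}
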
index f59c1e8de7a2e62b5977d90ab992e1c2240e80f5..7d7e2aaffece234a81cef3f4181cc7ffe5bb14cb 100644 (file)
@@ -198,7 +198,7 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 }
 
 static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
@@ -212,12 +212,12 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                return -ENOMEM;
 
        irqfd->kvm = kvm;
-       irqfd->gsi = gsi;
+       irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
 
-       file = eventfd_fget(fd);
+       file = eventfd_fget(args->fd);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto fail;
@@ -298,19 +298,19 @@ kvm_eventfd_init(struct kvm *kvm)
  * shutdown any irqfd's that match fd+gsi
  */
 static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 {
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;
 
-       eventfd = eventfd_ctx_fdget(fd);
+       eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);
 
        spin_lock_irq(&kvm->irqfds.lock);
 
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+               if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
                         * another thread calls kvm_irq_routing_update before
@@ -338,12 +338,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 }
 
 int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-       if (flags & KVM_IRQFD_FLAG_DEASSIGN)
-               return kvm_irqfd_deassign(kvm, fd, gsi);
+       if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+               return -EINVAL;
+
+       if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+               return kvm_irqfd_deassign(kvm, args);
 
-       return kvm_irqfd_assign(kvm, fd, gsi);
+       return kvm_irqfd_assign(kvm, args);
 }
 
 /*
index a6a0365475edafc1935e46c04297931eadb8663c..5afb43114020a82e0f3275d94e78d0d8fcba470c 100644 (file)
@@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
         */
        hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
+                   ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;
 
index 7e140683ff14d503a9714058cadd9dde7e4ffaf9..44ee7124b16dae1820ca1ca1c79f2a099979da12 100644 (file)
@@ -2047,7 +2047,7 @@ static long kvm_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&data, argp, sizeof data))
                        goto out;
-               r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+               r = kvm_irqfd(kvm, &data);
                break;
        }
        case KVM_IOEVENTFD: {
@@ -2845,6 +2845,7 @@ void kvm_exit(void)
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        free_cpumask_var(cpus_hardware_enabled);
+       __free_page(fault_page);
        __free_page(hwpoison_page);
        __free_page(bad_page);
 }