Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
author: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2014 01:07:18 +0000 (17:07 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2014 01:07:18 +0000 (17:07 -0800)
Pull more powerpc bits from Ben Herrenschmidt:
 "Here are a few more powerpc bits for this merge window.  The bulk is
  made of two pull requests from Scott and Anatolij that I had missed
  previously (they arrived while I was away).  Since both their branches
  are in -next independently, and the content has been around for a
  little while, they can still go in.

  The rest is mostly bug and regression fixes, a small series of
  cleanups to our pseries cpuidle code (including moving it to the right
  place), and one new cpuidle backend for the powernv platform.  I also
  wired up the new sched_attr syscalls"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (37 commits)
  powerpc: Wire up sched_setattr and sched_getattr syscalls
  powerpc/hugetlb: Replace __get_cpu_var with get_cpu_var
  powerpc: Make sure "cache" directory is removed when offlining cpu
  powerpc/mm: Fix mmap errno when MAP_FIXED is set and mapping exceeds the allowed address space
  powerpc/powernv/cpuidle: Back-end cpuidle driver for powernv platform.
  powerpc/pseries/cpuidle: smt-snooze-delay cleanup.
  powerpc/pseries/cpuidle: Remove MAX_IDLE_STATE macro.
  powerpc/pseries/cpuidle: Make cpuidle-pseries backend driver a non-module.
  powerpc/pseries/cpuidle: Use cpuidle_register() for initialisation.
  powerpc/pseries/cpuidle: Move processor_idle.c to drivers/cpuidle.
  powerpc: Fix 32-bit frames for signals delivered when transactional
  powerpc/iommu: Fix initialisation of DART iommu table
  powerpc/numa: Fix decimal permissions
  powerpc/mm: Fix compile error of pgtable-ppc64.h
  powerpc: Fix hw breakpoints on !HAVE_HW_BREAKPOINT configurations
  clk: corenet: Adds the clock binding
  powerpc/booke64: Guard e6500 tlb handler with CONFIG_PPC_FSL_BOOK3E
  powerpc/512x: dts: add MPC5125 clock specs
  powerpc/512x: clk: support MPC5121/5123/5125 SoC variants
  powerpc/512x: clk: enforce even SDHC divider values
  ...

1280 files changed:
CREDITS
Documentation/ABI/testing/configfs-usb-gadget
Documentation/ABI/testing/configfs-usb-gadget-acm
Documentation/ABI/testing/configfs-usb-gadget-ecm
Documentation/ABI/testing/configfs-usb-gadget-eem
Documentation/ABI/testing/configfs-usb-gadget-ffs
Documentation/ABI/testing/configfs-usb-gadget-loopback
Documentation/ABI/testing/configfs-usb-gadget-mass-storage
Documentation/ABI/testing/configfs-usb-gadget-ncm
Documentation/ABI/testing/configfs-usb-gadget-obex
Documentation/ABI/testing/configfs-usb-gadget-phonet
Documentation/ABI/testing/configfs-usb-gadget-rndis
Documentation/ABI/testing/configfs-usb-gadget-serial
Documentation/ABI/testing/configfs-usb-gadget-sourcesink
Documentation/ABI/testing/configfs-usb-gadget-subset
Documentation/block/biodoc.txt
Documentation/block/biovecs.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/davinci/nand.txt [deleted file]
Documentation/devicetree/bindings/clock/ti/apll.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/autoidle.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/clockdomain.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/composite.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/divider.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/dpll.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/gate.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/interface.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/mux.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/bcm2835-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
Documentation/devicetree/bindings/leds/tca6507.txt
Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/davinci-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/gpmi-nand.txt
Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
Documentation/devicetree/bindings/net/davinci_emac.txt
Documentation/devicetree/bindings/panel/auo,b101aw03.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/simple-panel.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
Documentation/devicetree/bindings/watchdog/gpio-wdt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
Documentation/filesystems/nfs/nfs41-server.txt
Documentation/hwmon/adm1025
Documentation/hwmon/adm1031
Documentation/hwmon/adm9240
Documentation/hwmon/ds1621
Documentation/hwmon/emc6w201
Documentation/hwmon/f71805f
Documentation/hwmon/gl518sm
Documentation/hwmon/it87
Documentation/hwmon/lm63
Documentation/hwmon/lm70
Documentation/hwmon/lm78
Documentation/hwmon/lm83
Documentation/hwmon/lm87
Documentation/hwmon/lm90
Documentation/hwmon/lm92
Documentation/hwmon/max1619
Documentation/hwmon/pc87360
Documentation/hwmon/pc87427
Documentation/hwmon/pcf8591
Documentation/hwmon/smsc47m1
Documentation/hwmon/w83627ehf
Documentation/hwmon/w83795
Documentation/hwmon/w83l785ts
Documentation/i2c/busses/i2c-i801
Documentation/i2c/busses/i2c-parport
Documentation/i2c/busses/i2c-parport-light
Documentation/i2c/busses/i2c-piix4
Documentation/i2c/busses/i2c-taos-evm
Documentation/i2c/busses/i2c-viapro
Documentation/leds/leds-lp55xx.txt
Documentation/misc-devices/eeprom
Documentation/mtd/nand/pxa3xx-nand.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
Documentation/networking/packet_mmap.txt
Documentation/sysctl/vm.txt
MAINTAINERS
Makefile
arch/arc/include/asm/linkage.h
arch/arm/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am33xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am35xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am43xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx6dl-cubox-i.dts [new file with mode: 0644]
arch/arm/boot/dts/imx6dl-hummingboard.dts [new file with mode: 0644]
arch/arm/boot/dts/imx6q-cubox-i.dts [new file with mode: 0644]
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx6qdl-microsom.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kizbox.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap3430es1-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap36xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap36xx.dtsi
arch/arm/boot/dts/omap3xxx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap443x-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap443x.dtsi
arch/arm/boot/dts/omap4460.dtsi
arch/arm/boot/dts/omap446x-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap44xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/omap54xx-clocks.dtsi [new file with mode: 0644]
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/configs/bcm_defconfig
arch/arm/include/asm/io.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/head.S
arch/arm/kernel/io.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mach-mx31moboard.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/cclock33xx_data.c [deleted file]
arch/arm/mach-omap2/cclock44xx_data.c [deleted file]
arch/arm/mach-omap2/clkt_clksel.c
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/clkt_iclk.c
arch/arm/mach-omap2/clock.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/clock36xx.c
arch/arm/mach-omap2/clock3xxx.h
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/dpll44xx.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/prm.h
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-omap2/timer.c
arch/arm/mm/init.c
arch/arm/plat-samsung/include/plat/regs-nand.h [deleted file]
arch/blackfin/configs/BF527-EZKIT_defconfig
arch/blackfin/configs/BF538-EZKIT_defconfig
arch/blackfin/configs/BF561-ACVILON_defconfig
arch/blackfin/configs/BlackStamp_defconfig
arch/blackfin/configs/CM-BF533_defconfig
arch/blackfin/configs/CM-BF548_defconfig
arch/blackfin/configs/CM-BF561_defconfig
arch/blackfin/configs/DNP5370_defconfig
arch/blackfin/configs/H8606_defconfig
arch/blackfin/configs/IP0X_defconfig
arch/blackfin/configs/PNAV-10_defconfig
arch/blackfin/configs/SRV1_defconfig
arch/blackfin/configs/TCM-BF518_defconfig
arch/blackfin/include/asm/def_LPBlackfin.h
arch/blackfin/include/uapi/asm/byteorder.h
arch/blackfin/include/uapi/asm/cachectl.h
arch/blackfin/include/uapi/asm/fcntl.h
arch/blackfin/include/uapi/asm/ioctls.h
arch/blackfin/include/uapi/asm/poll.h
arch/blackfin/include/uapi/asm/posix_types.h
arch/blackfin/include/uapi/asm/sigcontext.h
arch/blackfin/include/uapi/asm/siginfo.h
arch/blackfin/include/uapi/asm/signal.h
arch/blackfin/include/uapi/asm/stat.h
arch/blackfin/include/uapi/asm/swab.h
arch/blackfin/mach-bf533/boards/stamp.c
arch/blackfin/mach-bf609/Kconfig
arch/blackfin/mach-bf609/clock.c
arch/blackfin/mach-bf609/include/mach/anomaly.h
arch/blackfin/mach-common/cache-c.c
arch/blackfin/mach-common/clocks-init.c
arch/blackfin/mach-common/ints-priority.c
arch/blackfin/mach-common/scb-init.c
arch/blackfin/mach-common/smp.c
arch/frv/Makefile
arch/m68k/emu/nfblock.c
arch/mn10300/Makefile
arch/powerpc/sysdev/axonram.c
arch/s390/kernel/compat_linux.c
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/ebus.c
arch/sparc/kernel/hvtramp.S
arch/sparc/kernel/of_device_common.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/pci_common.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/sparc_ksyms_32.c
arch/sparc/kernel/sparc_ksyms_64.c
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/trampoline_32.S
arch/sparc/kernel/trampoline_64.S
arch/sparc/mm/hugetlbpage.c
arch/sparc/mm/tlb.c
arch/sparc/prom/p1275.c
arch/x86/include/uapi/asm/sembuf.h
arch/x86/math-emu/errors.c
arch/x86/platform/intel-mid/device_libs/platform_ipc.h
arch/x86/platform/intel-mid/device_libs/platform_msic.h
arch/x86/platform/intel-mid/intel_mid_weak_decls.h
arch/x86/platform/intel-mid/mfld.c
arch/x86/platform/intel-mid/mrfl.c
arch/xtensa/platforms/iss/simdisk.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-integrity.c
block/blk-lib.c
block/blk-map.c
block/blk-merge.c
block/blk-mq-cpu.c
block/blk-mq.c
block/blk-mq.h
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
block/cmdline-parser.c
block/elevator.c
block/scsi_ioctl.c
drivers/block/aoe/aoe.h
drivers/block/aoe/aoecmd.c
drivers/block/brd.c
drivers/block/cciss.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/paride/pg.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rsxx/dev.c
drivers/block/rsxx/dma.c
drivers/block/sx8.c
drivers/block/umem.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/cdrom/gdrom.c
drivers/char/Makefile
drivers/char/agp/Kconfig
drivers/char/agp/Makefile
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-gtt.c
drivers/char/i8k.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/clk/Makefile
drivers/clk/clk-si5351.c
drivers/clk/clk-si5351.h
drivers/clk/clk.c
drivers/clk/qcom/Makefile
drivers/clk/samsung/clk-pll.c
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/ti/Makefile [new file with mode: 0644]
drivers/clk/ti/apll.c [new file with mode: 0644]
drivers/clk/ti/autoidle.c [new file with mode: 0644]
drivers/clk/ti/clk-33xx.c [new file with mode: 0644]
drivers/clk/ti/clk-3xxx.c [new file with mode: 0644]
drivers/clk/ti/clk-43xx.c [new file with mode: 0644]
drivers/clk/ti/clk-44xx.c [new file with mode: 0644]
drivers/clk/ti/clk-54xx.c [new file with mode: 0644]
drivers/clk/ti/clk-7xx.c [new file with mode: 0644]
drivers/clk/ti/clk.c [new file with mode: 0644]
drivers/clk/ti/clockdomain.c [new file with mode: 0644]
drivers/clk/ti/composite.c [new file with mode: 0644]
drivers/clk/ti/divider.c [new file with mode: 0644]
drivers/clk/ti/dpll.c [new file with mode: 0644]
drivers/clk/ti/fixed-factor.c [new file with mode: 0644]
drivers/clk/ti/gate.c [new file with mode: 0644]
drivers/clk/ti/interface.c [new file with mode: 0644]
drivers/clk/ti/mux.c [new file with mode: 0644]
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/acpi-dma.c
drivers/dma/amba-pl08x.c
drivers/dma/bcm2835-dma.c [new file with mode: 0644]
drivers/dma/cppi41.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/edma.c
drivers/dma/fsldma.h
drivers/dma/imx-sdma.c
drivers/dma/k3dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/moxart-dma.c [new file with mode: 0644]
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/sirf-dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/virt-dma.h
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/armada/Kconfig
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/bochs/Kconfig [new file with mode: 0644]
drivers/gpu/drm/bochs/Makefile [new file with mode: 0644]
drivers/gpu/drm/bochs/bochs.h [new file with mode: 0644]
drivers/gpu/drm/bochs/bochs_drv.c [new file with mode: 0644]
drivers/gpu/drm/bochs/bochs_fbdev.c [new file with mode: 0644]
drivers/gpu/drm/bochs/bochs_hw.c [new file with mode: 0644]
drivers/gpu/drm/bochs/bochs_kms.c [new file with mode: 0644]
drivers/gpu/drm/bochs/bochs_mm.c [new file with mode: 0644]
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_buffer.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_memory.c
drivers/gpu/drm/drm_mipi_dsi.c [new file with mode: 0644]
drivers/gpu/drm/drm_panel.c [new file with mode: 0644]
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_usb.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/gma500/accel_2d.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/gma_display.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/gma500/psb_irq.c
drivers/gpu/drm/gma500/psb_irq.h
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo_ns2501.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_ums.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sdvo_regs.h
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/mga/mga_dma.c
drivers/gpu/drm/mga/mga_drv.h
drivers/gpu/drm/mga/mga_ioc32.c
drivers/gpu/drm/mga/mga_irq.c
drivers/gpu/drm/mga/mga_state.c
drivers/gpu/drm/mgag200/mgag200_cursor.c
drivers/gpu/drm/mgag200/mgag200_fb.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/NOTES
drivers/gpu/drm/msm/adreno/a2xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_common.xml.h
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/dsi/dsi.xml.h
drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
drivers/gpu/drm/msm/dsi/sfpb.xml.h
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi.xml.h
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c [new file with mode: 0644]
drivers/gpu/drm/msm/hdmi/qfprom.xml.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp_common.xml.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp_format.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp_kms.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp_kms.h [new file with mode: 0644]
drivers/gpu/drm/msm/mdp4/mdp4.xml.h [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_crtc.c [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_format.c [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_irq.c [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_kms.c [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_kms.h [deleted file]
drivers/gpu/drm/msm/mdp4/mdp4_plane.c [deleted file]
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_iommu.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_kms.h [new file with mode: 0644]
drivers/gpu/drm/msm/msm_mmu.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/core/engine.c
drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
drivers/gpu/drm/nouveau/core/engine/device/nv04.c
drivers/gpu/drm/nouveau/core/engine/device/nv10.c
drivers/gpu/drm/nouveau/core/engine/device/nv20.c
drivers/gpu/drm/nouveau/core/engine/device/nv30.c
drivers/gpu/drm/nouveau/core/engine/device/nv40.c
drivers/gpu/drm/nouveau/core/engine/device/nv50.c
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
drivers/gpu/drm/nouveau/core/engine/disp/vga.c
drivers/gpu/drm/nouveau/core/engine/falcon.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
drivers/gpu/drm/nouveau/core/engine/graph/fuc/com.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvd7.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvf0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
drivers/gpu/drm/nouveau/core/engine/graph/nv108.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
drivers/gpu/drm/nouveau/core/include/core/class.h
drivers/gpu/drm/nouveau/core/include/core/device.h
drivers/gpu/drm/nouveau/core/include/engine/fifo.h
drivers/gpu/drm/nouveau/core/include/engine/graph.h
drivers/gpu/drm/nouveau/core/include/subdev/bar.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
drivers/gpu/drm/nouveau/core/include/subdev/fb.h
drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
drivers/gpu/drm/nouveau/core/include/subdev/vm.h
drivers/gpu/drm/nouveau/core/subdev/bar/base.c
drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/bar/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
drivers/gpu/drm/nouveau/core/subdev/clock/base.c
drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
drivers/gpu/drm/nouveau/core/subdev/vm/base.c
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv04/disp.h
drivers/gpu/drm/nouveau/dispnv04/overlay.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_dma.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_debugfs.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_irq.c
drivers/gpu/drm/panel/Kconfig [new file with mode: 0644]
drivers/gpu/drm/panel/Makefile [new file with mode: 0644]
drivers/gpu/drm/panel/panel-simple.c [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/r128/r128_cce.c
drivers/gpu/drm/r128/r128_drv.h
drivers/gpu/drm/r128/r128_ioc32.c
drivers/gpu/drm/r128/r128_irq.c
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_i2c.c
drivers/gpu/drm/radeon/btc_dpm.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/ci_smc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/cypress_dpm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/pptable.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r300_cmdbuf.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_dma.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/r600_dpm.h
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_irq.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mem.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/radeon_trace.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rs780_dpm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv6xx_dpm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/rv770_dpm.h
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/si_smc.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/sislands_smc.h
drivers/gpu/drm/radeon/sumo_dpm.c
drivers/gpu/drm/radeon/sumo_smc.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/radeon/trinity_smc.c
drivers/gpu/drm/radeon/uvd_v2_2.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_drv.h
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/savage/savage_bci.c
drivers/gpu/drm/savage/savage_state.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/tegra/Kconfig
drivers/gpu/drm/tegra/Makefile
drivers/gpu/drm/tegra/bus.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/dsi.c [new file with mode: 0644]
drivers/gpu/drm/tegra/dsi.h [new file with mode: 0644]
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/gem.h
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/mipi-phy.c [new file with mode: 0644]
drivers/gpu/drm/tegra/mipi-phy.h [new file with mode: 0644]
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_lock.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/via/via_dma.c
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_irq.c
drivers/gpu/drm/via/via_video.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/svga3d_reg.h
drivers/gpu/drm/vmwgfx/svga_reg.h
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/host1x/Kconfig
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/channel.c
drivers/gpu/host1x/debug.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/gpu/host1x/hw/host1x02.c
drivers/gpu/host1x/hw/host1x02_hardware.h [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x04.c [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x04.h [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x04_hardware.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x02_uclass.h
drivers/gpu/host1x/hw/hw_host1x04_channel.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x04_sync.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x04_uclass.h [new file with mode: 0644]
drivers/gpu/host1x/hw/intr_hw.c
drivers/gpu/host1x/job.c
drivers/gpu/host1x/mipi.c [new file with mode: 0644]
drivers/gpu/host1x/syncpt.c
drivers/hv/channel.c
drivers/hwmon/Kconfig
drivers/hwmon/adm1025.c
drivers/hwmon/adm1029.c
drivers/hwmon/adm1031.c
drivers/hwmon/adt7475.c
drivers/hwmon/ds1621.c
drivers/hwmon/emc6w201.c
drivers/hwmon/f71805f.c
drivers/hwmon/gl518sm.c
drivers/hwmon/it87.c
drivers/hwmon/lm63.c
drivers/hwmon/lm78.c
drivers/hwmon/lm83.c
drivers/hwmon/lm85.c
drivers/hwmon/lm87.c
drivers/hwmon/lm90.c
drivers/hwmon/lm92.c
drivers/hwmon/lm93.c
drivers/hwmon/max1619.c
drivers/hwmon/max6642.c
drivers/hwmon/nct6775.c
drivers/hwmon/pc87360.c
drivers/hwmon/pc87427.c
drivers/hwmon/pcf8591.c
drivers/hwmon/sis5595.c
drivers/hwmon/smsc47b397.c
drivers/hwmon/smsc47m1.c
drivers/hwmon/w83627ehf.c
drivers/hwmon/w83627hf.c
drivers/hwmon/w83781d.c
drivers/hwmon/w83795.c
drivers/hwmon/w83l785ts.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/algos/i2c-algo-pcf.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-acorn.c
drivers/i2c/busses/i2c-ali1535.c
drivers/i2c/busses/i2c-ali1563.c
drivers/i2c/busses/i2c-ali15x3.c
drivers/i2c/busses/i2c-amd756-s4882.c
drivers/i2c/busses/i2c-amd756.c
drivers/i2c/busses/i2c-amd8111.c
drivers/i2c/busses/i2c-au1550.c
drivers/i2c/busses/i2c-cbus-gpio.c
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-highlander.c
drivers/i2c/busses/i2c-hydra.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-ibm_iic.c
drivers/i2c/busses/i2c-iop3xx.c
drivers/i2c/busses/i2c-isch.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-nforce2-s4985.c
drivers/i2c/busses/i2c-nforce2.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/busses/i2c-parport-light.c
drivers/i2c/busses/i2c-parport.c
drivers/i2c/busses/i2c-parport.h
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-pmcmsp.c
drivers/i2c/busses/i2c-powermac.c
drivers/i2c/busses/i2c-puv3.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-scmi.c
drivers/i2c/busses/i2c-sh7760.c
drivers/i2c/busses/i2c-simtec.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-sis96x.c
drivers/i2c/busses/i2c-taos-evm.c
drivers/i2c/busses/i2c-via.c
drivers/i2c/busses/i2c-viapro.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/busses/i2c-xlr.c
drivers/i2c/busses/scx200_i2c.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-smbus.c
drivers/i2c/i2c-stub.c
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
drivers/i2c/muxes/i2c-mux-gpio.c
drivers/i2c/muxes/i2c-mux-pca9541.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/ide/ide-cd_verbose.c
drivers/ide/ide-pio-blacklist.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/irq_remapping.c
drivers/iommu/of_iommu.c
drivers/iommu/shmobile-iommu.c
drivers/iommu/shmobile-ipmmu.c
drivers/iommu/shmobile-ipmmu.h
drivers/leds/led-triggers.c
drivers/leds/leds-lp5521.c
drivers/leds/leds-lp5523.c
drivers/leds/leds-lp55xx-common.c
drivers/leds/leds-mc13783.c
drivers/leds/leds-pwm.c
drivers/leds/leds-s3c24xx.c
drivers/leds/leds-tca6507.c
drivers/md/bcache/Makefile
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/bset.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/closure.c
drivers/md/bcache/closure.h
drivers/md/bcache/debug.c
drivers/md/bcache/debug.h
drivers/md/bcache/extents.c [new file with mode: 0644]
drivers/md/bcache/extents.h [new file with mode: 0644]
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/journal.h
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/bcache/writeback.h
drivers/md/dm-bio-record.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-switch.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/dm.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/message/fusion/mptsas.c
drivers/misc/eeprom/eeprom.c
drivers/mtd/Kconfig
drivers/mtd/afs.c
drivers/mtd/ar7part.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/bcm63xxpart.c
drivers/mtd/cmdlinepart.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/ms02-nv.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/devices/mtdram.c
drivers/mtd/lpddr/lpddr_cmds.c
drivers/mtd/maps/ixp4xx.c
drivers/mtd/maps/lantiq-flash.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/maps/sun_uflash.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/au1550nd.c
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/cmx270_nand.c
drivers/mtd/nand/cs553x_nand.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/denali.h
drivers/mtd/nand/denali_dt.c
drivers/mtd/nand/denali_pci.c
drivers/mtd/nand/diskonchip.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/fsmc_nand.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/lpc32xx_slc.c
drivers/mtd/nand/mpc5121_nfc.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nuc900_nand.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/orion_nand.c
drivers/mtd/nand/pasemi_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/s3c2410.c
drivers/mtd/nand/sh_flctl.c
drivers/mtd/nand/sharpsl.c
drivers/mtd/nand/tmio_nand.c
drivers/mtd/nand/txx9ndfmc.c
drivers/mtd/ofpart.c
drivers/mtd/onenand/generic.c
drivers/mtd/redboot.c
drivers/mtd/tests/mtd_nandecctest.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bonding.h
drivers/net/ethernet/8390/apne.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/tun.c
drivers/net/xen-netfront.c
drivers/platform/chrome/Kconfig
drivers/platform/chrome/Makefile
drivers/platform/chrome/chromeos_laptop.c
drivers/platform/chrome/chromeos_pstore.c [new file with mode: 0644]
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/compal-laptop.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/hp-wireless.c [new file with mode: 0644]
drivers/platform/x86/hp_accel.c
drivers/platform/x86/intel_baytrail.c [new file with mode: 0644]
drivers/platform/x86/intel_baytrail.h [new file with mode: 0644]
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/mxm-wmi.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk_cluster.c
drivers/s390/block/xpram.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/sbus/char/bbc_i2c.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/flash.c
drivers/sbus/char/uctrl.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/sd.c
drivers/scsi/sd_dif.c
drivers/staging/lustre/lustre/llite/lloop.c
drivers/staging/zram/zram_drv.c
drivers/target/target_core_iblock.c
drivers/tty/serial/icom.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/samsung.c
drivers/video/fbmem.c
drivers/video/omap2/dss/dispc.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/alim1535_wdt.c
drivers/watchdog/alim7101_wdt.c
drivers/watchdog/at91sam9_wdt.c
drivers/watchdog/bcm_kona_wdt.c [new file with mode: 0644]
drivers/watchdog/davinci_wdt.c
drivers/watchdog/dw_wdt.c
drivers/watchdog/gpio_wdt.c [new file with mode: 0644]
drivers/watchdog/hpwdt.c
drivers/watchdog/i6300esb.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/moxart_wdt.c
drivers/watchdog/mpc8xxx_wdt.c
drivers/watchdog/nv_tco.c
drivers/watchdog/pcwd_pci.c
drivers/watchdog/s3c2410_wdt.c
drivers/watchdog/sirfsoc_wdt.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/via_wdt.c
drivers/watchdog/w83627hf_wdt.c
drivers/watchdog/watchdog_core.c
drivers/watchdog/wdt_pci.c
fs/bio-integrity.c
fs/bio.c
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/buffer.c
fs/ceph/acl.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/dcookies.c
fs/direct-io.c
fs/ext4/page-io.c
fs/f2fs/data.c
fs/gfs2/lops.c
fs/gfs2/ops_fstype.c
fs/hfsplus/wrapper.c
fs/jffs2/malloc.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/logfs/dev_bdev.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/nfs3proc.c
fs/nfsd/acl.h
fs/nfsd/cache.h
fs/nfsd/idmap.h
fs/nfsd/netns.h
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
fs/nfsd/nfssvc.c
fs/nfsd/nfsxdr.c
fs/nfsd/vfs.c
fs/nfsd/vfs.h
fs/nfsd/xdr3.h
fs/nfsd/xdr4.h
fs/nilfs2/segbuf.c
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify.h
fs/notify/fanotify/fanotify_user.c
fs/notify/inotify/inotify_fsnotify.c
fs/notify/notification.c
fs/ocfs2/cluster/heartbeat.c
fs/read_write.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_file.c
fs/xfs/xfs_ioctl.c
include/drm/drmP.h
include/drm/drm_agpsupport.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_mipi_dsi.h [new file with mode: 0644]
include/drm/drm_os_linux.h
include/drm/drm_panel.h [new file with mode: 0644]
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_object.h
include/dt-bindings/clock/qcom,mmcc-msm8974.h
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/ceph/messenger.h
include/linux/clk-provider.h
include/linux/clk/ti.h [new file with mode: 0644]
include/linux/cmdline-parser.h
include/linux/compat.h
include/linux/dm-io.h
include/linux/dma_remapping.h
include/linux/dmaengine.h
include/linux/dmar.h
include/linux/fb.h
include/linux/fsnotify_backend.h
include/linux/host1x.h
include/linux/i2c-smbus.h
include/linux/i2c.h
include/linux/intel-iommu.h
include/linux/iommu.h
include/linux/linkage.h
include/linux/mfd/mc13xxx.h
include/linux/mtd/mtdram.h
include/linux/mtd/nand.h
include/linux/mtd/partitions.h
include/linux/of_mtd.h
include/linux/platform_data/dma-imx-sdma.h
include/linux/platform_data/dma-imx.h
include/linux/platform_data/dma-mmp_tdma.h
include/linux/platform_data/dma-mv_xor.h
include/linux/platform_data/leds-kirkwood-netxbig.h
include/linux/platform_data/leds-kirkwood-ns2.h
include/linux/platform_data/mtd-nand-omap2.h
include/linux/platform_data/mtd-nand-pxa3xx.h
include/linux/platform_data/mtd-onenand-omap2.h
include/linux/platform_data/mtd-orion_nand.h
include/linux/platform_data/si5351.h
include/linux/skbuff.h
include/linux/sunrpc/svc.h
include/linux/vmstat.h
include/trace/events/bcache.h
include/trace/events/block.h
include/trace/events/f2fs.h
include/uapi/asm-generic/ipcbuf.h
include/uapi/asm-generic/msgbuf.h
include/uapi/asm-generic/shmbuf.h
include/uapi/drm/drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/radeon_drm.h
include/uapi/drm/vmwgfx_drm.h
include/uapi/linux/bcache.h
include/uapi/linux/fd.h
include/uapi/linux/mqueue.h
include/uapi/linux/msg.h
include/uapi/linux/resource.h
include/uapi/linux/shm.h
include/uapi/linux/timex.h
kernel/power/block_io.c
kernel/trace/blktrace.c
lib/dma-debug.c
lib/genalloc.c
mm/bounce.c
mm/internal.h
mm/memblock.c
mm/mempolicy.c
mm/page-writeback.c
mm/page_io.c
mm/readahead.c
mm/slab_common.c
mm/slub.c
mm/vmscan.c
net/ceph/messenger.c
net/core/skbuff.c
net/ieee802154/6lowpan_iphc.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_tunnel.c
net/ipv6/ip6_input.c
net/llc/llc_output.c
net/rxrpc/ar-connection.c
net/rxrpc/ar-recvmsg.c
net/sched/sch_tbf.c
net/sunrpc/auth_gss/gss_krb5_keys.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/netns.h
net/sunrpc/svc.c
net/sunrpc/xprtsock.c
scripts/setlocalversion
sound/soc/fsl/fsl_ssi.c

diff --git a/CREDITS b/CREDITS
index 4c7738f493570eb9d0c70e6db67c527bcbe6e691..e371c5504a5053c32b50caf9d2251f793814bcb1 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -823,8 +823,8 @@ S: D-69231 Rauenberg
 S: Germany
 
 N: Jean Delvare
-E: khali@linux-fr.org
-W: http://khali.linux-fr.org/
+E: jdelvare@suse.de
+W: http://jdelvare.nerim.net/
 D: Several hardware monitoring drivers
 S: France
 
index 01e769d6984d575bc431b3be14f014434633427b..37559a06393b56c743ee9b45c129f268bc16aa6c 100644 (file)
@@ -1,13 +1,13 @@
 What:          /config/usb-gadget
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                This group contains sub-groups corresponding to created
                USB gadgets.
 
 What:          /config/usb-gadget/gadget
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
 
                The attributes of a gadget:
@@ -27,7 +27,7 @@ Description:
 
 What:          /config/usb-gadget/gadget/configs
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                This group contains a USB gadget's configurations
 
@@ -58,20 +58,20 @@ Description:
 
 What:          /config/usb-gadget/gadget/functions
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                This group contains functions available to this USB gadget.
 
 What:          /config/usb-gadget/gadget/strings
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                This group contains subdirectories for language-specific
                strings for this gadget.
 
 What:          /config/usb-gadget/gadget/strings/language
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                The attributes:
 
index 5708a568b5f6fb63fa7493e7ff33831ab5acbd90..d21092d75a0587ea8d7edbcd48a9af344388396d 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/acm.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
 
                This item contains just one readonly attribute: port_num.
index 6b9a582ce0b55afbca2f25688577cc4ea15cc675..0addf7704b4c0ee9639406abfd7f16184d7402fd 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/ecm.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                The attributes:
 
index dbddf36b48b3c037f2822f06469c4e85d4e23928..a4c57158fcdef28a8805157b24015eb4edb38568 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/eem.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                The attributes:
 
index 14343e237e83910e54255d14f7c85cf5837a7957..e39b27653c65c15f4911db846c7ca98859bf0463 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/ffs.name
 Date:          Nov 2013
-KenelVersion:  3.13
+KernelVersion: 3.13
 Description:   The purpose of this directory is to create and remove it.
 
                A corresponding USB function instance is created/removed.
index 852b2365a5b57a2e0c7f56030815b7397fddb695..9aae5bfb990887a6ad3faee29830d1603a7ecb19 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/Loopback.name
 Date:          Nov 2013
-KenelVersion:  3.13
+KernelVersion: 3.13
 Description:
                The attributes:
 
index ad72a37ee9ff24fab43631db8c8a0b01f02f4252..9931fb0d63ba44ce155d3e859a5bac2df451298a 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/mass_storage.name
 Date:          Oct 2013
-KenelVersion:  3.13
+KernelVersion: 3.13
 Description:
                The attributes:
 
@@ -13,7 +13,7 @@ Description:
 
 What:          /config/usb-gadget/gadget/functions/mass_storage.name/lun.name
 Date:          Oct 2013
-KenelVersion:  3.13
+KernelVersion: 3.13
 Description:
                The attributes:
 
index bc309f42357d60a3843eed7f71b4c0e9fd55d660..6fe723effc7894aeccfc297a34219cd0a5725565 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/ncm.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                The attributes:
 
index aaa5c96fb7c609de47de1a716c1d233ce7f43368..a6a9327ed9ba51ea6bd0d2aa7d844ccb00cb32cb 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/obex.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
 
                This item contains just one readonly attribute: port_num.
index 3e3b742cdfd73aa4c3d04da9e061cb9a932d5257..7037a358e6c48f672b0aab94ec5159a29fe9fcdc 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/phonet.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
 
                This item contains just one readonly attribute: ifname.
index 822e6dad8fc08e5644a7e8297464f04d804e4d8f..e32879b84b4d092ffc3aa3c655c470ec5135c9fd 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/rndis.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                The attributes:
 
index 16f130c1501f009f0d021138d48a8b27da58ee52..474d249f760be01f1311ba17842b4c4e827c8079 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/gser.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
 
                This item contains just one readonly attribute: port_num.
index a30f3093ef6caeb9b0891474884c40432e898000..29477c319f61bf16f1259962237f4b3f0a7e6287 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/SourceSink.name
 Date:          Nov 2013
-KenelVersion:  3.13
+KernelVersion: 3.13
 Description:
                The attributes:
 
index 154ae597cd99a84604decdd1a667f18707bda7e1..9373e2c51ea454e6de4031908ed5cdee4a04e9b6 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/geth.name
 Date:          Jun 2013
-KenelVersion:  3.11
+KernelVersion: 3.11
 Description:
                The attributes:
 
index 8df5e8e6dceba06846042d0c6155fd4e986addd8..2101e718670d0248110caa4320e51e83c715fad2 100644 (file)
@@ -447,14 +447,13 @@ struct bio_vec {
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-       sector_t            bi_sector;
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;   /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
        unsigned long       bi_rw;       /* low bits: r/w, high: priority */
 
        unsigned int    bi_vcnt;     /* how may bio_vec's */
-       unsigned int    bi_idx;         /* current index into bio_vec array */
+       struct bvec_iter        bi_iter;        /* current index into bio_vec array */
 
        unsigned int    bi_size;     /* total size in bytes */
        unsigned short  bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment.  This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
   [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.
 
 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
new file mode 100644 (file)
index 0000000..74a32ad
--- /dev/null
@@ -0,0 +1,111 @@
+
+Immutable biovecs and biovec iterators:
+=======================================
+
+Kent Overstreet <kmo@daterainc.com>
+
+As of 3.13, biovecs should never be modified after a bio has been submitted.
+Instead, we have a new struct bvec_iter which represents a range of a biovec -
+the iterator will be modified as the bio is completed, not the biovec.
+
+More specifically, old code that needed to partially complete a bio would
+update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
+ended up partway through a biovec, it would increment bv_offset and decrement
+bv_len by the number of bytes completed in that biovec.
+
+In the new scheme of things, everything that must be mutated in order to
+partially complete a bio is segregated into struct bvec_iter: bi_sector,
+bi_size and bi_idx have been moved there; and instead of modifying bv_offset
+and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
+bytes completed in the current bvec.
+
+There are a bunch of new helper macros for hiding the gory details - in
+particular, presenting the illusion of partially completed biovecs so that
+normal code doesn't have to deal with bi_bvec_done.
+
+ * Driver code should no longer refer to biovecs directly; we now have
+   bio_iovec() and bio_iter_iovec() macros that return literal struct biovecs,
+   constructed from the raw biovecs but taking into account bi_bvec_done and
+   bi_size.
+
+   bio_for_each_segment() has been updated to take a bvec_iter argument
+   instead of an integer (that corresponded to bi_idx); for a lot of code the
+   conversion just required changing the types of the arguments to
+   bio_for_each_segment().
+
+ * Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
+   wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
+   advances the bio integrity's iter if present.
+
+   There is a lower level advance function - bvec_iter_advance() - which takes
+   a pointer to a biovec, not a bio; this is used by the bio integrity code.
+
+What's all this get us?
+=======================
+
+Having a real iterator, and making biovecs immutable, has a number of
+advantages:
+
+ * Before, iterating over bios was very awkward when you weren't processing
+   exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+   which copies the contents of one bio into another. Because the biovecs
+   wouldn't necessarily be the same size, the old code was tricky and
+   convoluted - it had to walk two different bios at the same time, keeping
+   both bi_idx and an offset into the current biovec for each.
+
+   The new code is much more straightforward - have a look. This sort of
+   pattern comes up in a lot of places; a lot of drivers were essentially open
+   coding bvec iterators before, and having common implementation considerably
+   simplifies a lot of code.
+
+ * Before, any code that might need to use the biovec after the bio had been
+   completed (perhaps to copy the data somewhere else, or perhaps to resubmit
+   it somewhere else if there was an error) had to save the entire bvec array
+   - again, this was being done in a fair number of places.
+
+ * Biovecs can be shared between multiple bios - a bvec iter can represent an
+   arbitrary range of an existing biovec, both starting and ending midway
+   through biovecs. This is what enables efficient splitting of arbitrary
+   bios. Note that this means we _only_ use bi_size to determine when we've
+   reached the end of a bio, not bi_vcnt - and the bio_iovec() macro takes
+   bi_size into account when constructing biovecs.
+
+ * Splitting bios is now much simpler. The old bio_split() didn't even work on
+   bios with more than a single bvec! Now, we can efficiently split arbitrary
+   size bios - because the new bio can share the old bio's biovec.
+
+   Care must be taken to ensure the biovec isn't freed while the split bio is
+   still using it, in case the original bio completes first, though. Using
+   bio_chain() when splitting bios helps with this.
+
+ * Submitting partially completed bios is now perfectly fine - this comes up
+   occasionally in stacking block drivers and various code (e.g. md and
+   bcache) had some ugly workarounds for this.
+
+   It used to be the case that submitting a partially completed bio would work
+   fine to _most_ devices, but since accessing the raw bvec array was the
+   norm, not all drivers would respect bi_idx and those would break. Now,
+   since all drivers _must_ go through the bvec iterator - and have been
+   audited to make sure they are - submitting partially completed bios is
+   perfectly fine.
+
+Other implications:
+===================
+
+ * Almost all usage of bi_idx is now incorrect and has been removed; instead,
+   where previously you would have used bi_idx you'd now use a bvec_iter,
+   probably passing it to one of the helper macros.
+
+   I.e. instead of using bio_iovec_idx() (or bio->bi_iovec[bio->bi_idx]), you
+   now use bio_iter_iovec(), which takes a bvec_iter and returns a
+   literal struct bio_vec - constructed on the fly from the raw biovec but
+   taking into account bi_bvec_done (and bi_size).
+
+ * bi_vcnt can't be trusted or relied upon by driver code - i.e. anything that
+   doesn't actually own the bio. The reason is twofold: firstly, it's not
+   actually needed for iterating over the bio anymore - we only use bi_size.
+   Secondly, when cloning a bio and reusing (a portion of) the original bio's
+   biovec, in order to calculate bi_vcnt for the new bio we'd have to iterate
+   over all the biovecs in the new bio - which is silly as it's not needed.
+
+   So, don't use bi_vcnt anymore.
diff --git a/Documentation/devicetree/bindings/arm/davinci/nand.txt b/Documentation/devicetree/bindings/arm/davinci/nand.txt
deleted file mode 100644 (file)
index 3545ea7..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-* Texas Instruments Davinci NAND
-
-This file provides information, what the device node for the
-davinci nand interface contain.
-
-Required properties:
-- compatible: "ti,davinci-nand";
-- reg : contain 2 offset/length values:
-        - offset and length for the access window
-        - offset and length for accessing the aemif control registers
-- ti,davinci-chipselect: Indicates on the davinci_nand driver which
-                         chipselect is used for accessing the nand.
-
-Recommended properties :
-- ti,davinci-mask-ale: mask for ale
-- ti,davinci-mask-cle: mask for cle
-- ti,davinci-mask-chipsel: mask for chipselect
-- ti,davinci-ecc-mode: ECC mode valid values for davinci driver:
-               - "none"
-               - "soft"
-               - "hw"
-- ti,davinci-ecc-bits: used ECC bits, currently supported 1 or 4.
-- ti,davinci-nand-buswidth: buswidth 8 or 16
-- ti,davinci-nand-use-bbt: use flash based bad block table support.
-
-nand device bindings may contain additional sub-nodes describing
-partitions of the address space. See partition.txt for more detail.
-
-Example(da850 EVM ):
-nand_cs3@62000000 {
-       compatible = "ti,davinci-nand";
-       reg = <0x62000000 0x807ff
-               0x68000000 0x8000>;
-       ti,davinci-chipselect = <1>;
-       ti,davinci-mask-ale = <0>;
-       ti,davinci-mask-cle = <0>;
-       ti,davinci-mask-chipsel = <0>;
-       ti,davinci-ecc-mode = "hw";
-       ti,davinci-ecc-bits = <4>;
-       ti,davinci-nand-use-bbt;
-
-       partition@180000 {
-               label = "ubifs";
-               reg = <0x180000 0x7e80000>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/clock/ti/apll.txt b/Documentation/devicetree/bindings/clock/ti/apll.txt
new file mode 100644 (file)
index 0000000..7faf5a6
--- /dev/null
@@ -0,0 +1,31 @@
+Binding for Texas Instruments APLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1].  It assumes a
+register-mapped APLL with usually two selectable input clocks
+(reference clock and bypass clock), with analog phase locked
+loop logic for multiplying the input clock to a desired output
+clock. This clock also typically supports different operation
+modes (locked, low power stop etc.) APLL mostly behaves like
+a subtype of a DPLL [2], although a simplified one at that.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/clock/ti/dpll.txt
+
+Required properties:
+- compatible : shall be "ti,dra7-apll-clock"
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of parent clocks (clk-ref and clk-bypass)
+- reg : address and length of the register set for controlling the APLL.
+  It contains the information of registers in the following order:
+       "control" - contains the control register base address
+       "idlest" - contains the idlest register base address
+
+Examples:
+       apll_pcie_ck: apll_pcie_ck@4a008200 {
+               #clock-cells = <0>;
+               clocks = <&apll_pcie_in_clk_mux>, <&dpll_pcie_ref_ck>;
+               reg = <0x4a00821c 0x4>, <0x4a008220 0x4>;
+               compatible = "ti,dra7-apll-clock";
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/autoidle.txt b/Documentation/devicetree/bindings/clock/ti/autoidle.txt
new file mode 100644 (file)
index 0000000..7c735dd
--- /dev/null
@@ -0,0 +1,39 @@
+Binding for Texas Instruments autoidle clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a register mapped
+clock which can be put to idle automatically by hardware based on the usage
+and a configuration bit setting. Autoidle clock is never an individual
+clock, it is always a derivative of some basic clock like a gate, divider,
+or fixed-factor.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- reg : offset for the register controlling the autoidle
+- ti,autoidle-shift : bit shift of the autoidle enable bit
+- ti,invert-autoidle-bit : autoidle is enabled by setting the bit to 0
+
+Examples:
+       dpll_core_m4_ck: dpll_core_m4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2d38>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&dpll_usb_ck>;
+               ti,clock-div = <1>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01b4>;
+               ti,clock-mult = <1>;
+               ti,invert-autoidle-bit;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/clockdomain.txt b/Documentation/devicetree/bindings/clock/ti/clockdomain.txt
new file mode 100644 (file)
index 0000000..cb76b3f
--- /dev/null
@@ -0,0 +1,24 @@
+Binding for Texas Instruments clockdomain.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1] in consumer role.
+Every clock on TI SoC belongs to one clockdomain, but software
+only needs this information for specific clocks which require
+their parent clockdomain to be controlled when the clock is
+enabled/disabled. This binding doesn't define a new clock
+binding type, it is used to group existing clock nodes under
+hardware hierarchy.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,clockdomain"
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of clocks within this domain
+
+Examples:
+       dss_clkdm: dss_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dss1_alwon_fck_3430es2>, <&dss_ick_3430es2>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/composite.txt b/Documentation/devicetree/bindings/clock/ti/composite.txt
new file mode 100644 (file)
index 0000000..5f43c47
--- /dev/null
@@ -0,0 +1,54 @@
+Binding for TI composite clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a
+register-mapped composite clock with multiple different sub-types;
+
+a multiplexer clock with multiple input clock signals or parents, one
+of which can be selected as output, this behaves exactly as [2]
+
+an adjustable clock rate divider, this behaves exactly as [3]
+
+a gating function which can be used to enable and disable the output
+clock, this behaves exactly as [4]
+
+The binding must provide a list of the component clocks that shall be
+merged to this clock. The component clocks shall be of one of the
+"ti,*composite*-clock" types.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/clock/ti/mux.txt
+[3] Documentation/devicetree/bindings/clock/ti/divider.txt
+[4] Documentation/devicetree/bindings/clock/ti/gate.txt
+
+Required properties:
+- compatible : shall be: "ti,composite-clock"
+- clocks : link phandles of component clocks
+- #clock-cells : from common clock binding; shall be set to 0.
+
+Examples:
+
+usb_l4_gate_ick: usb_l4_gate_ick {
+       #clock-cells = <0>;
+       compatible = "ti,composite-interface-clock";
+       clocks = <&l4_ick>;
+       ti,bit-shift = <5>;
+       reg = <0x0a10>;
+};
+
+usb_l4_div_ick: usb_l4_div_ick {
+       #clock-cells = <0>;
+       compatible = "ti,composite-divider-clock";
+       clocks = <&l4_ick>;
+       ti,bit-shift = <4>;
+       ti,max-div = <1>;
+       reg = <0x0a40>;
+       ti,index-starts-at-one;
+};
+
+usb_l4_ick: usb_l4_ick {
+       #clock-cells = <0>;
+       compatible = "ti,composite-clock";
+       clocks = <&usb_l4_gate_ick>, <&usb_l4_div_ick>;
+};
diff --git a/Documentation/devicetree/bindings/clock/ti/divider.txt b/Documentation/devicetree/bindings/clock/ti/divider.txt
new file mode 100644 (file)
index 0000000..35a6f5c
--- /dev/null
@@ -0,0 +1,114 @@
+Binding for TI divider clock
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1].  It assumes a
+register-mapped adjustable clock rate divider that does not gate and has
+only one input clock or parent.  By default the value programmed into
+the register is one less than the actual divisor value.  E.g:
+
+register value         actual divisor value
+0                      1
+1                      2
+2                      3
+
+This assumption may be modified by the following optional properties:
+
+ti,index-starts-at-one - valid divisor values start at 1, not the default
+of 0.  E.g:
+register value         actual divisor value
+1                      1
+2                      2
+3                      3
+
+ti,index-power-of-two - valid divisor values are powers of two.  E.g:
+register value         actual divisor value
+0                      1
+1                      2
+2                      4
+
+Additionally an array of valid dividers may be supplied like so:
+
+       ti,dividers = <4>, <8>, <0>, <16>;
+
+Which will map the resulting values to a divisor table by their index:
+register value         actual divisor value
+0                      4
+1                      8
+2                      <invalid divisor, skipped>
+3                      16
+
+Any zero value in this array means the corresponding bit-value is invalid
+and must not be used.
+
+The binding must also provide the register to control the divider and
+unless the divider array is provided, min and max dividers. Optionally
+the number of bits to shift that mask, if necessary. If the shift value
+is missing it is the same as supplying a zero shift.
+
+This binding can also optionally provide support to the hardware autoidle
+feature, see [2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/clock/ti/autoidle.txt
+
+Required properties:
+- compatible : shall be "ti,divider-clock" or "ti,composite-divider-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link to phandle of parent clock
+- reg : offset for register controlling adjustable divider
+
+Optional properties:
+- clock-output-names : from common clock binding.
+- ti,dividers : array of integers defining divisors
+- ti,bit-shift : number of bits to shift the divider value, defaults to 0
+- ti,min-div : min divisor for dividing the input clock rate, only
+  needed if the first divisor is offset from the default value (1)
+- ti,max-div : max divisor for dividing the input clock rate, only needed
+  if ti,dividers is not defined.
+- ti,index-starts-at-one : valid divisor programming starts at 1, not zero,
+  only valid if ti,dividers is not defined.
+- ti,index-power-of-two : valid divisor programming must be a power of two,
+  only valid if ti,dividers is not defined.
+- ti,autoidle-shift : bit shift of the autoidle enable bit for the clock,
+  see [2]
+- ti,invert-autoidle-bit : autoidle is enabled by setting the bit to 0,
+  see [2]
+- ti,set-rate-parent : clk_set_rate is propagated to parent
+
+Examples:
+dpll_usb_m2_ck: dpll_usb_m2_ck@4a008190 {
+       #clock-cells = <0>;
+       compatible = "ti,divider-clock";
+       clocks = <&dpll_usb_ck>;
+       ti,max-div = <127>;
+       reg = <0x190>;
+       ti,index-starts-at-one;
+};
+
+aess_fclk: aess_fclk@4a004528 {
+       #clock-cells = <0>;
+       compatible = "ti,divider-clock";
+       clocks = <&abe_clk>;
+       ti,bit-shift = <24>;
+       reg = <0x528>;
+       ti,max-div = <2>;
+};
+
+dpll_core_m3x2_div_ck: dpll_core_m3x2_div_ck {
+       #clock-cells = <0>;
+       compatible = "ti,composite-divider-clock";
+       clocks = <&dpll_core_x2_ck>;
+       ti,max-div = <31>;
+       reg = <0x0134>;
+       ti,index-starts-at-one;
+};
+
+ssi_ssr_div_fck_3430es2: ssi_ssr_div_fck_3430es2 {
+       #clock-cells = <0>;
+       compatible = "ti,composite-divider-clock";
+       clocks = <&corex2_fck>;
+       ti,bit-shift = <8>;
+       reg = <0x0a40>;
+       ti,dividers = <0>, <1>, <2>, <3>, <4>, <0>, <6>, <0>, <8>;
+};
diff --git a/Documentation/devicetree/bindings/clock/ti/dpll.txt b/Documentation/devicetree/bindings/clock/ti/dpll.txt
new file mode 100644 (file)
index 0000000..30bfdb7
--- /dev/null
@@ -0,0 +1,75 @@
+Binding for Texas Instruments DPLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1].  It assumes a
+register-mapped DPLL with usually two selectable input clocks
+(reference clock and bypass clock), with digital phase locked
+loop logic for multiplying the input clock to a desired output
+clock. This clock also typically supports different operation
+modes (locked, low power stop etc.) This binding has several
+sub-types, which effectively result in slightly different setup
+for the actual DPLL clock.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be one of:
+               "ti,omap3-dpll-clock",
+               "ti,omap3-dpll-core-clock",
+               "ti,omap3-dpll-per-clock",
+               "ti,omap3-dpll-per-j-type-clock",
+               "ti,omap4-dpll-clock",
+               "ti,omap4-dpll-x2-clock",
+               "ti,omap4-dpll-core-clock",
+               "ti,omap4-dpll-m4xen-clock",
+               "ti,omap4-dpll-j-type-clock",
+               "ti,am3-dpll-no-gate-clock",
+               "ti,am3-dpll-j-type-clock",
+               "ti,am3-dpll-no-gate-j-type-clock",
+               "ti,am3-dpll-clock",
+               "ti,am3-dpll-core-clock",
+               "ti,am3-dpll-x2-clock",
+
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of parent clocks, first entry lists reference clock
+  and second entry bypass clock
+- reg : offsets for the register set for controlling the DPLL.
+  Registers are listed in following order:
+       "control" - contains the control register base address
+       "idlest" - contains the idle status register base address
+       "mult-div1" - contains the multiplier / divider register base address
+       "autoidle" - contains the autoidle register base address (optional)
+  ti,am3-* dpll types do not have autoidle register
+
+Optional properties:
+- DPLL mode setting - defining any one or more of the following overrides
+  default setting.
+       - ti,low-power-stop : DPLL supports low power stop mode, gating output
+       - ti,low-power-bypass : DPLL output matches rate of parent bypass clock
+       - ti,lock : DPLL locks in programmed rate
+
+Examples:
+       dpll_core_ck: dpll_core_ck@44e00490 {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-core-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x490>, <0x45c>, <0x488>, <0x468>;
+       };
+
+       dpll2_ck: dpll2_ck@48004004 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-clock";
+               clocks = <&sys_ck>, <&dpll2_fck>;
+               ti,low-power-stop;
+               ti,low-power-bypass;
+               ti,lock;
+               reg = <0x4>, <0x24>, <0x34>, <0x40>;
+       };
+
+       dpll_core_ck: dpll_core_ck@44e00490 {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-core-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x90>, <0x5c>, <0x68>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt b/Documentation/devicetree/bindings/clock/ti/fixed-factor-clock.txt
new file mode 100644 (file)
index 0000000..662b36d
--- /dev/null
@@ -0,0 +1,43 @@
+Binding for TI fixed factor rate clock sources.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1], and also uses the autoidle
+support from TI autoidle clock [2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/clock/ti/autoidle.txt
+
+Required properties:
+- compatible : shall be "ti,fixed-factor-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- ti,clock-div: fixed divider.
+- ti,clock-mult: fixed multiplier.
+- clocks: parent clock.
+
+Optional properties:
+- ti,autoidle-shift: bit shift of the autoidle enable bit for the clock,
+  see [2]
+- reg: offset for the autoidle register of this clock, see [2]
+- ti,invert-autoidle-bit: autoidle is enabled by setting the bit to 0, see [2]
+- ti,set-rate-parent: clk_set_rate is propagated to parent
+
+Example:
+       clock {
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&parentclk>;
+               #clock-cells = <0>;
+               ti,clock-div = <2>;
+               ti,clock-mult = <1>;
+       };
+
+       dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&dpll_usb_ck>;
+               ti,clock-div = <1>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01b4>;
+               ti,clock-mult = <1>;
+               ti,invert-autoidle-bit;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/gate.txt b/Documentation/devicetree/bindings/clock/ti/gate.txt
new file mode 100644 (file)
index 0000000..125281a
--- /dev/null
@@ -0,0 +1,85 @@
+Binding for Texas Instruments gate clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. This clock is
+quite much similar to the basic gate-clock [2], however,
+it supports a number of additional features. If no register
+is provided for this clock, the code assumes that a clockdomain
+will be controlled instead and the corresponding hw-ops for
+that is used.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/clock/gate-clock.txt
+[3] Documentation/devicetree/bindings/clock/ti/clockdomain.txt
+
+Required properties:
+- compatible : shall be one of:
+  "ti,gate-clock" - basic gate clock
+  "ti,wait-gate-clock" - gate clock which waits until clock is active before
+                        returning from clk_enable()
+  "ti,dss-gate-clock" - gate clock with DSS specific hardware handling
+  "ti,am35xx-gate-clock" - gate clock with AM35xx specific hardware handling
+  "ti,clkdm-gate-clock" - clockdomain gate clock, which derives its functional
+                         clock directly from a clockdomain, see [3] how
+                         to map clockdomains properly
+  "ti,hsdiv-gate-clock" - gate clock with OMAP36xx specific hardware handling,
+                         required for a hardware errata
+- #clock-cells : from common clock binding; shall be set to 0
+- clocks : link to phandle of parent clock
+- reg : offset for register controlling adjustable gate, not needed for
+       ti,clkdm-gate-clock type
+
+Optional properties:
+- ti,bit-shift : bit shift for programming the clock gate, invalid for
+                ti,clkdm-gate-clock type
+- ti,set-bit-to-disable : inverts default gate programming. Setting the bit
+  gates the clock and clearing the bit ungates the clock.
+
+Examples:
+       mmchs2_fck: mmchs2_fck@48004a00 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x48004a00 0x4>;
+               ti,bit-shift = <25>;
+       };
+
+       uart4_fck_am35xx: uart4_fck_am35xx {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <23>;
+       };
+
+       dss1_alwon_fck_3430es2: dss1_alwon_fck_3430es2@48004e00 {
+               #clock-cells = <0>;
+               compatible = "ti,dss-gate-clock";
+               clocks = <&dpll4_m4x2_ck>;
+               reg = <0x48004e00 0x4>;
+               ti,bit-shift = <0>;
+       };
+
+       emac_ick: emac_ick@4800259c {
+               #clock-cells = <0>;
+               compatible = "ti,am35xx-gate-clock";
+               clocks = <&ipss_ick>;
+               reg = <0x4800259c 0x4>;
+               ti,bit-shift = <1>;
+       };
+
+       emu_src_ck: emu_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,clkdm-gate-clock";
+               clocks = <&emu_src_mux_ck>;
+       };
+
+       dpll4_m2x2_ck: dpll4_m2x2_ck@48004d00 {
+               #clock-cells = <0>;
+               compatible = "ti,hsdiv-gate-clock";
+               clocks = <&dpll4_m2x2_mul_ck>;
+               ti,bit-shift = <0x1b>;
+               reg = <0x48004d00 0x4>;
+               ti,set-bit-to-disable;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt
new file mode 100644 (file)
index 0000000..064e8ca
--- /dev/null
@@ -0,0 +1,54 @@
+Binding for Texas Instruments interface clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. This clock is
+quite much similar to the basic gate-clock [2], however,
+it supports a number of additional features, including
+companion clock finding (match corresponding functional gate
+clock) and hardware autoidle enable / disable.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/clock/gate-clock.txt
+
+Required properties:
+- compatible : shall be one of:
+  "ti,omap3-interface-clock" - basic OMAP3 interface clock
+  "ti,omap3-no-wait-interface-clock" - interface clock which has no hardware
+                                      capability for waiting clock to be ready
+  "ti,omap3-hsotgusb-interface-clock" - interface clock with USB specific HW
+                                       handling
+  "ti,omap3-dss-interface-clock" - interface clock with DSS specific HW handling
+  "ti,omap3-ssi-interface-clock" - interface clock with SSI specific HW handling
+  "ti,am35xx-interface-clock" - interface clock with AM35xx specific HW handling
+- #clock-cells : from common clock binding; shall be set to 0
+- clocks : link to phandle of parent clock
+- reg : base address for the control register
+
+Optional properties:
+- ti,bit-shift : bit shift for the bit enabling/disabling the clock (default 0)
+
+Examples:
+       aes1_ick: aes1_ick@48004a14 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&security_l4_ick2>;
+               reg = <0x48004a14 0x4>;
+               ti,bit-shift = <3>;
+       };
+
+       cam_ick: cam_ick@48004f10 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-no-wait-interface-clock";
+               clocks = <&l4_ick>;
+               reg = <0x48004f10 0x4>;
+               ti,bit-shift = <0>;
+       };
+
+       ssi_ick_3430es2: ssi_ick_3430es2@48004a10 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-ssi-interface-clock";
+               clocks = <&ssi_l4_ick>;
+               reg = <0x48004a10 0x4>;
+               ti,bit-shift = <0>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/mux.txt b/Documentation/devicetree/bindings/clock/ti/mux.txt
new file mode 100644 (file)
index 0000000..2d0d170
--- /dev/null
@@ -0,0 +1,76 @@
+Binding for TI mux clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1].  It assumes a
+register-mapped multiplexer with multiple input clock signals or
+parents, one of which can be selected as output.  This clock does not
+gate or adjust the parent rate via a divider or multiplier.
+
+By default the "clocks" property lists the parents in the same order
+as they are programmed into the regster.  E.g:
+
+       clocks = <&foo_clock>, <&bar_clock>, <&baz_clock>;
+
+results in programming the register as follows:
+
+register value         selected parent clock
+0                      foo_clock
+1                      bar_clock
+2                      baz_clock
+
+Some clock controller IPs do not allow a value of zero to be programmed
+into the register, instead indexing begins at 1.  The optional property
+"index-starts-at-one" modified the scheme as follows:
+
+register value         selected clock parent
+1                      foo_clock
+2                      bar_clock
+3                      baz_clock
+
+The binding must provide the register to control the mux. Optionally
+the number of bits to shift the control field in the register can be
+supplied. If the shift value is missing it is the same as supplying
+a zero shift.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,mux-clock" or "ti,composite-mux-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of parent clocks
+- reg : register offset for register controlling adjustable mux
+
+Optional properties:
+- ti,bit-shift : number of bits to shift the bit-mask, defaults to
+  0 if not present
+- ti,index-starts-at-one : valid input select programming starts at 1, not
+  zero
+- ti,set-rate-parent : clk_set_rate is propagated to parent clock,
+  not supported by the composite-mux-clock subtype
+
+Examples:
+
+sys_clkin_ck: sys_clkin_ck@4a306110 {
+       #clock-cells = <0>;
+       compatible = "ti,mux-clock";
+       clocks = <&virt_12000000_ck>, <&virt_13000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
+       reg = <0x0110>;
+       ti,index-starts-at-one;
+};
+
+abe_dpll_bypass_clk_mux_ck: abe_dpll_bypass_clk_mux_ck@4a306108 {
+       #clock-cells = <0>;
+       compatible = "ti,mux-clock";
+       clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+       ti,bit-shift = <24>;
+       reg = <0x0108>;
+};
+
+mcbsp5_mux_fck: mcbsp5_mux_fck {
+       #clock-cells = <0>;
+       compatible = "ti,composite-mux-clock";
+       clocks = <&core_96m_fck>, <&mcbsp_clks>;
+       ti,bit-shift = <4>;
+       reg = <0x02d8>;
+};
diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
new file mode 100644 (file)
index 0000000..1396078
--- /dev/null
@@ -0,0 +1,57 @@
+* BCM2835 DMA controller
+
+The BCM2835 DMA controller has 16 channels in total.
+Only the lower 13 channels have an associated IRQ.
+Some arbitrary channels are used by the firmware
+(1,3,6,7 in the current firmware version).
+The channels 0,2 and 3 have special functionality
+and should not be used by the driver.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain the DMA interrupts associated
+               to the DMA channels in ascending order.
+- #dma-cells: Must be <1>, the cell in the dmas property of the
+               client device represents the DREQ number.
+- brcm,dma-channel-mask: Bit mask representing the channels
+                        not used by the firmware in ascending order,
+                        i.e. first channel corresponds to LSB.
+
+Example:
+
+dma: dma@7e007000 {
+       compatible = "brcm,bcm2835-dma";
+       reg = <0x7e007000 0xf00>;
+       interrupts = <1 16>,
+                    <1 17>,
+                    <1 18>,
+                    <1 19>,
+                    <1 20>,
+                    <1 21>,
+                    <1 22>,
+                    <1 23>,
+                    <1 24>,
+                    <1 25>,
+                    <1 26>,
+                    <1 27>,
+                    <1 28>;
+
+       #dma-cells = <1>;
+       brcm,dma-channel-mask = <0x7f35>;
+};
+
+DMA clients connected to the BCM2835 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel.
+
+Example:
+
+bcm2835_i2s: i2s@7e203000 {
+       compatible = "brcm,bcm2835-i2s";
+       reg = < 0x7e203000 0x20>,
+             < 0x7e101098 0x02>;
+
+       dmas = <&dma 2>,
+              <&dma 3>;
+       dma-names = "tx", "rx";
+};
index 4fa814d3832124adb80f29ee777849739acbb7e4..68b83ecc385007216d391f1a0edf06527dad5fb9 100644 (file)
@@ -42,6 +42,7 @@ The full ID of peripheral types can be found below.
        19      IPU Memory
        20      ASRC
        21      ESAI
+       22      SSI Dual FIFO   (needs firmware ver >= 2)
 
 The third cell specifies the transfer priority as below.
 
diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644 (file)
index 0000000..8a9f355
--- /dev/null
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg :                Should contain registers location and length
+- interrupts : Should contain an interrupt-specifier for the sole
+               interrupt generated by the device
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+       dma: dma@90500000 {
+               compatible = "moxa,moxart-dma";
+               reg = <0x90500080 0x40>;
+               interrupts = <24 0>;
+               #dma-cells = <1>;
+       };
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cells.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use specific request line passing from dma
+For example, MMC request line is 5
+
+       sdhci: sdhci@98e00000 {
+               compatible = "moxa,moxart-sdhci";
+               reg = <0x98e00000 0x5C>;
+               interrupts = <5 0>;
+               clocks = <&clk_apb>;
+               dmas =  <&dma 5>,
+                       <&dma 5>;
+               dma-names = "tx", "rx";
+       };
index ab45c02aa658f666b78b1f4bedf4dc87255033db..efaeec8961b64bddba0df9d48af9af65e630f659 100644 (file)
@@ -118,6 +118,9 @@ of the following host1x client modules:
     See ../reset/reset.txt for details.
   - reset-names: Must include the following entries:
     - dc
+  - nvidia,head: The number of the display controller head. This is used to
+    setup the various types of output to receive video data from the given
+    head.
 
   Each display controller node has a child node, named "rgb", that represents
   the RGB output associated with the controller. It can take the following
@@ -125,6 +128,7 @@ of the following host1x client modules:
   - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 - hdmi: High Definition Multimedia Interface
 
@@ -149,6 +153,7 @@ of the following host1x client modules:
   - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 - tvo: TV encoder output
 
@@ -169,11 +174,21 @@ of the following host1x client modules:
   - clock-names: Must include the following entries:
     - dsi
       This MUST be the first entry.
+    - lp
     - parent
   - resets: Must contain an entry for each entry in reset-names.
     See ../reset/reset.txt for details.
   - reset-names: Must include the following entries:
     - dsi
+  - nvidia,mipi-calibrate: Should contain a phandle and a specifier specifying
+    which pads are used by this DSI output and need to be calibrated. See also
+    ../mipi/nvidia,tegra114-mipi.txt.
+
+  Optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 Example:
 
@@ -253,7 +268,7 @@ Example:
                        interrupts = <0 73 0x04>;
                        clocks = <&tegra_car TEGRA20_CLK_DISP1>,
                                 <&tegra_car TEGRA20_CLK_PLL_P>;
-                       clock-names = "disp1", "parent";
+                       clock-names = "dc", "parent";
                        resets = <&tegra_car 27>;
                        reset-names = "dc";
 
@@ -268,7 +283,7 @@ Example:
                        interrupts = <0 74 0x04>;
                        clocks = <&tegra_car TEGRA20_CLK_DISP2>,
                                 <&tegra_car TEGRA20_CLK_PLL_P>;
-                       clock-names = "disp2", "parent";
+                       clock-names = "dc", "parent";
                        resets = <&tegra_car 26>;
                        reset-names = "dc";
 
index 80ff3dfb1f325e89993184324e8ed053fd15e867..d7221b84987cd684169bee2765a050cba3511c75 100644 (file)
@@ -2,6 +2,13 @@ LEDs connected to tca6507
 
 Required properties:
 - compatible : should be : "ti,tca6507".
+- #address-cells: must be 1
+- #size-cells: must be 0
+- reg: typically 0x45.
+
+Optional properties:
+- gpio-controller: allows lines to be used as output-only GPIOs.
+- #gpio-cells: if present, must be 0.
 
 Each led is represented as a sub-node of the ti,tca6507 device.
 
@@ -10,6 +17,7 @@ LED sub-node properties:
 - reg : number of LED line (could be from 0 to 6)
 - linux,default-trigger : (optional)
    see Documentation/devicetree/bindings/leds/common.txt
+- compatible: either "led" (the default) or "gpio".
 
 Examples:
 
@@ -19,6 +27,9 @@ tca6507@45 {
        #size-cells = <0>;
        reg = <0x45>;
 
+       gpio-controller;
+       #gpio-cells = <2>;
+
        led0: red-aux@0 {
                label = "red:aux";
                reg = <0x0>;
@@ -29,5 +40,10 @@ tca6507@45 {
                reg = <0x5>;
                linux,default-trigger = "default-on";
        };
+
+       wifi-reset@6 {
+               reg = <0x6>;
+               compatible = "gpio";
+       };
 };
 
diff --git a/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt b/Documentation/devicetree/bindings/mipi/dsi/mipi-dsi-bus.txt
new file mode 100644 (file)
index 0000000..973c272
--- /dev/null
@@ -0,0 +1,98 @@
+MIPI DSI (Display Serial Interface) busses
+==========================================
+
+The MIPI Display Serial Interface specifies a serial bus and a protocol for
+communication between a host and up to four peripherals. This document will
+define the syntax used to represent a DSI bus in a device tree.
+
+This document describes DSI bus-specific properties only or defines existing
+standard properties in the context of the DSI bus.
+
+Each DSI host provides a DSI bus. The DSI host controller's node contains a
+set of properties that characterize the bus. Child nodes describe individual
+peripherals on that bus.
+
+The following assumes that only a single peripheral is connected to a DSI
+host. Experience shows that this is true for the large majority of setups.
+
+DSI host
+--------
+
+In addition to the standard properties and those defined by the parent bus of
+a DSI host, the following properties apply to a node representing a DSI host.
+
+Required properties:
+- #address-cells: The number of cells required to represent an address on the
+  bus. DSI peripherals are addressed using a 2-bit virtual channel number, so
+  a maximum of 4 devices can be addressed on a single bus. Hence the value of
+  this property should be 1.
+- #size-cells: Should be 0. There are cases where it makes sense to use a
+  different value here. See below.
+
+DSI peripheral
+--------------
+
+Peripherals are represented as child nodes of the DSI host's node. Properties
+described here apply to all DSI peripherals, but individual bindings may want
+to define additional, device-specific properties.
+
+Required properties:
+- reg: The virtual channel number of a DSI peripheral. Must be in the range
+  from 0 to 3.
+
+Some DSI peripherals respond to more than a single virtual channel. In that
+case two alternative representations can be chosen:
+- The reg property can take multiple entries, one for each virtual channel
+  that the peripheral responds to.
+- If the virtual channels that a peripheral responds to are consecutive, the
+  #size-cells can be set to 1. The first cell of each entry in the reg
+  property is the number of the first virtual channel and the second cell is
+  the number of consecutive virtual channels.
+
+Example
+-------
+
+       dsi-host {
+               ...
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               /* peripheral responds to virtual channel 0 */
+               peripheral@0 {
+                       compatible = "...";
+                       reg = <0>;
+               };
+
+               ...
+       };
+
+       dsi-host {
+               ...
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               /* peripheral responds to virtual channels 0 and 2 */
+               peripheral@0 {
+                       compatible = "...";
+                       reg = <0, 2>;
+               };
+
+               ...
+       };
+
+       dsi-host {
+               ...
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* peripheral responds to virtual channels 1, 2 and 3 */
+               peripheral@1 {
+                       compatible = "...";
+                       reg = <1 3>;
+               };
+
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt b/Documentation/devicetree/bindings/mipi/nvidia,tegra114-mipi.txt
new file mode 100644 (file)
index 0000000..e4a25ce
--- /dev/null
@@ -0,0 +1,41 @@
+NVIDIA Tegra MIPI pad calibration controller
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-mipi"
+- reg: Physical base address and length of the controller's registers.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - mipi-cal
+- #nvidia,mipi-calibrate-cells: Should be 1. The cell is a bitmask of the pads
+  that need to be calibrated for a given device.
+
+User nodes need to contain an nvidia,mipi-calibrate property that has a
+phandle to refer to the calibration controller node and a bitmask of the pads
+that need to be calibrated.
+
+Example:
+
+       mipi: mipi@700e3000 {
+               compatible = "nvidia,tegra114-mipi";
+               reg = <0x700e3000 0x100>;
+               clocks = <&tegra_car TEGRA114_CLK_MIPI_CAL>;
+               clock-names = "mipi-cal";
+               #nvidia,mipi-calibrate-cells = <1>;
+       };
+
+       ...
+
+       host1x@50000000 {
+               ...
+
+               dsi@54300000 {
+                       ...
+
+                       nvidia,mipi-calibrate = <&mipi 0x060>;
+
+                       ...
+               };
+
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/mtd/davinci-nand.txt b/Documentation/devicetree/bindings/mtd/davinci-nand.txt
new file mode 100644 (file)
index 0000000..cfb18ab
--- /dev/null
@@ -0,0 +1,94 @@
+Device tree bindings for Texas instruments Davinci/Keystone NAND controller
+
+This file provides information about what the device node for the
+davinci/keystone NAND interface contains.
+
+Documentation:
+Davinci DM646x - http://www.ti.com/lit/ug/sprueq7c/sprueq7c.pdf
+Keystone - http://www.ti.com/lit/ug/sprugz3a/sprugz3a.pdf
+
+Required properties:
+
+- compatible:                  "ti,davinci-nand"
+                               "ti,keystone-nand"
+
+- reg:                         Contains 2 offset/length values:
+                               - offset and length for the access window.
+                               - offset and length for accessing the AEMIF
+                               control registers.
+
+- ti,davinci-chipselect:       number of chipselect. Indicates to the
+                               davinci_nand driver which chipselect is used
+                               for accessing the nand.
+                               Can be in the range [0-3].
+
+Recommended properties :
+
+- ti,davinci-mask-ale:         mask for ALE. Needed for executing address
+                               phase. This offset will be added to the base
+                               address for the chip select space the NAND Flash
+                               device is connected to.
+                               If not set, defaults to 0x08.
+
+- ti,davinci-mask-cle:         mask for CLE. Needed for executing command
+                               phase. This offset will be added to the base
+                               address for the chip select space the NAND Flash
+                               device is connected to.
+                               If not set, defaults to 0x10.
+
+- ti,davinci-mask-chipsel:     mask for chipselect address. Needed to mask
+                               addresses for given chipselect.
+
+- nand-ecc-mode:               operation mode of the NAND ecc mode. ECC mode
+                               valid values for davinci driver:
+                               - "none"
+                               - "soft"
+                               - "hw"
+
+- ti,davinci-ecc-bits:         used ECC bits, currently supported 1 or 4.
+
+- nand-bus-width:              buswidth 8 or 16. If not present 8.
+
+- nand-on-flash-bbt:           use flash based bad block table support. OOB
+                               identifier is saved in OOB area. If not present
+                               false.
+
+Deprecated properties:
+
+- ti,davinci-ecc-mode:         operation mode of the NAND ecc mode. ECC mode
+                               valid values for davinci driver:
+                               - "none"
+                               - "soft"
+                               - "hw"
+
+- ti,davinci-nand-buswidth:    buswidth 8 or 16. If not present 8.
+
+- ti,davinci-nand-use-bbt:     use flash based bad block table support. OOB
+                               identifier is saved in OOB area. If not present
+                               false.
+
+Nand device bindings may contain additional sub-nodes describing partitions of
+the address space. See partition.txt for more detail. The NAND Flash timing
+values must be programmed in the chip select's node of AEMIF
+memory-controller (see Documentation/devicetree/bindings/memory-controllers/
+davinci-aemif.txt).
+
+Example(da850 EVM ):
+
+nand_cs3@62000000 {
+       compatible = "ti,davinci-nand";
+       reg = <0x62000000 0x807ff
+              0x68000000 0x8000>;
+       ti,davinci-chipselect = <1>;
+       ti,davinci-mask-ale = <0>;
+       ti,davinci-mask-cle = <0>;
+       ti,davinci-mask-chipsel = <0>;
+       nand-ecc-mode = "hw";
+       ti,davinci-ecc-bits = <4>;
+       nand-on-flash-bbt;
+
+       partition@180000 {
+               label = "ubifs";
+               reg = <0x180000 0x7e80000>;
+       };
+};
index 551b2a179d016df4bd9bb8e611f38633d57d6640..458d5963468826647d2469c23f542bde3ffe047c 100644 (file)
@@ -17,6 +17,14 @@ Required properties:
 Optional properties:
   - nand-on-flash-bbt: boolean to enable on flash bbt option if not
                        present false
+  - fsl,use-minimum-ecc: Protect this NAND flash with the minimum ECC
+                       strength required. The required ECC strength is
+                       automatically discoverable for some flash
+                       (e.g., according to the ONFI standard).
+                       However, note that if this strength is not
+                       discoverable or this property is not enabled,
+                       the software may choose an implementation-defined
+                       ECC scheme.
 
 The device tree may optionally contain sub-nodes describing partitions of the
 address space. See partition.txt for more detail.
index f1421e2bbab7387a87e1885794b9688780d6de73..86e0a5601ff5dfb05d9eeed35eb3e638ea58cc7b 100644 (file)
@@ -2,7 +2,9 @@ PXA3xx NAND DT bindings
 
 Required properties:
 
- - compatible:         Should be "marvell,pxa3xx-nand"
+ - compatible:         Should be set to one of the following:
+                       marvell,pxa3xx-nand
+                       marvell,armada370-nand
  - reg:                The register base for the controller
  - interrupts:         The interrupt to map
  - #address-cells:     Set to <1> if the node includes partitions
@@ -13,6 +15,8 @@ Optional properties:
  - marvell,nand-keep-config:   Set to keep the NAND controller config as set
                                by the bootloader
  - num-cs:                     Number of chipselect lines to usw
+ - nand-on-flash-bbt:          boolean to enable on flash bbt option if
+                               not present false
 
 Example:
 
index ca0911a20e8b21595c23d3eae2e8e4869dec65cc..6e356d15154a9603c8ce2b73f57edbd0c326c60c 100644 (file)
@@ -10,8 +10,6 @@ Required properties:
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register
 - ti,davinci-ctrl-ram-offset: offset to control module ram
 - ti,davinci-ctrl-ram-size: size of control module ram
-- ti,davinci-rmii-en: use RMII
-- ti,davinci-no-bd-ram: has the emac controller BD RAM
 - interrupts: interrupt mapping for the davinci emac interrupts sources:
               4 sources: <Receive Threshold Interrupt
                          Receive Interrupt
@@ -22,6 +20,8 @@ Optional properties:
 - phy-handle: Contains a phandle to an Ethernet PHY.
               If absent, davinci_emac driver defaults to 100/FULL.
 - local-mac-address : 6 bytes, mac address
+- ti,davinci-rmii-en: 1 byte, 1 means use RMII
+- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
 
 Example (enbw_cmc board):
        eth0: emac@1e20000 {
diff --git a/Documentation/devicetree/bindings/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/panel/auo,b101aw03.txt
new file mode 100644 (file)
index 0000000..72e088a
--- /dev/null
@@ -0,0 +1,7 @@
+AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101aw03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wa01a.txt
new file mode 100644 (file)
index 0000000..f24614e
--- /dev/null
@@ -0,0 +1,7 @@
+Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "chunghwa,claa101wa01a"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt
new file mode 100644 (file)
index 0000000..0ab2c05
--- /dev/null
@@ -0,0 +1,7 @@
+Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "chunghwa,claa101wb03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt
new file mode 100644 (file)
index 0000000..d328b03
--- /dev/null
@@ -0,0 +1,7 @@
+Panasonic Corporation 10.1" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "panasonic,vvx10f004b00"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt b/Documentation/devicetree/bindings/panel/samsung,ltn101nt05.txt
new file mode 100644 (file)
index 0000000..ef522c6
--- /dev/null
@@ -0,0 +1,7 @@
+Samsung Electronics 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,ltn101nt05"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/simple-panel.txt b/Documentation/devicetree/bindings/panel/simple-panel.txt
new file mode 100644 (file)
index 0000000..1341bbf
--- /dev/null
@@ -0,0 +1,21 @@
+Simple display panel
+
+Required properties:
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+       panel: panel {
+               compatible = "cptt,claa101wb01";
+               ddc-i2c-bus = <&panelddc>;
+
+               power-supply = <&vdd_pnl_reg>;
+               enable-gpios = <&gpio 90 0>;
+
+               backlight = <&backlight>;
+       };
index fcdd48f7dcffc0f71540dacb48aa4f501c6dd5de..f90e294d7631f9b538ba3e5e72c2ab1b096a5479 100644 (file)
@@ -9,11 +9,37 @@ Required properties:
 
 Optional properties:
 - timeout-sec: contains the watchdog timeout in seconds.
+- interrupts : Should contain WDT interrupt.
+- atmel,max-heartbeat-sec : Should contain the maximum heartbeat value in
+       seconds. This value should be less or equal to 16. It is used to
+       compute the WDV field.
+- atmel,min-heartbeat-sec : Should contain the minimum heartbeat value in
+       seconds. This value must be smaller than the max-heartbeat-sec value.
+       It is used to compute the WDD field.
+- atmel,watchdog-type : Should be "hardware" or "software". A hardware watchdog
+       uses the at91 watchdog reset. A software watchdog uses the watchdog
+       interrupt to trigger a software reset.
+- atmel,reset-type : Should be "proc" or "all".
+       "all" : assert peripherals and processor reset signals
+       "proc" : assert the processor reset signal
+       This is valid only when using "hardware" watchdog.
+- atmel,disable : Should be present if you want to disable the watchdog.
+- atmel,idle-halt : Should be present if you want to stop the watchdog when
+       entering idle state.
+- atmel,dbg-halt : Should be present if you want to stop the watchdog when
+       entering debug state.
 
 Example:
-
        watchdog@fffffd40 {
                compatible = "atmel,at91sam9260-wdt";
                reg = <0xfffffd40 0x10>;
-               timeout-sec = <10>;
+               interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+               timeout-sec = <15>;
+               atmel,watchdog-type = "hardware";
+               atmel,reset-type = "all";
+               atmel,dbg-halt;
+               atmel,idle-halt;
+               atmel,max-heartbeat-sec = <16>;
+               atmel,min-heartbeat-sec = <0>;
+               status = "okay";
        };
index 75558ccd9a051e59af9233f279a660c27fa58f97..e60b9a13bdcbd5887078e110280dffe5e39ba5e5 100644 (file)
@@ -1,12 +1,24 @@
-DaVinci Watchdog Timer (WDT) Controller
+Texas Instruments DaVinci/Keystone Watchdog Timer (WDT) Controller
 
 Required properties:
-- compatible : Should be "ti,davinci-wdt"
+- compatible : Should be "ti,davinci-wdt", "ti,keystone-wdt"
 - reg : Should contain WDT registers location and length
 
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+- clocks : the clock feeding the watchdog timer.
+          Needed if platform uses clocks.
+          See clock-bindings.txt
+
+Documentation:
+Davinci DM646x - http://www.ti.com/lit/ug/spruer5b/spruer5b.pdf
+Keystone - http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+
 Examples:
 
 wdt: wdt@2320000 {
        compatible = "ti,davinci-wdt";
        reg = <0x02320000 0x80>;
+       timeout-sec = <30>;
+       clocks = <&clkwdtimer0>;
 };
diff --git a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt
new file mode 100644 (file)
index 0000000..37afec1
--- /dev/null
@@ -0,0 +1,23 @@
+* GPIO-controlled Watchdog
+
+Required Properties:
+- compatible: Should contain "linux,wdt-gpio".
+- gpios: From common gpio binding; gpio connection to WDT reset pin.
+- hw_algo: The algorithm used by the driver. Should be one of the
+  following values:
+  - toggle: Either a high-to-low or a low-to-high transition clears
+    the WDT counter. The watchdog timer is disabled when GPIO is
+    left floating or connected to a three-state buffer.
+  - level: Low or high level starts counting WDT timeout,
+    the opposite level disables the WDT. Active level is determined
+    by the GPIO flags.
+- hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds).
+
+Example:
+       watchdog: watchdog {
+               /* ADM706 */
+               compatible = "linux,wdt-gpio";
+               gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
+               hw_algo = "toggle";
+               hw_margin_ms = <1600>;
+       };
index 2aa486cc1ff6a17ba8f1d95b75800a76941d2f5f..cfff37511aac0e3e1f3d42f6e9805c087a8bc286 100644 (file)
@@ -5,10 +5,29 @@ after a preset amount of time during which the WDT reset event has not
 occurred.
 
 Required properties:
-- compatible : should be "samsung,s3c2410-wdt"
+- compatible : should be one of the following
+       (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs
+       (b) "samsung,exynos5250-wdt" for Exynos5250
+       (c) "samsung,exynos5420-wdt" for Exynos5420
+
 - reg : base physical address of the controller and length of memory mapped
        region.
 - interrupts : interrupt number to the cpu.
+- samsung,syscon-phandle : reference to syscon node (This property is required
+       in case of compatible being "samsung,exynos5250-wdt" or "samsung,exynos5420-wdt".
+       In case of Exynos5250 and 5420 this property points to syscon node holding the PMU
+       base address)
 
 Optional properties:
 - timeout-sec : contains the watchdog timeout in seconds.
+
+Example:
+
+watchdog@101D0000 {
+       compatible = "samsung,exynos5250-wdt";
+       reg = <0x101D0000 0x100>;
+       interrupts = <0 42 0>;
+       clocks = <&clock 336>;
+       clock-names = "watchdog";
+       samsung,syscon-phandle = <&pmu_syscon>;
+};
index 01c2db769791832b989b738d413cea2ecefcba68..b930ad08778000e43fd27c605fb50bd0f9d03d41 100644 (file)
@@ -5,11 +5,11 @@ Server support for minorversion 1 can be controlled using the
 by reading this file will contain either "+4.1" or "-4.1"
 correspondingly.
 
-Currently, server support for minorversion 1 is disabled by default.
-It can be enabled at run time by writing the string "+4.1" to
+Currently, server support for minorversion 1 is enabled by default.
+It can be disabled at run time by writing the string "-4.1" to
 the /proc/fs/nfsd/versions control file.  Note that to write this
-control file, the nfsd service must be taken down.  Use your user-mode
-nfs-utils to set this up; see rpc.nfsd(8)
+control file, the nfsd service must be taken down.  You can use rpc.nfsd
+for this; see rpc.nfsd(8).
 
 (Warning: older servers will interpret "+4.1" and "-4.1" as "+4" and
 "-4", respectively.  Therefore, code meant to work on both new and old
@@ -29,29 +29,6 @@ are still under development out of tree.
 See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design
 for more information.
 
-The current implementation is intended for developers only: while it
-does support ordinary file operations on clients we have tested against
-(including the linux client), it is incomplete in ways which may limit
-features unexpectedly, cause known bugs in rare cases, or cause
-interoperability problems with future clients.  Known issues:
-
-       - gss support is questionable: currently mounts with kerberos
-         from a linux client are possible, but we aren't really
-         conformant with the spec (for example, we don't use kerberos
-         on the backchannel correctly).
-       - We do not support SSV, which provides security for shared
-         client-server state (thus preventing unauthorized tampering
-         with locks and opens, for example).  It is mandatory for
-         servers to support this, though no clients use it yet.
-
-In addition, some limitations are inherited from the current NFSv4
-implementation:
-
-       - Incomplete delegation enforcement: if a file is renamed or
-         unlinked by a local process, a client holding a delegation may
-         continue to indefinitely allow opens of the file under the old
-         name.
-
 The table below, taken from the NFSv4.1 document, lists
 the operations that are mandatory to implement (REQ), optional
 (OPT), and NFSv4.0 operations that are required not to implement (MNI)
@@ -169,6 +146,16 @@ NS*| CB_WANTS_CANCELLED      | OPT       | FDELG,      | Section 20.10 |
 
 Implementation notes:
 
+SSV:
+* The spec claims this is mandatory, but we don't actually know of any
+  implementations, so we're ignoring it for now.  The server returns
+  NFS4ERR_ENCR_ALG_UNSUPP on EXCHANGE_ID, which should be future-proof.
+
+GSS on the backchannel:
+* Again, theoretically required but not widely implemented (in
+  particular, the current Linux client doesn't request it).  We return
+  NFS4ERR_ENCR_ALG_UNSUPP on CREATE_SESSION.
+
 DELEGPURGE:
 * mandatory only for servers that support CLAIM_DELEGATE_PREV and/or
   CLAIM_DELEG_PREV_FH (which allows clients to keep delegations that
@@ -176,7 +163,6 @@ DELEGPURGE:
   now.
 
 EXCHANGE_ID:
-* only SP4_NONE state protection supported
 * implementation ids are ignored
 
 CREATE_SESSION:
index 39d2b781b5d6144f5f46eff8a5a873b908be044d..99f05049c68a89330f4be3b099c205853935792d 100644 (file)
@@ -18,7 +18,7 @@ The NE1619 presents some differences with the original ADM1025:
 
 Authors:
         Chen-Yuan Wu <gwu@esoft.com>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index be92a77da1d5116d9e828903a61f58fffc1feea3..a143117c99cb3dea9cc7a5a4a17670a81d5e1d4c 100644 (file)
@@ -16,7 +16,7 @@ Supported chips:
 
 Authors:
         Alexandre d'Alton <alex@alexdalton.org>
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index 36e8ec6aa868c1b6fa0a5cee9fc42ba10e83364a..9b174fc700cce8ce6716f281a4593e7b19182920 100644 (file)
@@ -25,7 +25,7 @@ Authors:
     Philip Edelbrock <phil@netroedge.com>,
     Michiel Rook <michiel@grendelproject.nl>,
     Grant Coady <gcoady.lk@gmail.com> with guidance
-        from Jean Delvare <khali@linux-fr.org>
+        from Jean Delvare <jdelvare@suse.de>
 
 Interface
 ---------
index 896cdc972ca8edcd9e7ae31081aeb39cc4d91eb3..f775e612f5820a5488da463f7fcfb86ffaa8cd9b 100644 (file)
@@ -31,7 +31,7 @@ Authors:
         Christian W. Zuckschwerdt <zany@triq.net>
         valuable contributions by Jan M. Sendler <sendler@sendler.de>
         ported to 2.6 by Aurelien Jarno <aurelien@aurel32.net>
-        with the help of Jean Delvare <khali@linux-fr.org>
+        with the help of Jean Delvare <jdelvare@suse.de>
 
 Module Parameters
 ------------------
index 32f355aaf56b6b4dcd10e61e2a7db90b82dbbfa7..757629b128978d5edadecfdc9932ba7e6cffed9f 100644 (file)
@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: I2C 0x2c, 0x2d, 0x2e
     Datasheet: Not public
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 
 Description
index f0d55976740adfeabfb7258d4109a7d4c930c6ac..48a356084bc671fbe8b473461deac358497640da 100644 (file)
@@ -15,7 +15,7 @@ Supported chips:
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Thanks to Denis Kieft from Barracuda Networks for the donation of a
 test system (custom Jetway K8M8MS motherboard, with CPU and RAM) and
index 26f9f3c02dc7e6febbf9835415abf994fecc82e5..494bb55b6e7261d55cc4a859fc4b1abb513fec7e 100644 (file)
@@ -14,7 +14,7 @@ Authors:
         Frodo Looijaard <frodol@dds.nl>,
         Kyösti Mälkki <kmalkki@cc.hut.fi>
         Hong-Gunn Chew <hglinux@gunnet.org>
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index c263740f0cba83b5c3f17e95ecae83326da06458..0c1635082c9951c6b9c1b038a283ba5ef68ae0bf 100644 (file)
@@ -2,6 +2,10 @@ Kernel driver it87
 ==================
 
 Supported chips:
+  * IT8603E
+    Prefix: 'it8603'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not publicly available
   * IT8705F
     Prefix: 'it87'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -53,7 +57,7 @@ Supported chips:
 
 Authors:
     Christophe Gauthron
-    Jean Delvare <khali@linux-fr.org>
+    Jean Delvare <jdelvare@suse.de>
 
 
 Module Parameters
@@ -90,7 +94,7 @@ motherboard models.
 Description
 -----------
 
-This driver implements support for the IT8705F, IT8712F, IT8716F,
+This driver implements support for the IT8603E, IT8705F, IT8712F, IT8716F,
 IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, IT8771E, IT8772E,
 IT8782F, IT8783E/F, and SiS950 chips.
 
@@ -129,6 +133,10 @@ to userspace applications.
 The IT8728F, IT8771E, and IT8772E are considered compatible with the IT8721F,
 until a datasheet becomes available (hopefully.)
 
+The IT8603E is a custom design, hardware monitoring part is similar to
+IT8728F. It only supports 16-bit fan mode, the full speed mode of the
+fan is not supported (value 0 of pwmX_enable).
+
 Temperatures are measured in degrees Celsius. An alarm is triggered once
 when the Overtemperature Shutdown limit is crossed.
 
@@ -145,13 +153,16 @@ alarm is triggered if the voltage has crossed a programmable minimum or
 maximum limit. Note that minimum in this case always means 'closest to
 zero'; this is important for negative voltage measurements. All voltage
 inputs can measure voltages between 0 and 4.08 volts, with a resolution of
-0.016 volt (except IT8721F/IT8758E and IT8728F: 0.012 volt.) The battery
-voltage in8 does not have limit registers.
-
-On the IT8721F/IT8758E, IT8782F, and IT8783E/F, some voltage inputs are
-internal and scaled inside the chip (in7 (optional for IT8782F and IT8783E/F),
-in8 and optionally in3). The driver handles this transparently so user-space
-doesn't have to care.
+0.016 volt (except IT8603E, IT8721F/IT8758E and IT8728F: 0.012 volt.) The
+battery voltage in8 does not have limit registers.
+
+On the IT8603E, IT8721F/IT8758E, IT8782F, and IT8783E/F, some voltage inputs
+are internal and scaled inside the chip:
+* in3 (optional)
+* in7 (optional for IT8782F and IT8783E/F)
+* in8 (always)
+* in9 (relevant for IT8603E only)
+The driver handles this transparently so user-space doesn't have to care.
 
 The VID lines (IT8712F/IT8716F/IT8718F/IT8720F) encode the core voltage value:
 the voltage level your processor should work with. This is hardcoded by
index 4d30d209881a2861ad95c07208a7179ead236b03..4a00461512a61e4b57fc544c9e8d725834f9333a 100644 (file)
@@ -18,7 +18,7 @@ Supported chips:
     Datasheet: Publicly available at the National Semiconductor website
                http://www.national.com/pf/LM/LM96163.html
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Thanks go to Tyan and especially Alex Buckingham for setting up a remote
 access to their S4882 test platform for this driver.
index 86d182942c51498524429f34e7ed979b68dc75b3..1bb2db4406717ec02f4033553fd7df6c16d2951a 100644 (file)
@@ -43,5 +43,5 @@ data (0.03125 degrees celsius resolution).
 
 Thanks to
 ---------
-Jean Delvare <khali@linux-fr.org> for mentoring the hwmon-side driver
+Jean Delvare <jdelvare@suse.de> for mentoring the hwmon-side driver
 development.
index 2bdc881a0c1238da62e9e3dd912057a817874f4c..4dd47731789f1b30a191eb6b0b33e76137acf103 100644 (file)
@@ -14,7 +14,7 @@ Supported chips:
                http://www.national.com/
 
 Authors: Frodo Looijaard <frodol@dds.nl>
-         Jean Delvare <khali@linux-fr.org>
+         Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index a04d1fe9269cc9f2e129de63a2cdd18625704bca..50be5cb26de930c5798d0506153dc5352858cc9b 100644 (file)
@@ -13,7 +13,7 @@ Supported chips:
                http://www.national.com/pf/LM/LM82.html
 
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index 6b47b67fd968e8a05c0eb811f329f33774e96e5b..a2339fd9acb97019d70eeaa4b2f05c08546f310b 100644 (file)
@@ -17,7 +17,7 @@ Authors:
         Mark Studebaker <mdsxyz123@yahoo.com>,
         Stephen Rousset <stephen.rousset@rocketlogix.com>,
         Dan Eaton <dan.eaton@rocketlogix.com>,
-        Jean Delvare <khali@linux-fr.org>,
+        Jean Delvare <jdelvare@suse.de>,
         Original 2.6 port Jeff Oliver
 
 Description
index ab81013cc3907a45ffb039a1f72a98d83f1b46b8..8122675d30f627564578d2c5f4c1c371e8861849 100644 (file)
@@ -129,7 +129,7 @@ Supported chips:
                http://www.ti.com/litv/pdf/sbos686
 
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 
 Description
index 7705bfaa070856666a2cce8678ca8b1dac886376..22f68ad032cf983c200a36975691442f53f2b8d9 100644 (file)
@@ -19,7 +19,7 @@ Supported chips:
 
 Authors:
         Abraham van der Merwe <abraham@2d3d.co.za>
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 
 Description
index e6d87398cc8f01e8c450713f5933347627d05752..518bae3a80c472245cf18421facdbd7fa41f4477 100644 (file)
@@ -10,7 +10,7 @@ Supported chips:
 
 Authors:
         Oleksij Rempel <bug-track@fisher-privat.net>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index cbac32b59c8c96a1bc86a3455a3ee4cbe3342db5..d5f5cf16ce5987299aed1758d3d6731e0cd695e1 100644 (file)
@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: none, address read from Super I/O config space
     Datasheets: No longer available
 
-Authors: Jean Delvare <khali@linux-fr.org>
+Authors: Jean Delvare <jdelvare@suse.de>
 
 Thanks to Sandeep Mehta, Tonko de Rooy and Daniel Ceregatti for testing.
 Thanks to Rudolf Marek for helping me investigate conversion issues.
index 8fdd08c9e48b5ceac48b100ee4cedbdf78fcc242..c313eb66e08adb03eb6a71ea0553da3e47e96351 100644 (file)
@@ -7,7 +7,7 @@ Supported chips:
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: No longer available
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 Thanks to Amir Habibi at Candelis for setting up a test system, and to
 Michael Kress for testing several iterations of this driver.
index ac020b3bb7b3769939a3574505ab1d3a0b173900..447c0702c0ecbf90ed25a7780579546ad52bed7b 100644 (file)
@@ -11,7 +11,7 @@ Supported chips:
 Authors:
         Aurelien Jarno <aurelien@aurel32.net>
         valuable contributions by Jan M. Sendler <sendler@sendler.de>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 
 Description
index 2a13378dcf2213642fb6faf12f479babadbe6d61..10a24b42068636333445553ac1fbcc031b84e3f2 100644 (file)
@@ -25,7 +25,7 @@ Authors:
         With assistance from Bruce Allen <ballen@uwm.edu>, and his
         fan.c program: http://www.lsc-group.phys.uwm.edu/%7Eballen/driver/
         Gabriele Gorla <gorlik@yahoo.com>,
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index ceaf6f652b00662d19b2ece7b8792ee92799e4fa..735c42a85eadcab4a68d8cfc06a5aa4a946e9146 100644 (file)
@@ -36,7 +36,7 @@ Supported chips:
     Datasheet: Available from Nuvoton upon request
 
 Authors:
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
         Yuan Mu (Winbond)
         Rudolf Marek <r.marek@assembler.cz>
         David Hubbard <david.c.hubbard@gmail.com>
index 9f160371f463dbd11663999eedb5f538fce739e2..d3e678216b9adb87e6c052b950916e7f857222e8 100644 (file)
@@ -13,7 +13,7 @@ Supported chips:
 
 Authors:
     Wei Song (Nuvoton)
-    Jean Delvare <khali@linux-fr.org>
+    Jean Delvare <jdelvare@suse.de>
 
 
 Pin mapping
index bd1fa9d4468d9ba238c05c62cd562807c122e1ad..c8978478871f27d20d8f030b66c359a578360cbf 100644 (file)
@@ -9,7 +9,7 @@ Supported chips:
                http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
 
 Authors:
-        Jean Delvare <khali@linux-fr.org>
+        Jean Delvare <jdelvare@suse.de>
 
 Description
 -----------
index 7b0dcdb57173ce2246303c2f20eec1e94a5c6b8b..aaaf069306a3ec13dd4dc86372ebb7416b12dd31 100644 (file)
@@ -33,7 +33,7 @@ and the additional 'Integrated Device Function' controllers are supported.
 
 Authors: 
        Mark Studebaker <mdsxyz123@yahoo.com>
-       Jean Delvare <khali@linux-fr.org>
+       Jean Delvare <jdelvare@suse.de>
 
 
 Module Parameters
index 2461c7b53b2c1e489ae699e95a25e1bd2ac8fe45..0e2d17b460fddc79b4127b8771c6edb58565780a 100644 (file)
@@ -1,6 +1,6 @@
 Kernel driver i2c-parport
 
-Author: Jean Delvare <khali@linux-fr.org> 
+Author: Jean Delvare <jdelvare@suse.de>
 
 This is a unified driver for several i2c-over-parallel-port adapters,
 such as the ones made by Philips, Velleman or ELV. This driver is
index c22ee063e1e52e6f5d03d6cf191bf564a7492857..7071b8ba0af4c88b9b73c100d1a0766a2ca20618 100644 (file)
@@ -1,6 +1,6 @@
 Kernel driver i2c-parport-light
 
-Author: Jean Delvare <khali@linux-fr.org> 
+Author: Jean Delvare <jdelvare@suse.de>
 
 This driver is a light version of i2c-parport. It doesn't depend        
 on the parport driver, and uses direct I/O access instead. This might be
index c097e0f020fe1d786bed67fd3ed9e06fd99f7b63..aa959fd22450de6793e02d73f2fa657ac31c4bce 100644 (file)
@@ -13,7 +13,7 @@ Supported adapters:
   * AMD SP5100 (SB700 derivative found on some server mainboards)
     Datasheet: Publicly available at the AMD website
     http://support.amd.com/us/Embedded_TechDocs/44413.pdf
-  * AMD Hudson-2, CZ
+  * AMD Hudson-2, ML, CZ
     Datasheet: Not publicly available
   * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
     Datasheet: Publicly available at the SMSC website http://www.smsc.com
index 63f62bcbf59200524b628d7b04859e1c09f9069f..60299555dcf01f44d1f3324691e44b642829573e 100644 (file)
@@ -1,6 +1,6 @@
 Kernel driver i2c-taos-evm
 
-Author: Jean Delvare <khali@linux-fr.org>
+Author: Jean Delvare <jdelvare@suse.de>
 
 This is a driver for the evaluation modules for TAOS I2C/SMBus chips.
 The modules include an SMBus master with limited capabilities, which can
index b88f91ae580ef53c72b5081d93f392c4b9d05278..ab64ce21c25442f68a262c19cf64aa5167bdfdbb 100644 (file)
@@ -28,7 +28,7 @@ Supported adapters:
 Authors:
        Kyösti Mälkki <kmalkki@cc.hut.fi>,
        Mark D. Studebaker <mdsxyz123@yahoo.com>,
-       Jean Delvare <khali@linux-fr.org>
+       Jean Delvare <jdelvare@suse.de>
 
 Module Parameters
 -----------------
index 82713ff92eb3e5c72a82ff6f1dc9d637e7f04376..bcea12a0c5847e12211a14d43f356d0f1a1e77f0 100644 (file)
@@ -73,6 +73,10 @@ select_engine : Select which engine is used for running program
 run_engine    : Start program which is loaded via the firmware interface
 firmware      : Load program data
 
+In case of LP5523, one more command is required, 'enginex_leds'.
+It is used for selecting LED output(s) at each engine number.
+For more details, please refer to 'leds-lp5523.txt'.
+
 For example, run blinking pattern in engine #1 of LP5521
 echo 1 > /sys/bus/i2c/devices/xxxx/select_engine
 echo 1 > /sys/class/firmware/lp5521/loading
@@ -81,10 +85,12 @@ echo 0 > /sys/class/firmware/lp5521/loading
 echo 1 > /sys/bus/i2c/devices/xxxx/run_engine
 
 For example, run blinking pattern in engine #3 of LP55231
+Two LEDs are configured as pattern output channels.
 echo 3 > /sys/bus/i2c/devices/xxxx/select_engine
 echo 1 > /sys/class/firmware/lp55231/loading
 echo "9d0740ff7e0040007e00a0010000" > /sys/class/firmware/lp55231/data
 echo 0 > /sys/class/firmware/lp55231/loading
+echo "000001100" > /sys/bus/i2c/devices/xxxx/engine3_leds
 echo 1 > /sys/bus/i2c/devices/xxxx/run_engine
 
 To start blinking patterns in engine #2 and #3 simultaneously,
@@ -99,17 +105,19 @@ done
 echo 1 > /sys/class/leds/red/device/run_engine
 
 Here is another example for LP5523.
+Full LED strings are selected by 'engine2_leds'.
 echo 2 > /sys/bus/i2c/devices/xxxx/select_engine
 echo 1 > /sys/class/firmware/lp5523/loading
 echo "9d80400004ff05ff437f0000" > /sys/class/firmware/lp5523/data
 echo 0 > /sys/class/firmware/lp5523/loading
+echo "111111111" > /sys/bus/i2c/devices/xxxx/engine2_leds
 echo 1 > /sys/bus/i2c/devices/xxxx/run_engine
 
 As soon as 'loading' is set to 0, registered callback is called.
 Inside the callback, the selected engine is loaded and memory is updated.
 To run programmed pattern, 'run_engine' attribute should be enabled.
 
-The pattern sqeuence of LP8501 is same as LP5523.
+The pattern sequence of LP8501 is similar to LP5523.
 However pattern data is specific.
 Ex 1) Engine 1 is used
 echo 1 > /sys/bus/i2c/devices/xxxx/select_engine
index f7e8104b5764fa277b09ef4e7c9a9ab8634719b3..ba692011f221337f553ba5eb489be5548eaaa96b 100644 (file)
@@ -38,7 +38,7 @@ Supported chips:
 Authors:
         Frodo Looijaard <frodol@dds.nl>,
         Philip Edelbrock <phil@netroedge.com>,
-        Jean Delvare <khali@linux-fr.org>,
+        Jean Delvare <jdelvare@suse.de>,
         Greg Kroah-Hartman <greg@kroah.com>,
         IBM Corp.
 
diff --git a/Documentation/mtd/nand/pxa3xx-nand.txt b/Documentation/mtd/nand/pxa3xx-nand.txt
new file mode 100644 (file)
index 0000000..840fd41
--- /dev/null
@@ -0,0 +1,113 @@
+
+About this document
+===================
+
+Some notes about Marvell's NAND controller available in PXA and Armada 370/XP
+SoC (aka NFCv1 and NFCv2), with an emphasis on the latter.
+
+NFCv2 controller background
+===========================
+
+The controller has a 2176 bytes FIFO buffer. Therefore, in order to support
+larger pages, I/O operations on 4 KiB and 8 KiB pages are done with a set of
+chunked transfers.
+
+For instance, if we choose a 2048 data chunk and set "BCH" ECC (see below)
+we'll have this layout in the pages:
+
+  ------------------------------------------------------------------------------
+  | 2048B data | 32B spare | 30B ECC || 2048B data | 32B spare | 30B ECC | ... |
+  ------------------------------------------------------------------------------
+
+The driver reads the data and spare portions independently and builds an internal
+buffer with this layout (in the 4 KiB page case):
+
+  ------------------------------------------
+  |     4096B data     |     64B spare     |
+  ------------------------------------------
+
+Also, for the READOOB command the driver disables the ECC and reads a 'spare + ECC'
+OOB, one per chunk read.
+
+  -------------------------------------------------------------------
+  |     4096B data     |  32B spare | 30B ECC | 32B spare | 30B ECC |
+  -------------------------------------------------------------------
+
+So, in order to achieve reading (for instance), we issue several READ0 commands
+(with some additional controller-specific magic) and read two chunks of 2080B
+(2048 data + 32 spare) each.
+The driver accommodates this data to expose a contiguous buffer to the NAND core
+(4096 data + spare) or (4096 + spare + ECC + spare + ECC).
+
+ECC
+===
+
+The controller has built-in hardware ECC capabilities. In addition it is
+configurable between two modes: 1) Hamming, 2) BCH.
+
+Note that the actual BCH mode: BCH-4 or BCH-8 will depend on the way
+the controller is configured to transfer the data.
+
+In the BCH mode the ECC code will be calculated for each transferred chunk
+and expected to be located (when reading/programming) right after the spare
+bytes as the figure above shows.
+
+So, repeating the above scheme, a 2048B data chunk will be followed by 32B
+spare, and then the ECC controller will read/write the ECC code (30B in
+this case):
+
+  ------------------------------------
+  | 2048B data | 32B spare | 30B ECC |
+  ------------------------------------
+
+If the ECC mode is 'BCH' then the ECC is *always* 30 bytes long.
+If the ECC mode is 'Hamming' the ECC is 6 bytes long, for each 512B block.
+So in Hamming mode, a 2048B page will have a 24B ECC.
+
+Despite all of the above, the controller requires the driver to only read or
+write in multiples of 8-bytes, because the data buffer is 64-bits.
+
+OOB
+===
+
+Because of the above scheme, and because the "spare" OOB is really located in
+the middle of a page, spare OOB cannot be read or written independently of the
+data area. In other words, in order to read the OOB (aka READOOB), the entire
+page (aka READ0) has to be read.
+
+In the same sense, in order to write to the spare OOB the driver has to write
+an *entire* page.
+
+Factory bad blocks handling
+===========================
+
+Given that BCH ECC requires laying out the device's pages in a split
+data/OOB/data/OOB way, the controller has a view of the flash page that's
+different from the specified (aka the manufacturer's) view. In other words,
+
+Factory view:
+
+  -----------------------------------------------
+  |                    Data           |x  OOB   |
+  -----------------------------------------------
+
+Driver's view:
+
+  -----------------------------------------------
+  |      Data      | OOB |      Data   x  | OOB |
+  -----------------------------------------------
+
+It can be seen from the above, that the factory bad block marker must be
+searched within the 'data' region, and not in the usual OOB region.
+
+In addition, this means under regular usage the driver will write such
+position (since it belongs to the data region) and every used block is
+likely to be marked as bad.
+
+For this reason, marking the block as bad in the OOB is explicitly
+disabled by using the NAND_BBT_NO_OOB_BBM option in the driver. The rationale
+for this is that there's no point in marking a block as bad, because good
+blocks are also 'marked as bad' (in the OOB BBM sense) under normal usage.
+
+Instead, the driver relies on the bad block table alone, and should only perform
+the bad block scan on the very first time (when the device hasn't been used).
index 5de03740cdd50c4177d649bc1675c7e412526651..ab42c95f9985c0172109308c052cfe426a5bed9a 100644 (file)
@@ -1088,6 +1088,12 @@ igmpv3_unsolicited_report_interval - INTEGER
        IGMPv3 report retransmit will take place.
        Default: 1000 (1 seconds)
 
+promote_secondaries - BOOLEAN
+       When a primary IP address is removed from this interface
+       promote a corresponding secondary IP address instead of
+       removing all the corresponding secondary IP addresses.
+
+
 tag - INTEGER
        Allows you to write a number, which can be used as required.
        Default value is 0.
index 91ffe1d9e8cab8bd1c42314fb065b6ace17632bb..1404674c0a0282af7d077cf55a5da076875bd2ed 100644 (file)
@@ -583,6 +583,7 @@ Currently implemented fanout policies are:
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
   - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
+  - PACKET_FANOUT_QM: schedule to socket by the skb's recorded queue_mapping
 
 Minimal example code by David S. Miller (try things like "./test eth0 hash",
 "./test eth0 lb", etc.):
index 9f5481bdc5a43f942fb833f3131901249d2d5c50..d614a9b6a28048ecf879e96b3aada686ecadb6cb 100644 (file)
@@ -696,7 +696,9 @@ swappiness
 
 This control is used to define how aggressive the kernel will swap
 memory pages.  Higher values will increase agressiveness, lower values
-decrease the amount of swap.
+decrease the amount of swap.  A value of 0 instructs the kernel not to
+initiate swap until the amount of free and file-backed pages is less
+than the high water mark in a zone.
 
 The default value is 60.
 
index 9bf651c578062ecbc0cd9a6d95959955882da987..a31a6e3e199fa9c114515cba48c57c6931d77e6f 100644 (file)
@@ -309,36 +309,36 @@ F:        sound/pci/ad1889.*
 
 AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/AD5254
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/misc/ad525x_dpot.c
 
 AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/AD5398
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/regulator/ad5398.c
 
 AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/AD7142
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/input/misc/ad714x.c
 
 AD7877 TOUCHSCREEN DRIVER
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/AD7877
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/input/touchscreen/ad7877.c
 
 AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/AD7879
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/input/touchscreen/ad7879.c
 
@@ -347,7 +347,7 @@ M:  Jiri Kosina <jkosina@suse.cz>
 S:     Maintained
 
 ADM1025 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/adm1025
@@ -374,8 +374,8 @@ F:  include/media/adp1653.h
 
 ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/ADP5520
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/mfd/adp5520.c
 F:     drivers/video/backlight/adp5520_bl.c
@@ -385,16 +385,16 @@ F:        drivers/input/keyboard/adp5520-keys.c
 
 ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/ADP5588
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/input/keyboard/adp5588-keys.c
 F:     drivers/gpio/gpio-adp5588.c
 
 ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/ADP8860
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/video/backlight/adp8860_bl.c
 
@@ -412,7 +412,7 @@ S:  Maintained
 F:     drivers/macintosh/therm_adt746x.c
 
 ADT7475 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/adt7475
@@ -420,8 +420,8 @@ F:  drivers/hwmon/adt7475.c
 
 ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
 M:     Michael Hennerich <michael.hennerich@analog.com>
-L:     device-drivers-devel@blackfin.uclinux.org
 W:     http://wiki.analog.com/ADXL345
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/input/misc/adxl34x.c
 
@@ -627,9 +627,9 @@ F:  drivers/media/i2c/adv7842*
 
 ANALOG DEVICES INC ASOC CODEC DRIVERS
 M:     Lars-Peter Clausen <lars@metafoo.de>
-L:     device-drivers-devel@blackfin.uclinux.org
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://wiki.analog.com/
+W:     http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     sound/soc/codecs/adau*
 F:     sound/soc/codecs/adav*
@@ -639,7 +639,7 @@ F:  sound/soc/codecs/ssm*
 F:     sound/soc/codecs/sigmadsp.*
 
 ANALOG DEVICES INC ASOC DRIVERS
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org/
 S:     Supported
@@ -1742,56 +1742,54 @@ F:      fs/bfs/
 F:     include/uapi/linux/bfs_fs.h
 
 BLACKFIN ARCHITECTURE
-M:     Mike Frysinger <vapier@gentoo.org>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+M:     Steven Miao <realmz6@gmail.com>
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org
 S:     Supported
 F:     arch/blackfin/
 
 BLACKFIN EMAC DRIVER
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org
 S:     Supported
 F:     drivers/net/ethernet/adi/
 
 BLACKFIN RTC DRIVER
-M:     Mike Frysinger <vapier.adi@gmail.com>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org
 S:     Supported
 F:     drivers/rtc/rtc-bfin.c
 
 BLACKFIN SDH DRIVER
 M:     Sonic Zhang <sonic.zhang@analog.com>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org
 S:     Supported
 F:     drivers/mmc/host/bfin_sdh.c
 
 BLACKFIN SERIAL DRIVER
 M:     Sonic Zhang <sonic.zhang@analog.com>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org
 S:     Supported
 F:     drivers/tty/serial/bfin_uart.c
 
 BLACKFIN WATCHDOG DRIVER
-M:     Mike Frysinger <vapier.adi@gmail.com>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org
 S:     Supported
 F:     drivers/watchdog/bfin_wdt.c
 
 BLACKFIN I2C TWI DRIVER
 M:     Sonic Zhang <sonic.zhang@analog.com>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org/
 S:     Supported
 F:     drivers/i2c/busses/i2c-bfin-twi.c
 
 BLACKFIN MEDIA DRIVER
 M:     Scott Jiang <scott.jiang.linux@gmail.com>
-L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     adi-buildroot-devel@lists.sourceforge.net
 W:     http://blackfin.uclinux.org/
 S:     Supported
 F:     drivers/media/platform/blackfin/
@@ -3389,7 +3387,7 @@ F:        drivers/video/exynos/exynos_mipi*
 F:     include/video/exynos_mipi*
 
 F71805F HARDWARE MONITORING DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/f71805f
@@ -3916,7 +3914,7 @@ S:        Odd Fixes
 F:     drivers/tty/hvc/
 
 HARDWARE MONITORING
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
@@ -4144,7 +4142,7 @@ F:        include/linux/hyperv.h
 F:     tools/hv/
 
 I2C OVER PARALLEL PORT
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-parport
@@ -4153,7 +4151,7 @@ F:        drivers/i2c/busses/i2c-parport.c
 F:     drivers/i2c/busses/i2c-parport-light.c
 
 I2C/SMBUS CONTROLLER DRIVERS FOR PC
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-ali1535
@@ -4194,7 +4192,7 @@ F:        drivers/i2c/busses/i2c-ismt.c
 F:     Documentation/i2c/busses/i2c-ismt
 
 I2C/SMBUS STUB DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/i2c-stub.c
@@ -4213,7 +4211,7 @@ F:        include/uapi/linux/i2c.h
 F:     include/uapi/linux/i2c-*.h
 
 I2C-TAOS-EVM DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-taos-evm
@@ -4770,7 +4768,7 @@ S:        Maintained
 F:     drivers/isdn/hardware/eicon/
 
 IT87 HARDWARE MONITORING DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/it87
@@ -5140,7 +5138,7 @@ F:        drivers/leds/
 F:     include/linux/leds.h
 
 LEGACY EEPROM DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 S:     Maintained
 F:     Documentation/misc-devices/eeprom
 F:     drivers/misc/eeprom/eeprom.c
@@ -5288,21 +5286,21 @@ S:      Maintained
 F:     drivers/hwmon/lm73.c
 
 LM78 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/lm78
 F:     drivers/hwmon/lm78.c
 
 LM83 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/lm83
 F:     drivers/hwmon/lm83.c
 
 LM90 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/lm90
@@ -5622,10 +5620,11 @@ F:      mm/page_cgroup.c
 
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:     David Woodhouse <dwmw2@infradead.org>
+M:     Brian Norris <computersforpeace@gmail.com>
 L:     linux-mtd@lists.infradead.org
 W:     http://www.linux-mtd.infradead.org/
 Q:     http://patchwork.ozlabs.org/project/linux-mtd/list/
-T:     git git://git.infradead.org/mtd-2.6.git
+T:     git git://git.infradead.org/linux-mtd.git
 S:     Maintained
 F:     drivers/mtd/
 F:     include/linux/mtd/
@@ -6494,7 +6493,7 @@ S:        Maintained
 F:     drivers/char/pc8736x_gpio.c
 
 PC87427 HARDWARE MONITORING DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/pc87427
@@ -6942,6 +6941,12 @@ F:       include/sound/pxa2xx-lib.h
 F:     sound/arm/pxa*
 F:     sound/soc/pxa/
 
+PXA3xx NAND FLASH DRIVER
+M:     Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+L:     linux-mtd@lists.infradead.org
+S:     Maintained
+F:     drivers/mtd/nand/pxa3xx-nand.c
+
 MMP SUPPORT
 M:     Eric Miao <eric.y.miao@gmail.com>
 M:     Haojian Zhuang <haojian.zhuang@gmail.com>
@@ -7938,7 +7943,7 @@ F:        Documentation/hwmon/sch5627
 F:     drivers/hwmon/sch5627.c
 
 SMSC47B397 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/smsc47b397
@@ -9456,7 +9461,7 @@ F:        Documentation/hwmon/w83793
 F:     drivers/hwmon/w83793.c
 
 W83795 HARDWARE MONITORING DRIVER
-M:     Jean Delvare <khali@linux-fr.org>
+M:     Jean Delvare <jdelvare@suse.de>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     drivers/hwmon/w83795.c
index 455fd484b20edb92a0c916c1be3cc7adffc6cfa4..4231023c0b80429ff4e44f315eb099743d4d043a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -311,9 +311,15 @@ endif
 # If the user is running make -s (silent mode), suppress echoing of
 # commands
 
+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+  quiet=silent_
+endif
+else                                   # make-3.8x
 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
   quiet=silent_
 endif
+endif
 
 export quiet Q KBUILD_VERBOSE
 
@@ -633,7 +639,7 @@ endif
 
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
-KBUILD_AFLAGS  += -gdwarf-2
+KBUILD_AFLAGS  += -Wa,--gdwarf-2
 endif
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -682,6 +688,9 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=implicit-int)
 # require functions to have arguments in prototypes, not empty 'int foo()'
 KBUILD_CFLAGS   += $(call cc-option,-Werror=strict-prototypes)
 
+# Prohibit date/time macros, which would make the build non-deterministic
+KBUILD_CFLAGS   += $(call cc-option,-Werror=date-time)
+
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
index 0283e9e44e0d8cf368066484a4e502d5f1b399ef..66ee5527aefc08d7e5e3af13dbf553ce67d7c6c7 100644 (file)
@@ -11,6 +11,8 @@
 
 #ifdef __ASSEMBLY__
 
+#define ASM_NL          `      /* use '`' to mark new line in macro */
+
 /* Can't use the ENTRY macro in linux/linkage.h
  * gas considers ';' as comment vs. newline
  */
index 23d5e3946589a6c5e240b7d0060d84efee71d5d5..08a9ef58d9c3567f1b78862ed136546416ad241d 100644 (file)
@@ -96,7 +96,7 @@ tune-$(CONFIG_CPU_V6K)                =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 tune-y := $(tune-y)
 
 ifeq ($(CONFIG_AEABI),y)
-CFLAGS_ABI     :=-mabi=aapcs-linux -mno-thumb-interwork
+CFLAGS_ABI     :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
 else
 CFLAGS_ABI     :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
 endif
index ede21c16fdc03f38d585de84d1f13b83192f5bf9..b9d6a8b485e0bdeba13c1848391fca3ca263341b 100644 (file)
@@ -152,10 +152,13 @@ dtb-$(CONFIG_ARCH_MXC) += \
        imx53-mba53.dtb \
        imx53-qsb.dtb \
        imx53-smd.dtb \
+       imx6dl-cubox-i.dtb \
+       imx6dl-hummingboard.dtb \
        imx6dl-sabreauto.dtb \
        imx6dl-sabresd.dtb \
        imx6dl-wandboard.dtb \
        imx6q-arm2.dtb \
+       imx6q-cubox-i.dtb \
        imx6q-phytec-pbab01.dtb \
        imx6q-sabreauto.dtb \
        imx6q-sabrelite.dtb \
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..9ccfe50
--- /dev/null
@@ -0,0 +1,664 @@
+/*
+ * Device Tree Source for AM33xx clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&scrm_clocks {
+       sys_clkin_ck: sys_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&virt_19200000_ck>, <&virt_24000000_ck>, <&virt_25000000_ck>, <&virt_26000000_ck>;
+               ti,bit-shift = <22>;
+               reg = <0x0040>;
+       };
+
+       adc_tsc_fck: adc_tsc_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dcan0_fck: dcan0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dcan1_fck: dcan1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       mcasp0_fck: mcasp0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       mcasp1_fck: mcasp1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       smartreflex0_fck: smartreflex0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       smartreflex1_fck: smartreflex1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       sha0_fck: sha0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       aes0_fck: aes0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       rng_fck: rng_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       ehrpwm0_gate_tbclk: ehrpwm0_gate_tbclk {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_per_m2_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x0664>;
+       };
+
+       ehrpwm0_tbclk: ehrpwm0_tbclk {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&ehrpwm0_gate_tbclk>;
+       };
+
+       ehrpwm1_gate_tbclk: ehrpwm1_gate_tbclk {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_per_m2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0664>;
+       };
+
+       ehrpwm1_tbclk: ehrpwm1_tbclk {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&ehrpwm1_gate_tbclk>;
+       };
+
+       ehrpwm2_gate_tbclk: ehrpwm2_gate_tbclk {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_per_m2_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0664>;
+       };
+
+       ehrpwm2_tbclk: ehrpwm2_tbclk {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&ehrpwm2_gate_tbclk>;
+       };
+};
+&prcm_clocks {
+       clk_32768_ck: clk_32768_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       clk_rc32k_ck: clk_rc32k_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32000>;
+       };
+
+       virt_19200000_ck: virt_19200000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <19200000>;
+       };
+
+       virt_24000000_ck: virt_24000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <24000000>;
+       };
+
+       virt_25000000_ck: virt_25000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <25000000>;
+       };
+
+       virt_26000000_ck: virt_26000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <26000000>;
+       };
+
+       tclkin_ck: tclkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       dpll_core_ck: dpll_core_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-core-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x0490>, <0x045c>, <0x0468>;
+       };
+
+       dpll_core_x2_ck: dpll_core_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-x2-clock";
+               clocks = <&dpll_core_ck>;
+       };
+
+       dpll_core_m4_ck: dpll_core_m4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               reg = <0x0480>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_core_m5_ck: dpll_core_m5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               reg = <0x0484>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_core_m6_ck: dpll_core_m6_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               reg = <0x04d8>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_mpu_ck: dpll_mpu_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x0488>, <0x0420>, <0x042c>;
+       };
+
+       dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_mpu_ck>;
+               ti,max-div = <31>;
+               reg = <0x04a8>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_ddr_ck: dpll_ddr_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-no-gate-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x0494>, <0x0434>, <0x0440>;
+       };
+
+       dpll_ddr_m2_ck: dpll_ddr_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_ddr_ck>;
+               ti,max-div = <31>;
+               reg = <0x04a0>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_ddr_m2_div2_ck: dpll_ddr_m2_div2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_ddr_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       dpll_disp_ck: dpll_disp_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-no-gate-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x0498>, <0x0448>, <0x0454>;
+       };
+
+       dpll_disp_m2_ck: dpll_disp_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_disp_ck>;
+               ti,max-div = <31>;
+               reg = <0x04a4>;
+               ti,index-starts-at-one;
+               ti,set-rate-parent;
+       };
+
+       dpll_per_ck: dpll_per_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-no-gate-j-type-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x048c>, <0x0470>, <0x049c>;
+       };
+
+       dpll_per_m2_ck: dpll_per_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_ck>;
+               ti,max-div = <31>;
+               reg = <0x04ac>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_per_m2_div4_wkupdm_ck: dpll_per_m2_div4_wkupdm_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       dpll_per_m2_div4_ck: dpll_per_m2_div4_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       cefuse_fck: cefuse_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_clkin_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0a20>;
+       };
+
+       clk_24mhz: clk_24mhz {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       clkdiv32k_ck: clkdiv32k_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&clk_24mhz>;
+               clock-mult = <1>;
+               clock-div = <732>;
+       };
+
+       clkdiv32k_ick: clkdiv32k_ick {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x014c>;
+       };
+
+       l3_gclk: l3_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       pruss_ocp_gclk: pruss_ocp_gclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3_gclk>, <&dpll_disp_m2_ck>;
+               reg = <0x0530>;
+       };
+
+       mmu_fck: mmu_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_core_m4_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0914>;
+       };
+
+       timer1_fck: timer1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&clkdiv32k_ick>, <&tclkin_ck>, <&clk_rc32k_ck>, <&clk_32768_ck>;
+               reg = <0x0528>;
+       };
+
+       timer2_fck: timer2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x0508>;
+       };
+
+       timer3_fck: timer3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x050c>;
+       };
+
+       timer4_fck: timer4_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x0510>;
+       };
+
+       timer5_fck: timer5_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x0518>;
+       };
+
+       timer6_fck: timer6_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x051c>;
+       };
+
+       timer7_fck: timer7_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x0504>;
+       };
+
+       usbotg_fck: usbotg_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_per_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x047c>;
+       };
+
+       dpll_core_m4_div2_ck: dpll_core_m4_div2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       ieee5000_fck: ieee5000_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_core_m4_div2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x00e4>;
+       };
+
+       wdt1_fck: wdt1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_rc32k_ck>, <&clkdiv32k_ick>;
+               reg = <0x0538>;
+       };
+
+       l4_rtc_gclk: l4_rtc_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       l4hs_gclk: l4hs_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l3s_gclk: l3s_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_div2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l4fw_gclk: l4fw_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_div2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l4ls_gclk: l4ls_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_div2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       sysclk_div_ck: sysclk_div_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m5_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_m5_ck>, <&dpll_core_m4_ck>;
+               reg = <0x0520>;
+       };
+
+       gpio0_dbclk_mux_ck: gpio0_dbclk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_rc32k_ck>, <&clk_32768_ck>, <&clkdiv32k_ick>;
+               reg = <0x053c>;
+       };
+
+       gpio0_dbclk: gpio0_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&gpio0_dbclk_mux_ck>;
+               ti,bit-shift = <18>;
+               reg = <0x0408>;
+       };
+
+       gpio1_dbclk: gpio1_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <18>;
+               reg = <0x00ac>;
+       };
+
+       gpio2_dbclk: gpio2_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <18>;
+               reg = <0x00b0>;
+       };
+
+       gpio3_dbclk: gpio3_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <18>;
+               reg = <0x00b4>;
+       };
+
+       lcd_gclk: lcd_gclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_disp_m2_ck>, <&dpll_core_m5_ck>, <&dpll_per_m2_ck>;
+               reg = <0x0534>;
+               ti,set-rate-parent;
+       };
+
+       mmc_clk: mmc_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       gfx_fclk_clksel_ck: gfx_fclk_clksel_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_m4_ck>, <&dpll_per_m2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x052c>;
+       };
+
+       gfx_fck_div_ck: gfx_fck_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&gfx_fclk_clksel_ck>;
+               reg = <0x052c>;
+               ti,max-div = <2>;
+       };
+
+       sysclkout_pre_ck: sysclkout_pre_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_32768_ck>, <&l3_gclk>, <&dpll_ddr_m2_ck>, <&dpll_per_m2_ck>, <&lcd_gclk>;
+               reg = <0x0700>;
+       };
+
+       clkout2_div_ck: clkout2_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sysclkout_pre_ck>;
+               ti,bit-shift = <3>;
+               ti,max-div = <8>;
+               reg = <0x0700>;
+       };
+
+       dbg_sysclk_ck: dbg_sysclk_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_clkin_ck>;
+               ti,bit-shift = <19>;
+               reg = <0x0414>;
+       };
+
+       dbg_clka_ck: dbg_clka_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_core_m4_ck>;
+               ti,bit-shift = <30>;
+               reg = <0x0414>;
+       };
+
+       stm_pmd_clock_mux_ck: stm_pmd_clock_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dbg_sysclk_ck>, <&dbg_clka_ck>;
+               ti,bit-shift = <22>;
+               reg = <0x0414>;
+       };
+
+       trace_pmd_clk_mux_ck: trace_pmd_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dbg_sysclk_ck>, <&dbg_clka_ck>;
+               ti,bit-shift = <20>;
+               reg = <0x0414>;
+       };
+
+       stm_clk_div_ck: stm_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&stm_pmd_clock_mux_ck>;
+               ti,bit-shift = <27>;
+               ti,max-div = <64>;
+               reg = <0x0414>;
+               ti,index-power-of-two;
+       };
+
+       trace_clk_div_ck: trace_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&trace_pmd_clk_mux_ck>;
+               ti,bit-shift = <24>;
+               ti,max-div = <64>;
+               reg = <0x0414>;
+               ti,index-power-of-two;
+       };
+
+       clkout2_ck: clkout2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkout2_div_ck>;
+               ti,bit-shift = <7>;
+               reg = <0x0700>;
+       };
+};
+
+&prcm_clockdomains {
+       clk_24mhz_clkdm: clk_24mhz_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&clkdiv32k_ick>;
+       };
+};
index f6d8ffe98d0bfa15bc516b8fde2847f3568fcdd2..6d95d3df33c7913dc0feb76598f6e94ea15cf0fa 100644 (file)
                ranges;
                ti,hwmods = "l3_main";
 
+               prcm: prcm@44e00000 {
+                       compatible = "ti,am3-prcm";
+                       reg = <0x44e00000 0x4000>;
+
+                       prcm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       prcm_clockdomains: clockdomains {
+                       };
+               };
+
+               scrm: scrm@44e10000 {
+                       compatible = "ti,am3-scrm";
+                       reg = <0x44e10000 0x2000>;
+
+                       scrm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       scrm_clockdomains: clockdomains {
+                       };
+               };
+
                intc: interrupt-controller@48200000 {
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
                };
        };
 };
+
+/include/ "am33xx-clocks.dtsi"
index 2fbe02faa8b18f2b18dd0e8bedff0256db4ff501..788391f916844130d8d6cdb2722479163863a707 100644 (file)
@@ -61,3 +61,6 @@
                };
        };
 };
+
+/include/ "am35xx-clocks.dtsi"
+/include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
diff --git a/arch/arm/boot/dts/am35xx-clocks.dtsi b/arch/arm/boot/dts/am35xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..df489d3
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Device Tree Source for OMAP3 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&scrm_clocks {
+       emac_ick: emac_ick {
+               #clock-cells = <0>;
+               compatible = "ti,am35xx-gate-clock";
+               clocks = <&ipss_ick>;
+               reg = <0x059c>;
+               ti,bit-shift = <1>;
+       };
+
+       emac_fck: emac_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&rmii_ck>;
+               reg = <0x059c>;
+               ti,bit-shift = <9>;
+       };
+
+       vpfe_ick: vpfe_ick {
+               #clock-cells = <0>;
+               compatible = "ti,am35xx-gate-clock";
+               clocks = <&ipss_ick>;
+               reg = <0x059c>;
+               ti,bit-shift = <2>;
+       };
+
+       vpfe_fck: vpfe_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&pclk_ck>;
+               reg = <0x059c>;
+               ti,bit-shift = <10>;
+       };
+
+       hsotgusb_ick_am35xx: hsotgusb_ick_am35xx {
+               #clock-cells = <0>;
+               compatible = "ti,am35xx-gate-clock";
+               clocks = <&ipss_ick>;
+               reg = <0x059c>;
+               ti,bit-shift = <0>;
+       };
+
+       hsotgusb_fck_am35xx: hsotgusb_fck_am35xx {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x059c>;
+               ti,bit-shift = <8>;
+       };
+
+       hecc_ck: hecc_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am35xx-gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x059c>;
+               ti,bit-shift = <3>;
+       };
+};
+&cm_clocks {
+       ipss_ick: ipss_ick {
+               #clock-cells = <0>;
+               compatible = "ti,am35xx-interface-clock";
+               clocks = <&core_l3_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <4>;
+       };
+
+       rmii_ck: rmii_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <50000000>;
+       };
+
+       pclk_ck: pclk_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <27000000>;
+       };
+
+       uart4_ick_am35xx: uart4_ick_am35xx {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <23>;
+       };
+
+       uart4_fck_am35xx: uart4_fck_am35xx {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <23>;
+       };
+};
+
+&cm_clockdomains {
+       core_l3_clkdm: core_l3_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&sdrc_ick>, <&ipss_ick>, <&emac_ick>, <&vpfe_ick>,
+                        <&hsotgusb_ick_am35xx>, <&hsotgusb_fck_am35xx>,
+                        <&hecc_ck>;
+       };
+
+       core_l4_clkdm: core_l4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&cpefuse_fck>, <&ts_fck>, <&usbtll_fck>,
+                        <&usbtll_ick>, <&mmchs3_ick>, <&mmchs3_fck>,
+                        <&mmchs2_fck>, <&mmchs1_fck>, <&i2c3_fck>, <&i2c2_fck>,
+                        <&i2c1_fck>, <&mcspi4_fck>, <&mcspi3_fck>,
+                        <&mcspi2_fck>, <&mcspi1_fck>, <&uart2_fck>,
+                        <&uart1_fck>, <&hdq_fck>, <&mmchs2_ick>, <&mmchs1_ick>,
+                        <&hdq_ick>, <&mcspi4_ick>, <&mcspi3_ick>,
+                        <&mcspi2_ick>, <&mcspi1_ick>, <&i2c3_ick>, <&i2c2_ick>,
+                        <&i2c1_ick>, <&uart2_ick>, <&uart1_ick>, <&gpt11_ick>,
+                        <&gpt10_ick>, <&mcbsp5_ick>, <&mcbsp1_ick>,
+                        <&omapctrl_ick>, <&aes2_ick>, <&sha12_ick>,
+                        <&uart4_ick_am35xx>, <&uart4_fck_am35xx>;
+       };
+};
index 974d103ab3b1e673fe255e28e5c13d8544ad2565..c6bd4d986c290aaeb7bd4e0f78e2d48489c2bee4 100644 (file)
                ranges;
                ti,hwmods = "l3_main";
 
+               prcm: prcm@44df0000 {
+                       compatible = "ti,am4-prcm";
+                       reg = <0x44df0000 0x11000>;
+
+                       prcm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       prcm_clockdomains: clockdomains {
+                       };
+               };
+
+               scrm: scrm@44e10000 {
+                       compatible = "ti,am4-scrm";
+                       reg = <0x44e10000 0x2000>;
+
+                       scrm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       scrm_clockdomains: clockdomains {
+                       };
+               };
+
                edma: edma@49000000 {
                        compatible = "ti,edma3";
                        ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
                };
        };
 };
+
+/include/ "am43xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..142009c
--- /dev/null
@@ -0,0 +1,656 @@
+/*
+ * Device Tree Source for AM43xx clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&scrm_clocks {
+       sys_clkin_ck: sys_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&virt_19200000_ck>, <&virt_24000000_ck>, <&virt_25000000_ck>, <&virt_26000000_ck>;
+               ti,bit-shift = <22>;
+               reg = <0x0040>;
+       };
+
+       adc_tsc_fck: adc_tsc_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dcan0_fck: dcan0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dcan1_fck: dcan1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       mcasp0_fck: mcasp0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       mcasp1_fck: mcasp1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       smartreflex0_fck: smartreflex0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       smartreflex1_fck: smartreflex1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       sha0_fck: sha0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       aes0_fck: aes0_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+};
+&prcm_clocks {
+       clk_32768_ck: clk_32768_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       clk_rc32k_ck: clk_rc32k_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       virt_19200000_ck: virt_19200000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <19200000>;
+       };
+
+       virt_24000000_ck: virt_24000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <24000000>;
+       };
+
+       virt_25000000_ck: virt_25000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <25000000>;
+       };
+
+       virt_26000000_ck: virt_26000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <26000000>;
+       };
+
+       tclkin_ck: tclkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <26000000>;
+       };
+
+       dpll_core_ck: dpll_core_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-core-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x2d20>, <0x2d24>, <0x2d2c>;
+       };
+
+       dpll_core_x2_ck: dpll_core_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-x2-clock";
+               clocks = <&dpll_core_ck>;
+       };
+
+       dpll_core_m4_ck: dpll_core_m4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2d38>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_m5_ck: dpll_core_m5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2d3c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_m6_ck: dpll_core_m6_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2d40>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_mpu_ck: dpll_mpu_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x2d60>, <0x2d64>, <0x2d6c>;
+       };
+
+       dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_mpu_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2d70>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_ddr_ck: dpll_ddr_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x2da0>, <0x2da4>, <0x2dac>;
+       };
+
+       dpll_ddr_m2_ck: dpll_ddr_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_ddr_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2db0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_disp_ck: dpll_disp_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x2e20>, <0x2e24>, <0x2e2c>;
+       };
+
+       dpll_disp_m2_ck: dpll_disp_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_disp_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2e30>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_ck: dpll_per_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-j-type-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x2de0>, <0x2de4>, <0x2dec>;
+       };
+
+       dpll_per_m2_ck: dpll_per_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2df0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m2_div4_wkupdm_ck: dpll_per_m2_div4_wkupdm_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       dpll_per_m2_div4_ck: dpll_per_m2_div4_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       clk_24mhz: clk_24mhz {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       clkdiv32k_ck: clkdiv32k_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&clk_24mhz>;
+               clock-mult = <1>;
+               clock-div = <732>;
+       };
+
+       clkdiv32k_ick: clkdiv32k_ick {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x2a38>;
+       };
+
+       sysclk_div: sysclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       pruss_ocp_gclk: pruss_ocp_gclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sysclk_div>, <&dpll_disp_m2_ck>;
+               reg = <0x4248>;
+       };
+
+       clk_32k_tpm_ck: clk_32k_tpm_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       timer1_fck: timer1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&clkdiv32k_ick>, <&tclkin_ck>, <&clk_rc32k_ck>, <&clk_32768_ck>, <&clk_32k_tpm_ck>;
+               reg = <0x4200>;
+       };
+
+       timer2_fck: timer2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x4204>;
+       };
+
+       timer3_fck: timer3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x4208>;
+       };
+
+       timer4_fck: timer4_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x420c>;
+       };
+
+       timer5_fck: timer5_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x4210>;
+       };
+
+       timer6_fck: timer6_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x4214>;
+       };
+
+       timer7_fck: timer7_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
+               reg = <0x4218>;
+       };
+
+       wdt1_fck: wdt1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_rc32k_ck>, <&clkdiv32k_ick>;
+               reg = <0x422c>;
+       };
+
+       l3_gclk: l3_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_core_m4_div2_ck: dpll_core_m4_div2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sysclk_div>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       l4hs_gclk: l4hs_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l3s_gclk: l3s_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_div2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l4ls_gclk: l4ls_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4_div2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m5_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sysclk_div>, <&dpll_core_m5_ck>, <&dpll_disp_m2_ck>;
+               reg = <0x4238>;
+       };
+
+       clk_32k_mosc_ck: clk_32k_mosc_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       gpio0_dbclk_mux_ck: gpio0_dbclk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_rc32k_ck>, <&clk_32768_ck>, <&clkdiv32k_ick>, <&clk_32k_mosc_ck>, <&clk_32k_tpm_ck>;
+               reg = <0x4240>;
+       };
+
+       gpio0_dbclk: gpio0_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&gpio0_dbclk_mux_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x2b68>;
+       };
+
+       gpio1_dbclk: gpio1_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <8>;
+               reg = <0x8c78>;
+       };
+
+       gpio2_dbclk: gpio2_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <8>;
+               reg = <0x8c80>;
+       };
+
+       gpio3_dbclk: gpio3_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <8>;
+               reg = <0x8c88>;
+       };
+
+       gpio4_dbclk: gpio4_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <8>;
+               reg = <0x8c90>;
+       };
+
+       gpio5_dbclk: gpio5_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&clkdiv32k_ick>;
+               ti,bit-shift = <8>;
+               reg = <0x8c98>;
+       };
+
+       mmc_clk: mmc_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       gfx_fclk_clksel_ck: gfx_fclk_clksel_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sysclk_div>, <&dpll_per_m2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x423c>;
+       };
+
+       gfx_fck_div_ck: gfx_fck_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&gfx_fclk_clksel_ck>;
+               reg = <0x423c>;
+               ti,max-div = <2>;
+       };
+
+       disp_clk: disp_clk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_disp_m2_ck>, <&dpll_core_m5_ck>, <&dpll_per_m2_ck>;
+               reg = <0x4244>;
+       };
+
+       dpll_extdev_ck: dpll_extdev_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
+               reg = <0x2e60>, <0x2e64>, <0x2e6c>;
+       };
+
+       dpll_extdev_m2_ck: dpll_extdev_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_extdev_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2e70>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       mux_synctimer32k_ck: mux_synctimer32k_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_32768_ck>, <&clk_32k_tpm_ck>, <&clkdiv32k_ick>;
+               reg = <0x4230>;
+       };
+
+       synctimer_32kclk: synctimer_32kclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&mux_synctimer32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x2a30>;
+       };
+
+       timer8_fck: timer8_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
+               reg = <0x421c>;
+       };
+
+       timer9_fck: timer9_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
+               reg = <0x4220>;
+       };
+
+       timer10_fck: timer10_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
+               reg = <0x4224>;
+       };
+
+       timer11_fck: timer11_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
+               reg = <0x4228>;
+       };
+
+       cpsw_50m_clkdiv: cpsw_50m_clkdiv {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m5_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       cpsw_5m_clkdiv: cpsw_5m_clkdiv {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&cpsw_50m_clkdiv>;
+               clock-mult = <1>;
+               clock-div = <10>;
+       };
+
+       dpll_ddr_x2_ck: dpll_ddr_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,am3-dpll-x2-clock";
+               clocks = <&dpll_ddr_ck>;
+       };
+
+       dpll_ddr_m4_ck: dpll_ddr_m4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_ddr_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x2db8>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_clkdcoldo: dpll_per_clkdcoldo {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dll_aging_clk_div: dll_aging_clk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin_ck>;
+               reg = <0x4250>;
+               ti,dividers = <8>, <16>, <32>;
+       };
+
+       div_core_25m_ck: div_core_25m_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sysclk_div>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       func_12m_clk: func_12m_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <16>;
+       };
+
+       vtp_clk_div: vtp_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       usbphy_32khz_clkmux: usbphy_32khz_clkmux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&clk_32768_ck>, <&clk_32k_tpm_ck>;
+               reg = <0x4260>;
+       };
+};
index 56ee8282a7a8ef201f1833135ddfb3b4d81e38a9..997901f7ed7381c77ff6c669f4f7571474ebd4b2 100644 (file)
                        watchdog@fffffd40 {
                                compatible = "atmel,at91sam9260-wdt";
                                reg = <0xfffffd40 0x10>;
+                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+                               atmel,watchdog-type = "hardware";
+                               atmel,reset-type = "all";
+                               atmel,dbg-halt;
+                               atmel,idle-halt;
                                status = "disabled";
                        };
                };
index c8fa9b9f07e34e0b38e089c5a5cbd9fab9c5507e..0042f73068b0c913f729cb6e0fd5b3b5b01609d5 100644 (file)
                        watchdog@fffffd40 {
                                compatible = "atmel,at91sam9260-wdt";
                                reg = <0xfffffd40 0x10>;
+                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+                               atmel,watchdog-type = "hardware";
+                               atmel,reset-type = "all";
+                               atmel,dbg-halt;
+                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index ef0857cb171c4aab25aac34b6bf9048e18f59d9b..cbcc058b26b4ebea4ba81ff24e4ff21c3edc95ee 100644 (file)
                        watchdog@fffffd40 {
                                compatible = "atmel,at91sam9260-wdt";
                                reg = <0xfffffd40 0x10>;
+                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+                               atmel,watchdog-type = "hardware";
+                               atmel,reset-type = "all";
+                               atmel,dbg-halt;
+                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index 7248270a3ea61525ac75f5f6a206ca6f3afd277f..394e6ce2afb75547f229c55348ff527376e98927 100644 (file)
                        watchdog@fffffe40 {
                                compatible = "atmel,at91sam9260-wdt";
                                reg = <0xfffffe40 0x10>;
+                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+                               atmel,watchdog-type = "hardware";
+                               atmel,reset-type = "all";
+                               atmel,dbg-halt;
+                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index 6e5e9cfc3c4997f98af3b5608b743ddd97cd0d8d..174219de92fa7380d4e6dc8a2a9916fa05e70a6e 100644 (file)
                        watchdog@fffffe40 {
                                compatible = "atmel,at91sam9260-wdt";
                                reg = <0xfffffe40 0x10>;
+                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+                               atmel,watchdog-type = "hardware";
+                               atmel,reset-type = "all";
+                               atmel,dbg-halt;
+                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index d0df4c4e8b0a1bd422f33015a51656d8c063b5d9..1fd75aa4639da23c8e1b05bd1c5e28730f01c791 100644 (file)
                interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
 
+               prm: prm@4ae06000 {
+                       compatible = "ti,dra7-prm";
+                       reg = <0x4ae06000 0x3000>;
+
+                       prm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       prm_clockdomains: clockdomains {
+                       };
+               };
+
+               cm_core_aon: cm_core_aon@4a005000 {
+                       compatible = "ti,dra7-cm-core-aon";
+                       reg = <0x4a005000 0x2000>;
+
+                       cm_core_aon_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm_core_aon_clockdomains: clockdomains {
+                       };
+               };
+
+               cm_core: cm_core@4a008000 {
+                       compatible = "ti,dra7-cm-core";
+                       reg = <0x4a008000 0x3000>;
+
+                       cm_core_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm_core_clockdomains: clockdomains {
+                       };
+               };
+
                counter32k: counter@4ae04000 {
                        compatible = "ti,omap-counter32k";
                        reg = <0x4ae04000 0x40>;
                };
        };
 };
+
+/include/ "dra7xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..e96da9a
--- /dev/null
@@ -0,0 +1,2015 @@
+/*
+ * Device Tree Source for DRA7xx clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm_core_aon_clocks {
+       atl_clkin0_ck: atl_clkin0_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       atl_clkin1_ck: atl_clkin1_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       atl_clkin2_ck: atl_clkin2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       atlclkin3_ck: atlclkin3_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       hdmi_clkin_ck: hdmi_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       mlb_clkin_ck: mlb_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       mlbp_clkin_ck: mlbp_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       pciesref_acs_clk_ck: pciesref_acs_clk_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <100000000>;
+       };
+
+       ref_clkin0_ck: ref_clkin0_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       ref_clkin1_ck: ref_clkin1_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       ref_clkin2_ck: ref_clkin2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       ref_clkin3_ck: ref_clkin3_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       rmii_clk_ck: rmii_clk_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       sdvenc_clkin_ck: sdvenc_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       secure_32k_clk_src_ck: secure_32k_clk_src_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       sys_32k_ck: sys_32k_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       virt_12000000_ck: virt_12000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       virt_13000000_ck: virt_13000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <13000000>;
+       };
+
+       /*
+        * Fixed-rate root clocks.  The virt_*_ck nodes enumerate the
+        * reference-oscillator rates selectable as sys_clkin1 (16.8, 19.2,
+        * 20, 26, 27, 38.4 MHz); sys_clkin2 is a separate 22.5792 MHz input.
+        * The <0>-rate nodes are external clock inputs; a frequency of 0
+        * presumably means "rate supplied elsewhere (e.g. by the board dts)"
+        * - NOTE(review): confirm that convention against users of these
+        * clocks.
+        */
+       virt_16800000_ck: virt_16800000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <16800000>;
+       };
+
+       virt_19200000_ck: virt_19200000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <19200000>;
+       };
+
+       virt_20000000_ck: virt_20000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <20000000>;
+       };
+
+       virt_26000000_ck: virt_26000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <26000000>;
+       };
+
+       virt_27000000_ck: virt_27000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <27000000>;
+       };
+
+       virt_38400000_ck: virt_38400000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <38400000>;
+       };
+
+       sys_clkin2: sys_clkin2 {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <22579200>;
+       };
+
+       /* External clock inputs, rate unknown at SoC level (see note above) */
+       usb_otg_clkin_ck: usb_otg_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       video1_clkin_ck: video1_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       video1_m2_clkin_ck: video1_m2_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       video2_clkin_ck: video2_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       video2_m2_clkin_ck: video2_m2_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       /*
+        * ABE (Audio Back End) DPLL and its post-dividers.
+        * The DPLL node's two parents are <reference, bypass>; its four reg
+        * entries are CM register offsets whose individual roles (mode/idle/
+        * autoidle/mult-div) follow the TI DPLL binding - NOTE(review): exact
+        * register mapping not visible here, confirm against the binding doc.
+        * The m2x2/m2/m3x2 children are TI divider clocks (max divider 31,
+        * 1-based index, inverted autoidle bit at shift 8).
+        */
+       dpll_abe_ck: dpll_abe_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-m4xen-clock";
+               clocks = <&abe_dpll_clk_mux>, <&abe_dpll_bypass_clk_mux>;
+               reg = <0x01e0>, <0x01e4>, <0x01ec>, <0x01e8>;
+       };
+
+       dpll_abe_x2_ck: dpll_abe_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_abe_ck>;
+       };
+
+       dpll_abe_m2x2_ck: dpll_abe_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01f0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       /* ABE functional clock: power-of-two divider (1/2/4) off m2x2 */
+       abe_clk: abe_clk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2x2_ck>;
+               ti,max-div = <4>;
+               reg = <0x0108>;
+               ti,index-power-of-two;
+       };
+
+       dpll_abe_m2_ck: dpll_abe_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01f0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_abe_m3x2_ck: dpll_abe_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01f4>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       /*
+        * CORE, MPU, DSP and IVA DPLLs.  Each "ti,omap4-dpll-clock" takes
+        * <sys_clkin1, bypass-parent>.  The *_dpll_hs_clk_div fixed-factor
+        * (1:1) clocks off dpll_core_h12x2_ck feed the bypass inputs of the
+        * MPU/DSP/IVA DPLLs; the *_dclk 1:1 fixed-factors are taps used by
+        * the clkout muxes further down.
+        */
+       dpll_core_ck: dpll_core_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-core-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
+       };
+
+       dpll_core_x2_ck: dpll_core_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_core_ck>;
+       };
+
+       dpll_core_h12x2_ck: dpll_core_h12x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x013c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       mpu_dpll_hs_clk_div: mpu_dpll_hs_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_h12x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_mpu_ck: dpll_mpu_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&mpu_dpll_hs_clk_div>;
+               reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
+       };
+
+       dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_mpu_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0170>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       mpu_dclk_div: mpu_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_mpu_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dsp_dpll_hs_clk_div: dsp_dpll_hs_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_h12x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_dsp_ck: dpll_dsp_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+               reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
+       };
+
+       dpll_dsp_m2_ck: dpll_dsp_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_dsp_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0244>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       iva_dpll_hs_clk_div: iva_dpll_hs_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_h12x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_iva_ck: dpll_iva_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+               reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+       };
+
+       dpll_iva_m2_ck: dpll_iva_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_iva_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01b0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       iva_dclk: iva_dclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_iva_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /*
+        * GPU, DDR and GMAC DPLLs with their m2 post-dividers, plus 1:1
+        * fixed-factor taps (core_dpll_out / video / hdmi dclk) consumed by
+        * the clkout muxes defined later in &prm_clocks.
+        */
+       dpll_gpu_ck: dpll_gpu_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
+       };
+
+       dpll_gpu_m2_ck: dpll_gpu_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gpu_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x02e8>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_m2_ck: dpll_core_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0130>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       core_dpll_out_dclk_div: core_dpll_out_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_ddr_ck: dpll_ddr_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
+       };
+
+       dpll_ddr_m2_ck: dpll_ddr_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_ddr_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0220>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_gmac_ck: dpll_gmac_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
+       };
+
+       dpll_gmac_m2_ck: dpll_gmac_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x02b8>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       video2_dclk_div: video2_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&video2_m2_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       video1_dclk_div: video1_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&video1_m2_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       hdmi_dclk_div: hdmi_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&hdmi_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /*
+        * Fixed high-speed dividers off the ABE m3x2 output (/2 for PER,
+        * /3 for USB) - presumably the HS bypass inputs of the PER/USB DPLLs
+        * defined elsewhere in this file (NOTE(review): those DPLL nodes are
+        * not visible in this hunk).  Followed by the EVE DPLL, its m2
+        * post-divider and its 1:1 dclk tap.
+        */
+       per_dpll_hs_clk_div: per_dpll_hs_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m3x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       usb_dpll_hs_clk_div: usb_dpll_hs_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m3x2_ck>;
+               clock-mult = <1>;
+               clock-div = <3>;
+       };
+
+       eve_dpll_hs_clk_div: eve_dpll_hs_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_h12x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_eve_ck: dpll_eve_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+               reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
+       };
+
+       dpll_eve_m2_ck: dpll_eve_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_eve_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0294>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       eve_dclk_div: eve_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_eve_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /*
+        * Remaining DPLL output stages: CORE H13/H14/H22/H23/H24 dividers
+        * (max-div 63), the DDR/DSP/GMAC doubled (x2) clocks with their
+        * H/M post-dividers, and the GMII clock derived from GMAC H11 / 2.
+        */
+       dpll_core_h13x2_ck: dpll_core_h13x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0140>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_h14x2_ck: dpll_core_h14x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0144>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_h22x2_ck: dpll_core_h22x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0154>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_h23x2_ck: dpll_core_h23x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0158>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_h24x2_ck: dpll_core_h24x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x015c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_ddr_x2_ck: dpll_ddr_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_ddr_ck>;
+       };
+
+       dpll_ddr_h11x2_ck: dpll_ddr_h11x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_ddr_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0228>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_dsp_x2_ck: dpll_dsp_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_dsp_ck>;
+       };
+
+       dpll_dsp_m3x2_ck: dpll_dsp_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_dsp_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0248>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_gmac_x2_ck: dpll_gmac_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_gmac_ck>;
+       };
+
+       dpll_gmac_h11x2_ck: dpll_gmac_h11x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x02c0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_gmac_h12x2_ck: dpll_gmac_h12x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x02c4>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_gmac_h13x2_ck: dpll_gmac_h13x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x02c8>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_gmac_m3x2_ck: dpll_gmac_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x02bc>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       /* GMII interface clock: GMAC H11 output divided by 2 */
+       gmii_m_clk_div: gmii_m_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_gmac_h11x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       /*
+        * 1:1 fixed-factor aliases.  These add named taps on the HDMI/video
+        * inputs and on the L3 interconnect clock so downstream muxes can
+        * reference them; l4_root_clk_div derives the L4 root from L3.
+        */
+       hdmi_clk2_div: hdmi_clk2_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&hdmi_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       hdmi_div_clk: hdmi_div_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&hdmi_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l3_iclk_div: l3_iclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_h12x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l4_root_clk_div: l4_root_clk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l3_iclk_div>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       video1_clk2_div: video1_clk2_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&video1_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       video1_div_clk: video1_div_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&video1_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       video2_clk2_div: video2_clk2_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&video2_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       video2_div_clk: video2_div_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&video2_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /*
+        * Functional clock muxes (ti,mux-clock): parent selected by the bit
+        * field at ti,bit-shift in the given register; parent order in
+        * "clocks" defines the selector values.  dummy_ck is a 0 Hz
+        * placeholder for consumers that need a clock handle but no real
+        * source.
+        */
+       ipu1_gfclk_mux: ipu1_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0520>;
+       };
+
+       mcasp1_ahclkr_mux: mcasp1_ahclkr_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <28>;
+               reg = <0x0550>;
+       };
+
+       mcasp1_ahclkx_mux: mcasp1_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x0550>;
+       };
+
+       mcasp1_aux_gfclk_mux: mcasp1_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x0550>;
+       };
+
+       timer5_gfclk_mux: timer5_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
+               ti,bit-shift = <24>;
+               reg = <0x0558>;
+       };
+
+       timer6_gfclk_mux: timer6_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
+               ti,bit-shift = <24>;
+               reg = <0x0560>;
+       };
+
+       timer7_gfclk_mux: timer7_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
+               ti,bit-shift = <24>;
+               reg = <0x0568>;
+       };
+
+       timer8_gfclk_mux: timer8_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
+               ti,bit-shift = <24>;
+               reg = <0x0570>;
+       };
+
+       uart6_gfclk_mux: uart6_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0580>;
+       };
+
+       dummy_ck: dummy_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+};
+&prm_clocks {
+       /*
+        * PRM clocks.  sys_clkin1 selects the board reference oscillator
+        * from the virt_* fixed rates (1-based selector index); the ABE DPLL
+        * reference/bypass muxes and ABE-domain dividers follow.
+        */
+       sys_clkin1: sys_clkin1 {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&virt_12000000_ck>, <&virt_20000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
+               reg = <0x0110>;
+               ti,index-starts-at-one;
+       };
+
+       abe_dpll_sys_clk_mux: abe_dpll_sys_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin2>;
+               reg = <0x0118>;
+       };
+
+       abe_dpll_bypass_clk_mux: abe_dpll_bypass_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_dpll_sys_clk_mux>, <&sys_32k_ck>;
+               reg = <0x0114>;
+       };
+
+       abe_dpll_clk_mux: abe_dpll_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_dpll_sys_clk_mux>, <&sys_32k_ck>;
+               reg = <0x010c>;
+       };
+
+       /* ABE 24 MHz-class clock: m2x2 divided by 8 or 16 (table divider) */
+       abe_24m_fclk: abe_24m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2x2_ck>;
+               reg = <0x011c>;
+               ti,dividers = <8>, <16>;
+       };
+
+       aess_fclk: aess_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&abe_clk>;
+               reg = <0x0178>;
+               ti,max-div = <2>;
+       };
+
+       abe_giclk_div: abe_giclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&aess_fclk>;
+               reg = <0x0174>;
+               ti,max-div = <2>;
+       };
+
+       abe_lp_clk_div: abe_lp_clk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2x2_ck>;
+               reg = <0x01d8>;
+               ti,dividers = <16>, <32>;
+       };
+
+       abe_sys_clk_div: abe_sys_clk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin1>;
+               reg = <0x0120>;
+               ti,max-div = <2>;
+       };
+
+       adc_gfclk_mux: adc_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin2>, <&sys_32k_ck>;
+               reg = <0x01dc>;
+       };
+
+       /*
+        * Debug/observability clock (dclk) dividers: power-of-two dividers
+        * up to /64 on the various domain clocks.  These feed the
+        * clkoutmux*_clk_mux selectors below.
+        */
+       sys_clk1_dclk_div: sys_clk1_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin1>;
+               ti,max-div = <64>;
+               reg = <0x01c8>;
+               ti,index-power-of-two;
+       };
+
+       sys_clk2_dclk_div: sys_clk2_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin2>;
+               ti,max-div = <64>;
+               reg = <0x01cc>;
+               ti,index-power-of-two;
+       };
+
+       per_abe_x1_dclk_div: per_abe_x1_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x01bc>;
+               ti,index-power-of-two;
+       };
+
+       dsp_gclk_div: dsp_gclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_dsp_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x018c>;
+               ti,index-power-of-two;
+       };
+
+       gpu_dclk: gpu_dclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gpu_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x01a0>;
+               ti,index-power-of-two;
+       };
+
+       emif_phy_dclk_div: emif_phy_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_ddr_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x0190>;
+               ti,index-power-of-two;
+       };
+
+       gmac_250m_dclk_div: gmac_250m_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x019c>;
+               ti,index-power-of-two;
+       };
+
+       l3init_480m_dclk_div: l3init_480m_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x01ac>;
+               ti,index-power-of-two;
+       };
+
+       usb_otg_dclk_div: usb_otg_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&usb_otg_clkin_ck>;
+               ti,max-div = <64>;
+               reg = <0x0184>;
+               ti,index-power-of-two;
+       };
+
+       sata_dclk_div: sata_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin1>;
+               ti,max-div = <64>;
+               reg = <0x01c0>;
+               ti,index-power-of-two;
+       };
+
+       pcie2_dclk_div: pcie2_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_pcie_ref_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x01b8>;
+               ti,index-power-of-two;
+       };
+
+       pcie_dclk_div: pcie_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&apll_pcie_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x01b4>;
+               ti,index-power-of-two;
+       };
+
+       emu_dclk_div: emu_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin1>;
+               ti,max-div = <64>;
+               reg = <0x0194>;
+               ti,index-power-of-two;
+       };
+
+       secure_32k_dclk_div: secure_32k_dclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&secure_32k_clk_src_ck>;
+               ti,max-div = <64>;
+               reg = <0x01c4>;
+               ti,index-power-of-two;
+       };
+
+       /*
+        * Clock-output muxes: each selects one of the 22 dclk taps above for
+        * the CLKOUT pins (all three share the same parent list), plus the
+        * eFuse /2 clock, the EVE source mux and the HDMI DPLL reference mux.
+        */
+       clkoutmux0_clk_mux: clkoutmux0_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clk1_dclk_div>, <&sys_clk2_dclk_div>, <&per_abe_x1_dclk_div>, <&mpu_dclk_div>, <&dsp_gclk_div>, <&iva_dclk>, <&gpu_dclk>, <&core_dpll_out_dclk_div>, <&emif_phy_dclk_div>, <&gmac_250m_dclk_div>, <&video2_dclk_div>, <&video1_dclk_div>, <&hdmi_dclk_div>, <&func_96m_aon_dclk_div>, <&l3init_480m_dclk_div>, <&usb_otg_dclk_div>, <&sata_dclk_div>, <&pcie2_dclk_div>, <&pcie_dclk_div>, <&emu_dclk_div>, <&secure_32k_dclk_div>, <&eve_dclk_div>;
+               reg = <0x0158>;
+       };
+
+       clkoutmux1_clk_mux: clkoutmux1_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clk1_dclk_div>, <&sys_clk2_dclk_div>, <&per_abe_x1_dclk_div>, <&mpu_dclk_div>, <&dsp_gclk_div>, <&iva_dclk>, <&gpu_dclk>, <&core_dpll_out_dclk_div>, <&emif_phy_dclk_div>, <&gmac_250m_dclk_div>, <&video2_dclk_div>, <&video1_dclk_div>, <&hdmi_dclk_div>, <&func_96m_aon_dclk_div>, <&l3init_480m_dclk_div>, <&usb_otg_dclk_div>, <&sata_dclk_div>, <&pcie2_dclk_div>, <&pcie_dclk_div>, <&emu_dclk_div>, <&secure_32k_dclk_div>, <&eve_dclk_div>;
+               reg = <0x015c>;
+       };
+
+       clkoutmux2_clk_mux: clkoutmux2_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clk1_dclk_div>, <&sys_clk2_dclk_div>, <&per_abe_x1_dclk_div>, <&mpu_dclk_div>, <&dsp_gclk_div>, <&iva_dclk>, <&gpu_dclk>, <&core_dpll_out_dclk_div>, <&emif_phy_dclk_div>, <&gmac_250m_dclk_div>, <&video2_dclk_div>, <&video1_dclk_div>, <&hdmi_dclk_div>, <&func_96m_aon_dclk_div>, <&l3init_480m_dclk_div>, <&usb_otg_dclk_div>, <&sata_dclk_div>, <&pcie2_dclk_div>, <&pcie_dclk_div>, <&emu_dclk_div>, <&secure_32k_dclk_div>, <&eve_dclk_div>;
+               reg = <0x0160>;
+       };
+
+       custefuse_sys_gfclk_div: custefuse_sys_gfclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin1>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       eve_clk: eve_clk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_eve_m2_ck>, <&dpll_dsp_m3x2_ck>;
+               reg = <0x0180>;
+       };
+
+       hdmi_dpll_clk_mux: hdmi_dpll_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin2>;
+               reg = <0x01a4>;
+       };
+
+       /*
+        * Remaining PRM-domain clocks: MLB/MLBP input dividers, the per-ABE
+        * gfclk divider, the timer system-clock divider, video DPLL and
+        * WKUPAON reference muxes, the GPIO1 debounce gate (sys_32k gated by
+        * bit 8), and the DCAN1/TIMER1 source muxes.
+        */
+       mlb_clk: mlb_clk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mlb_clkin_ck>;
+               ti,max-div = <64>;
+               reg = <0x0134>;
+               ti,index-power-of-two;
+       };
+
+       mlbp_clk: mlbp_clk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mlbp_clkin_ck>;
+               ti,max-div = <64>;
+               reg = <0x0130>;
+               ti,index-power-of-two;
+       };
+
+       per_abe_x1_gfclk2_div: per_abe_x1_gfclk2_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2_ck>;
+               ti,max-div = <64>;
+               reg = <0x0138>;
+               ti,index-power-of-two;
+       };
+
+       timer_sys_clk_div: timer_sys_clk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin1>;
+               reg = <0x0144>;
+               ti,max-div = <2>;
+       };
+
+       video1_dpll_clk_mux: video1_dpll_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin2>;
+               reg = <0x01d0>;
+       };
+
+       video2_dpll_clk_mux: video2_dpll_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin2>;
+               reg = <0x01d4>;
+       };
+
+       wkupaon_iclk_mux: wkupaon_iclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&abe_lp_clk_div>;
+               reg = <0x0108>;
+       };
+
+       gpio1_dbclk: gpio1_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1838>;
+       };
+
+       dcan1_sys_clk_mux: dcan1_sys_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin2>;
+               ti,bit-shift = <24>;
+               reg = <0x1888>;
+       };
+
+       timer1_gfclk_mux: timer1_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1840>;
+       };
+
+       uart10_gfclk_mux: uart10_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1880>;
+       };
+};
+&cm_core_clocks {
+       dpll_pcie_ref_ck: dpll_pcie_ref_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&sys_clkin1>;
+               reg = <0x0200>, <0x0204>, <0x020c>, <0x0208>;
+       };
+
+       dpll_pcie_ref_m2ldo_ck: dpll_pcie_ref_m2ldo_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_pcie_ref_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0210>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       apll_pcie_in_clk_mux: apll_pcie_in_clk_mux@4ae06118 {
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_pcie_ref_ck>, <&pciesref_acs_clk_ck>;
+               #clock-cells = <0>;
+               reg = <0x021c 0x4>;
+               ti,bit-shift = <7>;
+       };
+
+       apll_pcie_ck: apll_pcie_ck {
+               #clock-cells = <0>;
+               compatible = "ti,dra7-apll-clock";
+               clocks = <&apll_pcie_in_clk_mux>, <&dpll_pcie_ref_ck>;
+               reg = <0x021c>, <0x0220>;
+       };
+
+       optfclk_pciephy_div: optfclk_pciephy_div@4a00821c {
+               compatible = "ti,divider-clock";
+               clocks = <&apll_pcie_ck>;
+               #clock-cells = <0>;
+               reg = <0x021c>;
+               ti,bit-shift = <8>;
+               ti,max-div = <2>;
+       };
+
+       optfclk_pciephy_clk: optfclk_pciephy_clk@4a0093b0 {
+               compatible = "ti,gate-clock";
+               clocks = <&apll_pcie_ck>;
+               #clock-cells = <0>;
+               reg = <0x13b0>;
+               ti,bit-shift = <9>;
+       };
+
+       optfclk_pciephy_div_clk: optfclk_pciephy_div_clk@4a0093b0 {
+               compatible = "ti,gate-clock";
+               clocks = <&optfclk_pciephy_div>;
+               #clock-cells = <0>;
+               reg = <0x13b0>;
+               ti,bit-shift = <10>;
+       };
+
+       apll_pcie_clkvcoldo: apll_pcie_clkvcoldo {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&apll_pcie_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       apll_pcie_clkvcoldo_div: apll_pcie_clkvcoldo_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&apll_pcie_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       apll_pcie_m2_ck: apll_pcie_m2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&apll_pcie_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_per_ck: dpll_per_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+               reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
+       };
+
+       dpll_per_m2_ck: dpll_per_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0150>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       func_96m_aon_dclk_div: func_96m_aon_dclk_div {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_usb_ck: dpll_usb_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-j-type-clock";
+               clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+               reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
+       };
+
+       dpll_usb_m2_ck: dpll_usb_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0190>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_pcie_ref_m2_ck: dpll_pcie_ref_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_pcie_ref_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0210>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_x2_ck: dpll_per_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_per_ck>;
+       };
+
+       dpll_per_h11x2_ck: dpll_per_h11x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0158>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_h12x2_ck: dpll_per_h12x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x015c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_h13x2_ck: dpll_per_h13x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0160>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_h14x2_ck: dpll_per_h14x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0164>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m2x2_ck: dpll_per_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0150>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_usb_clkdcoldo: dpll_usb_clkdcoldo {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_usb_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       func_128m_clk: func_128m_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_h11x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       func_12m_fclk: func_12m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <16>;
+       };
+
+       func_24m_clk: func_24m_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       func_48m_fclk: func_48m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       func_96m_fclk: func_96m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       l3init_60m_fclk: l3init_60m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               reg = <0x0104>;
+               ti,dividers = <1>, <8>;
+       };
+
+       dss_32khz_clk: dss_32khz_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <11>;
+               reg = <0x1120>;
+       };
+
+       dss_48mhz_clk: dss_48mhz_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_48m_fclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1120>;
+       };
+
+       dss_dss_clk: dss_dss_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_per_h12x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1120>;
+       };
+
+       dss_hdmi_clk: dss_hdmi_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&hdmi_dpll_clk_mux>;
+               ti,bit-shift = <10>;
+               reg = <0x1120>;
+       };
+
+       dss_video1_clk: dss_video1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&video1_dpll_clk_mux>;
+               ti,bit-shift = <12>;
+               reg = <0x1120>;
+       };
+
+       dss_video2_clk: dss_video2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&video2_dpll_clk_mux>;
+               ti,bit-shift = <13>;
+               reg = <0x1120>;
+       };
+
+       gpio2_dbclk: gpio2_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1760>;
+       };
+
+       gpio3_dbclk: gpio3_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1768>;
+       };
+
+       gpio4_dbclk: gpio4_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1770>;
+       };
+
+       gpio5_dbclk: gpio5_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1778>;
+       };
+
+       gpio6_dbclk: gpio6_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1780>;
+       };
+
+       gpio7_dbclk: gpio7_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1810>;
+       };
+
+       gpio8_dbclk: gpio8_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1818>;
+       };
+
+       mmc1_clk32k: mmc1_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1328>;
+       };
+
+       mmc2_clk32k: mmc2_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1330>;
+       };
+
+       mmc3_clk32k: mmc3_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1820>;
+       };
+
+       mmc4_clk32k: mmc4_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1828>;
+       };
+
+       sata_ref_clk: sata_ref_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_clkin1>;
+               ti,bit-shift = <8>;
+               reg = <0x1388>;
+       };
+
+       usb_otg_ss1_refclk960m: usb_otg_ss1_refclk960m {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_clkdcoldo>;
+               ti,bit-shift = <8>;
+               reg = <0x13f0>;
+       };
+
+       usb_otg_ss2_refclk960m: usb_otg_ss2_refclk960m {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_clkdcoldo>;
+               ti,bit-shift = <8>;
+               reg = <0x1340>;
+       };
+
+       usb_phy1_always_on_clk32k: usb_phy1_always_on_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0640>;
+       };
+
+       usb_phy2_always_on_clk32k: usb_phy2_always_on_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0688>;
+       };
+
+       usb_phy3_always_on_clk32k: usb_phy3_always_on_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0698>;
+       };
+
+       atl_dpll_clk_mux: atl_dpll_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_32k_ck>, <&video1_clkin_ck>, <&video2_clkin_ck>, <&hdmi_clkin_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0c00>;
+       };
+
+       atl_gfclk_mux: atl_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3_iclk_div>, <&dpll_abe_m2_ck>, <&atl_dpll_clk_mux>;
+               ti,bit-shift = <26>;
+               reg = <0x0c00>;
+       };
+
+       gmac_gmii_ref_clk_div: gmac_gmii_ref_clk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_gmac_m2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x13d0>;
+               ti,dividers = <2>;
+       };
+
+       gmac_rft_clk_mux: gmac_rft_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&video1_clkin_ck>, <&video2_clkin_ck>, <&dpll_abe_m2_ck>, <&hdmi_clkin_ck>, <&l3_iclk_div>;
+               ti,bit-shift = <25>;
+               reg = <0x13d0>;
+       };
+
+       gpu_core_gclk_mux: gpu_core_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>, <&dpll_gpu_m2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1220>;
+       };
+
+       gpu_hyd_gclk_mux: gpu_hyd_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>, <&dpll_gpu_m2_ck>;
+               ti,bit-shift = <26>;
+               reg = <0x1220>;
+       };
+
+       l3instr_ts_gclk_div: l3instr_ts_gclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&wkupaon_iclk_mux>;
+               ti,bit-shift = <24>;
+               reg = <0x0e50>;
+               ti,dividers = <8>, <16>, <32>;
+       };
+
+       mcasp2_ahclkr_mux: mcasp2_ahclkr_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <28>;
+               reg = <0x1860>;
+       };
+
+       mcasp2_ahclkx_mux: mcasp2_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <28>;
+               reg = <0x1860>;
+       };
+
+       mcasp2_aux_gfclk_mux: mcasp2_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x1860>;
+       };
+
+       mcasp3_ahclkx_mux: mcasp3_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1868>;
+       };
+
+       mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x1868>;
+       };
+
+       mcasp4_ahclkx_mux: mcasp4_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1898>;
+       };
+
+       mcasp4_aux_gfclk_mux: mcasp4_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x1898>;
+       };
+
+       mcasp5_ahclkx_mux: mcasp5_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1878>;
+       };
+
+       mcasp5_aux_gfclk_mux: mcasp5_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x1878>;
+       };
+
+       mcasp6_ahclkx_mux: mcasp6_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1904>;
+       };
+
+       mcasp6_aux_gfclk_mux: mcasp6_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x1904>;
+       };
+
+       mcasp7_ahclkx_mux: mcasp7_ahclkx_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1908>;
+       };
+
+       mcasp7_aux_gfclk_mux: mcasp7_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <22>;
+               reg = <0x1908>;
+       };
+
+       mcasp8_ahclk_mux: mcasp8_ahclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
+               ti,bit-shift = <22>;
+               reg = <0x1890>;
+       };
+
+       mcasp8_aux_gfclk_mux: mcasp8_aux_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
+               ti,bit-shift = <24>;
+               reg = <0x1890>;
+       };
+
+       mmc1_fclk_mux: mmc1_fclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1328>;
+       };
+
+       mmc1_fclk_div: mmc1_fclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mmc1_fclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <4>;
+               reg = <0x1328>;
+               ti,index-power-of-two;
+       };
+
+       mmc2_fclk_mux: mmc2_fclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1330>;
+       };
+
+       mmc2_fclk_div: mmc2_fclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mmc2_fclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <4>;
+               reg = <0x1330>;
+               ti,index-power-of-two;
+       };
+
+       mmc3_gfclk_mux: mmc3_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1820>;
+       };
+
+       mmc3_gfclk_div: mmc3_gfclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mmc3_gfclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <4>;
+               reg = <0x1820>;
+               ti,index-power-of-two;
+       };
+
+       mmc4_gfclk_mux: mmc4_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1828>;
+       };
+
+       mmc4_gfclk_div: mmc4_gfclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mmc4_gfclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <4>;
+               reg = <0x1828>;
+               ti,index-power-of-two;
+       };
+
+       qspi_gfclk_mux: qspi_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_128m_clk>, <&dpll_per_h13x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1838>;
+       };
+
+       qspi_gfclk_div: qspi_gfclk_div {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&qspi_gfclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <4>;
+               reg = <0x1838>;
+               ti,index-power-of-two;
+       };
+
+       timer10_gfclk_mux: timer10_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1728>;
+       };
+
+       timer11_gfclk_mux: timer11_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1730>;
+       };
+
+       timer13_gfclk_mux: timer13_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x17c8>;
+       };
+
+       timer14_gfclk_mux: timer14_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x17d0>;
+       };
+
+       timer15_gfclk_mux: timer15_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x17d8>;
+       };
+
+       timer16_gfclk_mux: timer16_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1830>;
+       };
+
+       timer2_gfclk_mux: timer2_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1738>;
+       };
+
+       timer3_gfclk_mux: timer3_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1740>;
+       };
+
+       timer4_gfclk_mux: timer4_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1748>;
+       };
+
+       timer9_gfclk_mux: timer9_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x1750>;
+       };
+
+       uart1_gfclk_mux: uart1_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1840>;
+       };
+
+       uart2_gfclk_mux: uart2_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1848>;
+       };
+
+       uart3_gfclk_mux: uart3_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1850>;
+       };
+
+       uart4_gfclk_mux: uart4_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1858>;
+       };
+
+       uart5_gfclk_mux: uart5_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1870>;
+       };
+
+       uart7_gfclk_mux: uart7_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x18d0>;
+       };
+
+       uart8_gfclk_mux: uart8_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x18e0>;
+       };
+
+       uart9_gfclk_mux: uart9_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x18e8>;
+       };
+
+       vip1_gclk_mux: vip1_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3_iclk_div>, <&dpll_core_h23x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1020>;
+       };
+
+       vip2_gclk_mux: vip2_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3_iclk_div>, <&dpll_core_h23x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1028>;
+       };
+
+       vip3_gclk_mux: vip3_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3_iclk_div>, <&dpll_core_h23x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1030>;
+       };
+};
+
+&cm_core_clockdomains {
+       coreaon_clkdm: coreaon_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll_usb_ck>;
+       };
+};
diff --git a/arch/arm/boot/dts/imx6dl-cubox-i.dts b/arch/arm/boot/dts/imx6dl-cubox-i.dts
new file mode 100644 (file)
index 0000000..58aa8f2
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2014 Russell King
+ */
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-cubox-i.dtsi"
+
+/ {
+       model = "SolidRun Cubox-i Solo/DualLite";
+       compatible = "solidrun,cubox-i/dl", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
new file mode 100644 (file)
index 0000000..fd8fc7c
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2013,2014 Russell King
+ */
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-microsom.dtsi"
+#include "imx6qdl-microsom-ar8035.dtsi"
+
+/ {
+       model = "SolidRun HummingBoard DL/Solo";
+       compatible = "solidrun,hummingboard", "fsl,imx6dl";
+
+       ir_recv: ir-receiver {
+               compatible = "gpio-ir-receiver";
+               gpios = <&gpio1 2 1>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_hummingboard_gpio1_2>;
+       };
+
+       regulators {
+               compatible = "simple-bus";
+
+               reg_3p3v: 3p3v {
+                       compatible = "regulator-fixed";
+                       regulator-name = "3P3V";
+                       regulator-min-microvolt = <3300000>;
+                       regulator-max-microvolt = <3300000>;
+                       regulator-always-on;
+               };
+
+               reg_usbh1_vbus: usb-h1-vbus {
+                       compatible = "regulator-fixed";
+                       enable-active-high;
+                       gpio = <&gpio1 0 0>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
+                       regulator-name = "usb_h1_vbus";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+               };
+
+               reg_usbotg_vbus: usb-otg-vbus {
+                       compatible = "regulator-fixed";
+                       enable-active-high;
+                       gpio = <&gpio3 22 0>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
+                       regulator-name = "usb_otg_vbus";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+               };
+       };
+
+       codec: spdif-transmitter {
+               compatible = "linux,spdif-dit";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_hummingboard_spdif>;
+       };
+
+       sound-spdif {
+               compatible = "fsl,imx-audio-spdif";
+               model = "imx-spdif";
+               /* IMX6 doesn't implement this yet */
+               spdif-controller = <&spdif>;
+               spdif-out;
+       };
+};
+
+&can1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hummingboard_flexcan1>;
+       status = "okay";
+};
+
+&i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
+
+       /*
+        * Not fitted on Carrier-1 board... yet
+       status = "okay";
+
+       rtc: pcf8523@68 {
+               compatible = "nxp,pcf8523";
+               reg = <0x68>;
+       };
+        */
+};
+
+&iomuxc {
+       hummingboard {
+               pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
+                       fsl,pins = <
+                               MX6QDL_PAD_SD3_CLK__FLEXCAN1_RX 0x80000000
+                               MX6QDL_PAD_SD3_CMD__FLEXCAN1_TX 0x80000000
+                       >;
+               };
+
+               pinctrl_hummingboard_gpio1_2: hummingboard-gpio1_2 {
+                       fsl,pins = <
+                               MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000
+                       >;
+               };
+
+               pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
+                       fsl,pins = <
+                               MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+                               MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+                       >;
+               };
+
+               pinctrl_hummingboard_spdif: hummingboard-spdif {
+                       fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
+               };
+
+               pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
+                       fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
+               };
+
+               pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
+                       fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
+               };
+
+               pinctrl_hummingboard_usdhc2_aux: hummingboard-usdhc2-aux {
+                       fsl,pins = <
+                               MX6QDL_PAD_GPIO_4__GPIO1_IO04    0x1f071
+                       >;
+               };
+
+               pinctrl_hummingboard_usdhc2: hummingboard-usdhc2 {
+                       fsl,pins = <
+                               MX6QDL_PAD_SD2_CMD__SD2_CMD    0x17059
+                               MX6QDL_PAD_SD2_CLK__SD2_CLK    0x10059
+                               MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+                               MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+                               MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+                               MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+                       >;
+               };
+       };
+};
+
+&spdif {
+       status = "okay";
+};
+
+&usbh1 {
+       vbus-supply = <&reg_usbh1_vbus>;
+       status = "okay";
+};
+
+&usbotg {
+       vbus-supply = <&reg_usbotg_vbus>;
+       status = "okay";
+};
+
+&usdhc2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <
+               &pinctrl_hummingboard_usdhc2_aux
+               &pinctrl_hummingboard_usdhc2
+       >;
+       vmmc-supply = <&reg_3p3v>;
+       cd-gpios = <&gpio1 4 0>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-cubox-i.dts b/arch/arm/boot/dts/imx6q-cubox-i.dts
new file mode 100644 (file)
index 0000000..bc5f31e
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2014 Russell King
+ */
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-cubox-i.dtsi"
+
+/ {
+       model = "SolidRun Cubox-i Dual/Quad";
+       compatible = "solidrun,cubox-i/q", "fsl,imx6q";
+};
+
+&sata {
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
new file mode 100644 (file)
index 0000000..64daa3b
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2014 Russell King
+ */
+#include "imx6qdl-microsom.dtsi"
+#include "imx6qdl-microsom-ar8035.dtsi"
+
+/ {
+       ir_recv: ir-receiver {
+               compatible = "gpio-ir-receiver";
+               gpios = <&gpio3 9 1>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_cubox_i_ir>;
+       };
+
+       regulators {
+               compatible = "simple-bus";
+
+               reg_3p3v: 3p3v {
+                       compatible = "regulator-fixed";
+                       regulator-name = "3P3V";
+                       regulator-min-microvolt = <3300000>;
+                       regulator-max-microvolt = <3300000>;
+                       regulator-always-on;
+               };
+
+               reg_usbh1_vbus: usb-h1-vbus {
+                       compatible = "regulator-fixed";
+                       enable-active-high;
+                       gpio = <&gpio1 0 0>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_cubox_i_usbh1_vbus>;
+                       regulator-name = "usb_h1_vbus";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+               };
+
+               reg_usbotg_vbus: usb-otg-vbus {
+                       compatible = "regulator-fixed";
+                       enable-active-high;
+                       gpio = <&gpio3 22 0>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_cubox_i_usbotg_vbus>;
+                       regulator-name = "usb_otg_vbus";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+               };
+       };
+
+       codec: spdif-transmitter {
+               compatible = "linux,spdif-dit";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_cubox_i_spdif>;
+       };
+
+       sound-spdif {
+               compatible = "fsl,imx-audio-spdif";
+               model = "imx-spdif";
+               /* IMX6 doesn't implement this yet */
+               spdif-controller = <&spdif>;
+               spdif-out;
+       };
+};
+
+&i2c3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_cubox_i_i2c3>;
+
+       status = "okay";
+
+       rtc: pcf8523@68 {
+               compatible = "nxp,pcf8523";
+               reg = <0x68>;
+       };
+};
+
+&iomuxc {
+       cubox_i {
+               pinctrl_cubox_i_i2c3: cubox-i-i2c3 {
+                       fsl,pins = <
+                               MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+                               MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+                       >;
+               };
+
+               pinctrl_cubox_i_ir: cubox-i-ir {
+                       fsl,pins = <
+                               MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x80000000
+                       >;
+               };
+
+               pinctrl_cubox_i_spdif: cubox-i-spdif {
+                       fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
+               };
+
+               pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
+                       fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
+               };
+
+               pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
+                       fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
+               };
+
+               pinctrl_cubox_i_usdhc2_aux: cubox-i-usdhc2-aux {
+                       fsl,pins = <
+                               MX6QDL_PAD_GPIO_4__GPIO1_IO04    0x1f071
+                               MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x1b071
+                       >;
+               };
+
+               pinctrl_cubox_i_usdhc2: cubox-i-usdhc2 {
+                       fsl,pins = <
+                               MX6QDL_PAD_SD2_CMD__SD2_CMD    0x17059
+                               MX6QDL_PAD_SD2_CLK__SD2_CLK    0x10059
+                               MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+                               MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+                               MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+                               MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+                       >;
+               };
+       };
+};
+
+&spdif {
+       status = "okay";
+};
+
+&usbh1 {
+       vbus-supply = <&reg_usbh1_vbus>;
+       status = "okay";
+};
+
+&usbotg {
+       vbus-supply = <&reg_usbotg_vbus>;
+       status = "okay";
+};
+
+&usdhc2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
+       vmmc-supply = <&reg_3p3v>;
+       cd-gpios = <&gpio1 4 0>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
new file mode 100644 (file)
index 0000000..a3cb2ff
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2013,2014 Russell King
+ *
+ * This describes the hookup for an AR8035 to the iMX6 on the SolidRun
+ * MicroSOM.
+ */
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
+       phy-mode = "rgmii";
+       phy-reset-duration = <2>;
+       phy-reset-gpios = <&gpio4 15 0>;
+       status = "okay";
+};
+
+&iomuxc {
+       enet {
+               pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 {
+                       fsl,pins = <
+                               MX6QDL_PAD_ENET_MDIO__ENET_MDIO         0x1b0b0
+                               MX6QDL_PAD_ENET_MDC__ENET_MDC           0x1b0b0
+                               /* AR8035 reset */
+                               MX6QDL_PAD_KEY_ROW4__GPIO4_IO15         0x130b0
+                               /* AR8035 interrupt */
+                               MX6QDL_PAD_DI0_PIN2__GPIO4_IO18         0x80000000
+                               /* GPIO16 -> AR8035 25MHz */
+                               MX6QDL_PAD_GPIO_16__ENET_REF_CLK        0xc0000000
+                               MX6QDL_PAD_RGMII_TXC__RGMII_TXC         0x80000000
+                               MX6QDL_PAD_RGMII_TD0__RGMII_TD0         0x1b0b0
+                               MX6QDL_PAD_RGMII_TD1__RGMII_TD1         0x1b0b0
+                               MX6QDL_PAD_RGMII_TD2__RGMII_TD2         0x1b0b0
+                               MX6QDL_PAD_RGMII_TD3__RGMII_TD3         0x1b0b0
+                               MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL   0x1b0b0
+                               /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
+                               MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK    0x0a0b1
+                               /* AR8035 pin strapping: IO voltage: pull up */
+                               MX6QDL_PAD_RGMII_RXC__RGMII_RXC         0x1b0b0
+                               /* AR8035 pin strapping: PHYADDR#0: pull down */
+                               MX6QDL_PAD_RGMII_RD0__RGMII_RD0         0x130b0
+                               /* AR8035 pin strapping: PHYADDR#1: pull down */
+                               MX6QDL_PAD_RGMII_RD1__RGMII_RD1         0x130b0
+                               /* AR8035 pin strapping: MODE#1: pull up */
+                               MX6QDL_PAD_RGMII_RD2__RGMII_RD2         0x1b0b0
+                               /* AR8035 pin strapping: MODE#3: pull up */
+                               MX6QDL_PAD_RGMII_RD3__RGMII_RD3         0x1b0b0
+                               /* AR8035 pin strapping: MODE#0: pull down */
+                               MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL   0x130b0
+
+                               /*
+                                * As the RMII pins are also connected to RGMII
+                                * so that an AR8030 can be placed, set these
+                                * to high-z with the same pulls as above.
+                                * Use the GPIO settings to avoid changing the
+                                * input select registers.
+                                */
+                               MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25      0x03000
+                               MX6QDL_PAD_ENET_RXD0__GPIO1_IO27        0x03000
+                               MX6QDL_PAD_ENET_RXD1__GPIO1_IO26        0x03000
+                       >;
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-microsom.dtsi b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
new file mode 100644 (file)
index 0000000..d729d0b
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013,2014 Russell King
+ */
+
+&iomuxc {
+       microsom {
+               pinctrl_microsom_uart1: microsom-uart1 {
+                       fsl,pins = <
+                               MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA    0x1b0b1
+                               MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA    0x1b0b1
+                       >;
+               };
+
+               pinctrl_microsom_usbotg: microsom-usbotg {
+                       /*
+                        * Similar to pinctrl_usbotg_2, but we want it
+                        * pulled down for a fixed host connection.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+       };
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_microsom_uart1>;
+       status = "okay";
+};
+
+&usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_microsom_usbotg>;
+};
index 02df1914a47c8f971cbe8fb852cea0963833bcff..928f6eef2d592cc6b0fe0318c275cd3c98e75583 100644 (file)
                                status = "okay";
                        };
 
+                       watchdog@fffffd40 {
+                               timeout-sec = <15>;
+                               atmel,max-heartbeat-sec = <16>;
+                               atmel,min-heartbeat-sec = <0>;
+                               status = "okay";
+                       };
                };
 
                nand0: nand@40000000 {
index 427395c083f59e63c3b41bfeeefaa0fab1b66ee0..a5fc83b9c83545ce61738abf4695ceaa47453980 100644 (file)
                        interrupts = <0>;
                };
 
+               prm: prm@48306000 {
+                       compatible = "ti,omap3-prm";
+                       reg = <0x48306000 0x4000>;
+
+                       prm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       prm_clockdomains: clockdomains {
+                       };
+               };
+
+               cm: cm@48004000 {
+                       compatible = "ti,omap3-cm";
+                       reg = <0x48004000 0x4000>;
+
+                       cm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm_clockdomains: clockdomains {
+                       };
+               };
+
+               scrm: scrm@48002000 {
+                       compatible = "ti,omap3-scrm";
+                       reg = <0x48002000 0x2000>;
+
+                       scrm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       scrm_clockdomains: clockdomains {
+                       };
+               };
+
                counter32k: counter@48320000 {
                        compatible = "ti,omap-counter32k";
                        reg = <0x48320000 0x20>;
                };
        };
 };
+
+/include/ "omap3xxx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap3430es1-clocks.dtsi b/arch/arm/boot/dts/omap3430es1-clocks.dtsi
new file mode 100644 (file)
index 0000000..02f6c7f
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Device Tree Source for OMAP3430 ES1 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm_clocks {
+       gfx_l3_ck: gfx_l3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&l3_ick>;
+               reg = <0x0b10>;
+               ti,bit-shift = <0>;
+       };
+
+       gfx_l3_fck: gfx_l3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&l3_ick>;
+               ti,max-div = <7>;
+               reg = <0x0b40>;
+               ti,index-starts-at-one;
+       };
+
+       gfx_l3_ick: gfx_l3_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&gfx_l3_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       gfx_cg1_ck: gfx_cg1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&gfx_l3_fck>;
+               reg = <0x0b00>;
+               ti,bit-shift = <1>;
+       };
+
+       gfx_cg2_ck: gfx_cg2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&gfx_l3_fck>;
+               reg = <0x0b00>;
+               ti,bit-shift = <2>;
+       };
+
+       d2d_26m_fck: d2d_26m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <3>;
+       };
+
+       fshostusb_fck: fshostusb_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <5>;
+       };
+
+       ssi_ssr_gate_fck_3430es1: ssi_ssr_gate_fck_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&corex2_fck>;
+               ti,bit-shift = <0>;
+               reg = <0x0a00>;
+       };
+
+       ssi_ssr_div_fck_3430es1: ssi_ssr_div_fck_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,composite-divider-clock";
+               clocks = <&corex2_fck>;
+               ti,bit-shift = <8>;
+               reg = <0x0a40>;
+               ti,dividers = <0>, <1>, <2>, <3>, <4>, <0>, <6>, <0>, <8>;
+       };
+
+       ssi_ssr_fck_3430es1: ssi_ssr_fck_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&ssi_ssr_gate_fck_3430es1>, <&ssi_ssr_div_fck_3430es1>;
+       };
+
+       ssi_sst_fck_3430es1: ssi_sst_fck_3430es1 {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&ssi_ssr_fck_3430es1>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       hsotgusb_ick_3430es1: hsotgusb_ick_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-no-wait-interface-clock";
+               clocks = <&core_l3_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <4>;
+       };
+
+       fac_ick: fac_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <8>;
+       };
+
+       ssi_l4_ick: ssi_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       ssi_ick_3430es1: ssi_ick_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-no-wait-interface-clock";
+               clocks = <&ssi_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <0>;
+       };
+
+       usb_l4_gate_ick: usb_l4_gate_ick {
+               #clock-cells = <0>;
+               compatible = "ti,composite-interface-clock";
+               clocks = <&l4_ick>;
+               ti,bit-shift = <5>;
+               reg = <0x0a10>;
+       };
+
+       usb_l4_div_ick: usb_l4_div_ick {
+               #clock-cells = <0>;
+               compatible = "ti,composite-divider-clock";
+               clocks = <&l4_ick>;
+               ti,bit-shift = <4>;
+               ti,max-div = <1>;
+               reg = <0x0a40>;
+               ti,index-starts-at-one;
+       };
+
+       usb_l4_ick: usb_l4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&usb_l4_gate_ick>, <&usb_l4_div_ick>;
+       };
+
+       dss1_alwon_fck_3430es1: dss1_alwon_fck_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m4x2_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x0e00>;
+               ti,set-rate-parent;
+       };
+
+       dss_ick_3430es1: dss_ick_3430es1 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-no-wait-interface-clock";
+               clocks = <&l4_ick>;
+               reg = <0x0e10>;
+               ti,bit-shift = <0>;
+       };
+};
+
+&cm_clockdomains {
+       core_l3_clkdm: core_l3_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&sdrc_ick>, <&hsotgusb_ick_3430es1>;
+       };
+
+       gfx_3430es1_clkdm: gfx_3430es1_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&gfx_l3_ck>, <&gfx_cg1_ck>, <&gfx_cg2_ck>;
+       };
+
+       dss_clkdm: dss_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dss_tv_fck>, <&dss_96m_fck>, <&dss2_alwon_fck>,
+                        <&dss1_alwon_fck_3430es1>, <&dss_ick_3430es1>;
+       };
+
+       d2d_clkdm: d2d_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&d2d_26m_fck>;
+       };
+
+       core_l4_clkdm: core_l4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&mmchs2_fck>, <&mmchs1_fck>, <&i2c3_fck>, <&i2c2_fck>,
+                        <&i2c1_fck>, <&mcspi4_fck>, <&mcspi3_fck>,
+                        <&mcspi2_fck>, <&mcspi1_fck>, <&uart2_fck>,
+                        <&uart1_fck>, <&hdq_fck>, <&mmchs2_ick>, <&mmchs1_ick>,
+                        <&hdq_ick>, <&mcspi4_ick>, <&mcspi3_ick>,
+                        <&mcspi2_ick>, <&mcspi1_ick>, <&i2c3_ick>, <&i2c2_ick>,
+                        <&i2c1_ick>, <&uart2_ick>, <&uart1_ick>, <&gpt11_ick>,
+                        <&gpt10_ick>, <&mcbsp5_ick>, <&mcbsp1_ick>,
+                        <&omapctrl_ick>, <&aes2_ick>, <&sha12_ick>,
+                        <&fshostusb_fck>, <&fac_ick>, <&ssi_ick_3430es1>;
+       };
+};
diff --git a/arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi b/arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..b02017b
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * Device Tree Source for OMAP34XX/OMAP36XX clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm_clocks {
+       security_l4_ick2: security_l4_ick2 {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       aes1_ick: aes1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&security_l4_ick2>;
+               ti,bit-shift = <3>;
+               reg = <0x0a14>;
+       };
+
+       rng_ick: rng_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&security_l4_ick2>;
+               reg = <0x0a14>;
+               ti,bit-shift = <2>;
+       };
+
+       sha11_ick: sha11_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&security_l4_ick2>;
+               reg = <0x0a14>;
+               ti,bit-shift = <1>;
+       };
+
+       des1_ick: des1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&security_l4_ick2>;
+               reg = <0x0a14>;
+               ti,bit-shift = <0>;
+       };
+
+       cam_mclk: cam_mclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m5x2_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x0f00>;
+               ti,set-rate-parent;
+       };
+
+       cam_ick: cam_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-no-wait-interface-clock";
+               clocks = <&l4_ick>;
+               reg = <0x0f10>;
+               ti,bit-shift = <0>;
+       };
+
+       csi2_96m_fck: csi2_96m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0f00>;
+               ti,bit-shift = <1>;
+       };
+
+       security_l3_ick: security_l3_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l3_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       pka_ick: pka_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&security_l3_ick>;
+               reg = <0x0a14>;
+               ti,bit-shift = <4>;
+       };
+
+       icr_ick: icr_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <29>;
+       };
+
+       des2_ick: des2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <26>;
+       };
+
+       mspro_ick: mspro_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <23>;
+       };
+
+       mailboxes_ick: mailboxes_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <7>;
+       };
+
+       ssi_l4_ick: ssi_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       sr1_fck: sr1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x0c00>;
+               ti,bit-shift = <6>;
+       };
+
+       sr2_fck: sr2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x0c00>;
+               ti,bit-shift = <7>;
+       };
+
+       sr_l4_ick: sr_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll2_fck: dpll2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&core_ck>;
+               ti,bit-shift = <19>;
+               ti,max-div = <7>;
+               reg = <0x0040>;
+               ti,index-starts-at-one;
+       };
+
+       dpll2_ck: dpll2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-clock";
+               clocks = <&sys_ck>, <&dpll2_fck>;
+               reg = <0x0004>, <0x0024>, <0x0040>, <0x0034>;
+               ti,low-power-stop;
+               ti,lock;
+               ti,low-power-bypass;
+       };
+
+       dpll2_m2_ck: dpll2_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll2_ck>;
+               ti,max-div = <31>;
+               reg = <0x0044>;
+               ti,index-starts-at-one;
+       };
+
+       iva2_ck: iva2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&dpll2_m2_ck>;
+               reg = <0x0000>;
+               ti,bit-shift = <0>;
+       };
+
+       modem_fck: modem_fck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&sys_ck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <31>;
+       };
+
+       sad2d_ick: sad2d_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&l3_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <3>;
+       };
+
+       mad2d_ick: mad2d_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&l3_ick>;
+               reg = <0x0a18>;
+               ti,bit-shift = <3>;
+       };
+
+       mspro_fck: mspro_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <23>;
+       };
+};
+
+&cm_clockdomains {
+       cam_clkdm: cam_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&cam_ick>, <&csi2_96m_fck>;
+       };
+
+       iva2_clkdm: iva2_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&iva2_ck>;
+       };
+
+       dpll2_clkdm: dpll2_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll2_ck>;
+       };
+
+       wkup_clkdm: wkup_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&gpio1_dbck>, <&wdt2_fck>, <&wdt2_ick>, <&wdt1_ick>,
+                        <&gpio1_ick>, <&omap_32ksync_ick>, <&gpt12_ick>,
+                        <&gpt1_ick>, <&sr1_fck>, <&sr2_fck>;
+       };
+
+       d2d_clkdm: d2d_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&modem_fck>, <&sad2d_ick>, <&mad2d_ick>;
+       };
+
+       core_l4_clkdm: core_l4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&mmchs2_fck>, <&mmchs1_fck>, <&i2c3_fck>, <&i2c2_fck>,
+                        <&i2c1_fck>, <&mcspi4_fck>, <&mcspi3_fck>,
+                        <&mcspi2_fck>, <&mcspi1_fck>, <&uart2_fck>,
+                        <&uart1_fck>, <&hdq_fck>, <&mmchs2_ick>, <&mmchs1_ick>,
+                        <&hdq_ick>, <&mcspi4_ick>, <&mcspi3_ick>,
+                        <&mcspi2_ick>, <&mcspi1_ick>, <&i2c3_ick>, <&i2c2_ick>,
+                        <&i2c1_ick>, <&uart2_ick>, <&uart1_ick>, <&gpt11_ick>,
+                        <&gpt10_ick>, <&mcbsp5_ick>, <&mcbsp1_ick>,
+                        <&omapctrl_ick>, <&aes2_ick>, <&sha12_ick>, <&icr_ick>,
+                        <&des2_ick>, <&mspro_ick>, <&mailboxes_ick>,
+                        <&mspro_fck>;
+       };
+};
index 77d124678c9587905b4da1fb9ee18b9759883efe..2e92360da1f36d29d094ef67c8183a8824c0f047 100644 (file)
@@ -39,3 +39,7 @@
                };
        };
 };
+
+/include/ "omap34xx-omap36xx-clocks.dtsi"
+/include/ "omap36xx-omap3430es2plus-clocks.dtsi"
+/include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi b/arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi
new file mode 100644 (file)
index 0000000..af9ae53
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * Device Tree Source for clock data common to OMAP36xx, AM35xx and OMAP3430 ES2+
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&prm_clocks {
+       corex2_d3_fck: corex2_d3_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&corex2_fck>;
+               clock-mult = <1>;
+               clock-div = <3>;
+       };
+
+       corex2_d5_fck: corex2_d5_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&corex2_fck>;
+               clock-mult = <1>;
+               clock-div = <5>;
+       };
+};
+&cm_clocks {
+       dpll5_ck: dpll5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-clock";
+               clocks = <&sys_ck>, <&sys_ck>;
+               reg = <0x0d04>, <0x0d24>, <0x0d4c>, <0x0d34>;
+               ti,low-power-stop;
+               ti,lock;
+       };
+
+       dpll5_m2_ck: dpll5_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll5_ck>;
+               ti,max-div = <31>;
+               reg = <0x0d50>;
+               ti,index-starts-at-one;
+       };
+
+       sgx_gate_fck: sgx_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&core_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0b00>;
+       };
+
+       core_d3_ck: core_d3_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&core_ck>;
+               clock-mult = <1>;
+               clock-div = <3>;
+       };
+
+       core_d4_ck: core_d4_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&core_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       core_d6_ck: core_d6_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&core_ck>;
+               clock-mult = <1>;
+               clock-div = <6>;
+       };
+
+       omap_192m_alwon_fck: omap_192m_alwon_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       core_d2_ck: core_d2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&core_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       sgx_mux_fck: sgx_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&core_d3_ck>, <&core_d4_ck>, <&core_d6_ck>, <&cm_96m_fck>, <&omap_192m_alwon_fck>, <&core_d2_ck>, <&corex2_d3_fck>, <&corex2_d5_fck>;
+               reg = <0x0b40>;
+       };
+
+       sgx_fck: sgx_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&sgx_gate_fck>, <&sgx_mux_fck>;
+       };
+
+       sgx_ick: sgx_ick {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&l3_ick>;
+               reg = <0x0b10>;
+               ti,bit-shift = <0>;
+       };
+
+       cpefuse_fck: cpefuse_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x0a08>;
+               ti,bit-shift = <0>;
+       };
+
+       ts_fck: ts_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&omap_32k_fck>;
+               reg = <0x0a08>;
+               ti,bit-shift = <1>;
+       };
+
+       usbtll_fck: usbtll_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&dpll5_m2_ck>;
+               reg = <0x0a08>;
+               ti,bit-shift = <2>;
+       };
+
+       usbtll_ick: usbtll_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a18>;
+               ti,bit-shift = <2>;
+       };
+
+       mmchs3_ick: mmchs3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <30>;
+       };
+
+       mmchs3_fck: mmchs3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <30>;
+       };
+
+       dss1_alwon_fck_3430es2: dss1_alwon_fck_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,dss-gate-clock";
+               clocks = <&dpll4_m4x2_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x0e00>;
+               ti,set-rate-parent;
+       };
+
+       dss_ick_3430es2: dss_ick_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dss-interface-clock";
+               clocks = <&l4_ick>;
+               reg = <0x0e10>;
+               ti,bit-shift = <0>;
+       };
+
+       usbhost_120m_fck: usbhost_120m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll5_m2_ck>;
+               reg = <0x1400>;
+               ti,bit-shift = <1>;
+       };
+
+       usbhost_48m_fck: usbhost_48m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,dss-gate-clock";
+               clocks = <&omap_48m_fck>;
+               reg = <0x1400>;
+               ti,bit-shift = <0>;
+       };
+
+       usbhost_ick: usbhost_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dss-interface-clock";
+               clocks = <&l4_ick>;
+               reg = <0x1410>;
+               ti,bit-shift = <0>;
+       };
+};
+
+&cm_clockdomains {
+       dpll5_clkdm: dpll5_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll5_ck>;
+       };
+
+       sgx_clkdm: sgx_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&sgx_ick>;
+       };
+
+       dss_clkdm: dss_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dss_tv_fck>, <&dss_96m_fck>, <&dss2_alwon_fck>,
+                        <&dss1_alwon_fck_3430es2>, <&dss_ick_3430es2>;
+       };
+
+       core_l4_clkdm: core_l4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&mmchs2_fck>, <&mmchs1_fck>, <&i2c3_fck>, <&i2c2_fck>,
+                        <&i2c1_fck>, <&mcspi4_fck>, <&mcspi3_fck>,
+                        <&mcspi2_fck>, <&mcspi1_fck>, <&uart2_fck>,
+                        <&uart1_fck>, <&hdq_fck>, <&mmchs2_ick>, <&mmchs1_ick>,
+                        <&hdq_ick>, <&mcspi4_ick>, <&mcspi3_ick>,
+                        <&mcspi2_ick>, <&mcspi1_ick>, <&i2c3_ick>, <&i2c2_ick>,
+                        <&i2c1_ick>, <&uart2_ick>, <&uart1_ick>, <&gpt11_ick>,
+                        <&gpt10_ick>, <&mcbsp5_ick>, <&mcbsp1_ick>,
+                        <&omapctrl_ick>, <&aes2_ick>, <&sha12_ick>,
+                        <&cpefuse_fck>, <&ts_fck>, <&usbtll_fck>,
+                        <&usbtll_ick>, <&mmchs3_ick>, <&mmchs3_fck>;
+       };
+
+       usbhost_clkdm: usbhost_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&usbhost_120m_fck>, <&usbhost_48m_fck>,
+                        <&usbhost_ick>;
+       };
+};
diff --git a/arch/arm/boot/dts/omap36xx-clocks.dtsi b/arch/arm/boot/dts/omap36xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..2fcf253
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Device Tree Source for OMAP36xx clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm_clocks {
+       dpll4_ck: dpll4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-per-j-type-clock";
+               clocks = <&sys_ck>, <&sys_ck>;
+               reg = <0x0d00>, <0x0d20>, <0x0d44>, <0x0d30>;
+       };
+
+       dpll4_m5x2_ck: dpll4_m5x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,hsdiv-gate-clock";
+               clocks = <&dpll4_m5x2_mul_ck>;
+               ti,bit-shift = <0x1e>;
+               reg = <0x0d00>;
+               ti,set-rate-parent;
+               ti,set-bit-to-disable;
+       };
+
+       dpll4_m2x2_ck: dpll4_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,hsdiv-gate-clock";
+               clocks = <&dpll4_m2x2_mul_ck>;
+               ti,bit-shift = <0x1b>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       dpll3_m3x2_ck: dpll3_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,hsdiv-gate-clock";
+               clocks = <&dpll3_m3x2_mul_ck>;
+               ti,bit-shift = <0xc>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       dpll4_m3x2_ck: dpll4_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,hsdiv-gate-clock";
+               clocks = <&dpll4_m3x2_mul_ck>;
+               ti,bit-shift = <0x1c>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       dpll4_m6x2_ck: dpll4_m6x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,hsdiv-gate-clock";
+               clocks = <&dpll4_m6x2_mul_ck>;
+               ti,bit-shift = <0x1f>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       uart4_fck: uart4_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&per_48m_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <18>;
+       };
+};
+
+&cm_clockdomains {
+       dpll4_clkdm: dpll4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll4_ck>;
+       };
+
+       per_clkdm: per_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&uart3_fck>, <&gpio6_dbck>, <&gpio5_dbck>,
+                        <&gpio4_dbck>, <&gpio3_dbck>, <&gpio2_dbck>,
+                        <&wdt3_fck>, <&gpio6_ick>, <&gpio5_ick>, <&gpio4_ick>,
+                        <&gpio3_ick>, <&gpio2_ick>, <&wdt3_ick>, <&uart3_ick>,
+                        <&uart4_ick>, <&gpt9_ick>, <&gpt8_ick>, <&gpt7_ick>,
+                        <&gpt6_ick>, <&gpt5_ick>, <&gpt4_ick>, <&gpt3_ick>,
+                        <&gpt2_ick>, <&mcbsp2_ick>, <&mcbsp3_ick>,
+                        <&mcbsp4_ick>, <&uart4_fck>;
+       };
+};
diff --git a/arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi b/arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi
new file mode 100644 (file)
index 0000000..8ed475d
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Device Tree Source for clock data common to OMAP36xx and OMAP3430 ES2+
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm_clocks {
+       ssi_ssr_gate_fck_3430es2: ssi_ssr_gate_fck_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&corex2_fck>;
+               ti,bit-shift = <0>;
+               reg = <0x0a00>;
+       };
+
+       ssi_ssr_div_fck_3430es2: ssi_ssr_div_fck_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,composite-divider-clock";
+               clocks = <&corex2_fck>;
+               ti,bit-shift = <8>;
+               reg = <0x0a40>;
+               ti,dividers = <0>, <1>, <2>, <3>, <4>, <0>, <6>, <0>, <8>;
+       };
+
+       ssi_ssr_fck_3430es2: ssi_ssr_fck_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&ssi_ssr_gate_fck_3430es2>, <&ssi_ssr_div_fck_3430es2>;
+       };
+
+       ssi_sst_fck_3430es2: ssi_sst_fck_3430es2 {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&ssi_ssr_fck_3430es2>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       hsotgusb_ick_3430es2: hsotgusb_ick_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-hsotgusb-interface-clock";
+               clocks = <&core_l3_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <4>;
+       };
+
+       ssi_l4_ick: ssi_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       ssi_ick_3430es2: ssi_ick_3430es2 {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-ssi-interface-clock";
+               clocks = <&ssi_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <0>;
+       };
+
+       usim_gate_fck: usim_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&omap_96m_fck>;
+               ti,bit-shift = <9>;
+               reg = <0x0c00>;
+       };
+
+       sys_d2_ck: sys_d2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       omap_96m_d2_fck: omap_96m_d2_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_fck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       omap_96m_d4_fck: omap_96m_d4_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_fck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       omap_96m_d8_fck: omap_96m_d8_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_fck>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       omap_96m_d10_fck: omap_96m_d10_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_fck>;
+               clock-mult = <1>;
+               clock-div = <10>;
+       };
+
+       dpll5_m2_d4_ck: dpll5_m2_d4_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll5_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       dpll5_m2_d8_ck: dpll5_m2_d8_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll5_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       dpll5_m2_d16_ck: dpll5_m2_d16_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll5_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <16>;
+       };
+
+       dpll5_m2_d20_ck: dpll5_m2_d20_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll5_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <20>;
+       };
+
+       usim_mux_fck: usim_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_ck>, <&sys_d2_ck>, <&omap_96m_d2_fck>, <&omap_96m_d4_fck>, <&omap_96m_d8_fck>, <&omap_96m_d10_fck>, <&dpll5_m2_d4_ck>, <&dpll5_m2_d8_ck>, <&dpll5_m2_d16_ck>, <&dpll5_m2_d20_ck>;
+               ti,bit-shift = <3>;
+               reg = <0x0c40>;
+               ti,index-starts-at-one;
+       };
+
+       usim_fck: usim_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&usim_gate_fck>, <&usim_mux_fck>;
+       };
+
+       usim_ick: usim_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <9>;
+       };
+};
+
+&cm_clockdomains {
+       core_l3_clkdm: core_l3_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&sdrc_ick>, <&hsotgusb_ick_3430es2>;
+       };
+
+       wkup_clkdm: wkup_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&gpio1_dbck>, <&wdt2_fck>, <&wdt2_ick>, <&wdt1_ick>,
+                        <&gpio1_ick>, <&omap_32ksync_ick>, <&gpt12_ick>,
+                        <&gpt1_ick>, <&usim_ick>;
+       };
+
+       core_l4_clkdm: core_l4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&cpefuse_fck>, <&ts_fck>, <&usbtll_fck>,
+                        <&usbtll_ick>, <&mmchs3_ick>, <&mmchs3_fck>,
+                        <&mmchs2_fck>, <&mmchs1_fck>, <&i2c3_fck>, <&i2c2_fck>,
+                        <&i2c1_fck>, <&mcspi4_fck>, <&mcspi3_fck>,
+                        <&mcspi2_fck>, <&mcspi1_fck>, <&uart2_fck>,
+                        <&uart1_fck>, <&hdq_fck>, <&mmchs2_ick>, <&mmchs1_ick>,
+                        <&hdq_ick>, <&mcspi4_ick>, <&mcspi3_ick>,
+                        <&mcspi2_ick>, <&mcspi1_ick>, <&i2c3_ick>, <&i2c2_ick>,
+                        <&i2c1_ick>, <&uart2_ick>, <&uart1_ick>, <&gpt11_ick>,
+                        <&gpt10_ick>, <&mcbsp5_ick>, <&mcbsp1_ick>,
+                        <&omapctrl_ick>, <&aes2_ick>, <&sha12_ick>,
+                        <&ssi_ick_3430es2>;
+       };
+};
index b7c7bd96c4041e70752e64c7f70c16c4d7ff4456..7e8dee9175d6a4d1d796a5a9389b4a9c3300125d 100644 (file)
@@ -51,3 +51,8 @@
                };
        };
 };
+
+/include/ "omap36xx-clocks.dtsi"
+/include/ "omap34xx-omap36xx-clocks.dtsi"
+/include/ "omap36xx-omap3430es2plus-clocks.dtsi"
+/include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap3xxx-clocks.dtsi b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
new file mode 100644 (file)
index 0000000..cb04d4b
--- /dev/null
@@ -0,0 +1,1660 @@
+/*
+ * Device Tree Source for OMAP3 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&prm_clocks {
+       virt_16_8m_ck: virt_16_8m_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <16800000>;
+       };
+
+       osc_sys_ck: osc_sys_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&virt_12m_ck>, <&virt_13m_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_38_4m_ck>, <&virt_16_8m_ck>;
+               reg = <0x0d40>;
+       };
+
+       sys_ck: sys_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&osc_sys_ck>;
+               ti,bit-shift = <6>;
+               ti,max-div = <3>;
+               reg = <0x1270>;
+               ti,index-starts-at-one;
+       };
+
+       sys_clkout1: sys_clkout1 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&osc_sys_ck>;
+               reg = <0x0d70>;
+               ti,bit-shift = <7>;
+       };
+
+       dpll3_x2_ck: dpll3_x2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll3_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll3_m2x2_ck: dpll3_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll3_m2_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll4_x2_ck: dpll4_x2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       corex2_fck: corex2_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll3_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       wkup_l4_ick: wkup_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+};
+&scrm_clocks {
+       mcbsp5_mux_fck: mcbsp5_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&core_96m_fck>, <&mcbsp_clks>;
+               ti,bit-shift = <4>;
+               reg = <0x02d8>;
+       };
+
+       mcbsp5_fck: mcbsp5_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&mcbsp5_gate_fck>, <&mcbsp5_mux_fck>;
+       };
+
+       mcbsp1_mux_fck: mcbsp1_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&core_96m_fck>, <&mcbsp_clks>;
+               ti,bit-shift = <2>;
+               reg = <0x0274>;
+       };
+
+       mcbsp1_fck: mcbsp1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&mcbsp1_gate_fck>, <&mcbsp1_mux_fck>;
+       };
+
+       mcbsp2_mux_fck: mcbsp2_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&per_96m_fck>, <&mcbsp_clks>;
+               ti,bit-shift = <6>;
+               reg = <0x0274>;
+       };
+
+       mcbsp2_fck: mcbsp2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&mcbsp2_gate_fck>, <&mcbsp2_mux_fck>;
+       };
+
+       mcbsp3_mux_fck: mcbsp3_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&per_96m_fck>, <&mcbsp_clks>;
+               reg = <0x02d8>;
+       };
+
+       mcbsp3_fck: mcbsp3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&mcbsp3_gate_fck>, <&mcbsp3_mux_fck>;
+       };
+
+       mcbsp4_mux_fck: mcbsp4_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&per_96m_fck>, <&mcbsp_clks>;
+               ti,bit-shift = <2>;
+               reg = <0x02d8>;
+       };
+
+       mcbsp4_fck: mcbsp4_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&mcbsp4_gate_fck>, <&mcbsp4_mux_fck>;
+       };
+};
+&cm_clocks {
+       dummy_apb_pclk: dummy_apb_pclk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0x0>;
+       };
+
+       omap_32k_fck: omap_32k_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       virt_12m_ck: virt_12m_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       virt_13m_ck: virt_13m_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <13000000>;
+       };
+
+       virt_19200000_ck: virt_19200000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <19200000>;
+       };
+
+       virt_26000000_ck: virt_26000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <26000000>;
+       };
+
+       virt_38_4m_ck: virt_38_4m_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <38400000>;
+       };
+
+       dpll4_ck: dpll4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-per-clock";
+               clocks = <&sys_ck>, <&sys_ck>;
+               reg = <0x0d00>, <0x0d20>, <0x0d44>, <0x0d30>;
+       };
+
+       dpll4_m2_ck: dpll4_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll4_ck>;
+               ti,max-div = <63>;
+               reg = <0x0d48>;
+               ti,index-starts-at-one;
+       };
+
+       dpll4_m2x2_mul_ck: dpll4_m2x2_mul_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m2_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll4_m2x2_ck: dpll4_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m2x2_mul_ck>;
+               ti,bit-shift = <0x1b>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       omap_96m_alwon_fck: omap_96m_alwon_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll3_ck: dpll3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-core-clock";
+               clocks = <&sys_ck>, <&sys_ck>;
+               reg = <0x0d00>, <0x0d20>, <0x0d40>, <0x0d30>;
+       };
+
+       dpll3_m3_ck: dpll3_m3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll3_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <31>;
+               reg = <0x1140>;
+               ti,index-starts-at-one;
+       };
+
+       dpll3_m3x2_mul_ck: dpll3_m3x2_mul_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll3_m3_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll3_m3x2_ck: dpll3_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll3_m3x2_mul_ck>;
+               ti,bit-shift = <0xc>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       emu_core_alwon_ck: emu_core_alwon_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll3_m3x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       sys_altclk: sys_altclk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0x0>;
+       };
+
+       mcbsp_clks: mcbsp_clks {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0x0>;
+       };
+
+       dpll3_m2_ck: dpll3_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll3_ck>;
+               ti,bit-shift = <27>;
+               ti,max-div = <31>;
+               reg = <0x0d40>;
+               ti,index-starts-at-one;
+       };
+
+       core_ck: core_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll3_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll1_fck: dpll1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&core_ck>;
+               ti,bit-shift = <19>;
+               ti,max-div = <7>;
+               reg = <0x0940>;
+               ti,index-starts-at-one;
+       };
+
+       dpll1_ck: dpll1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-dpll-clock";
+               clocks = <&sys_ck>, <&dpll1_fck>;
+               reg = <0x0904>, <0x0924>, <0x0940>, <0x0934>;
+       };
+
+       dpll1_x2_ck: dpll1_x2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll1_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       /*
+        * DPLL1 x2 post-divider (M2). Feeds mpu_ck below, so this is the
+        * MPU clock path. 5-bit divider field; register value 1..31 maps
+        * directly to the divide ratio (ti,index-starts-at-one).
+        * reg offsets are CM register offsets — verify against the OMAP3
+        * TRM PRCM chapter before changing.
+        */
+       dpll1_x2m2_ck: dpll1_x2m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll1_x2_ck>;
+               ti,max-div = <31>;
+               reg = <0x0944>;
+               ti,index-starts-at-one;
+       };
+
+       /* 1:1 alias of the always-on 96 MHz clock as seen inside the CM */
+       cm_96m_fck: cm_96m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_alwon_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /* 96 MHz functional clock: mux between cm_96m_fck and sys_ck (bit 6) */
+       omap_96m_fck: omap_96m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&cm_96m_fck>, <&sys_ck>;
+               ti,bit-shift = <6>;
+               reg = <0x0d40>;
+       };
+
+       /*
+        * DPLL4 M3 divider -> x2 multiplier -> gate chain; the gated output
+        * is one input of the 54 MHz mux below.
+        */
+       dpll4_m3_ck: dpll4_m3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll4_ck>;
+               ti,bit-shift = <8>;
+               ti,max-div = <32>;
+               reg = <0x0e40>;
+               ti,index-starts-at-one;
+       };
+
+       dpll4_m3x2_mul_ck: dpll4_m3x2_mul_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m3_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       /* Gate on the M3x2 output; the control bit disables when SET (bit 28) */
+       dpll4_m3x2_ck: dpll4_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m3x2_mul_ck>;
+               ti,bit-shift = <0x1c>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       /* 54 MHz functional clock: mux between DPLL4 M3x2 and sys_altclk */
+       omap_54m_fck: omap_54m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll4_m3x2_ck>, <&sys_altclk>;
+               ti,bit-shift = <5>;
+               reg = <0x0d40>;
+       };
+
+       /* 96 MHz / 2 = 48 MHz candidate source */
+       cm_96m_d2_fck: cm_96m_d2_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&cm_96m_fck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       /* 48 MHz functional clock: mux between 96MHz/2 and sys_altclk (bit 3) */
+       omap_48m_fck: omap_48m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&cm_96m_d2_fck>, <&sys_altclk>;
+               ti,bit-shift = <3>;
+               reg = <0x0d40>;
+       };
+
+       /* 12 MHz = 48 MHz / 4, fixed */
+       omap_12m_fck: omap_12m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_48m_fck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       /*
+        * DPLL4 M4/M5/M6 output paths. Each follows the same shape as M3:
+        * divider -> fixed x2 multiplier -> gate (enable bit is active-low
+        * via ti,set-bit-to-disable, bits 29/30/31 of reg 0x0d00).
+        */
+       dpll4_m4_ck: dpll4_m4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll4_ck>;
+               ti,max-div = <32>;
+               reg = <0x0e40>;
+               ti,index-starts-at-one;
+       };
+
+       dpll4_m4x2_mul_ck: dpll4_m4x2_mul_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m4_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll4_m4x2_ck: dpll4_m4x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m4x2_mul_ck>;
+               ti,bit-shift = <0x1d>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       /* M5 path: wider 6-bit divider (1..63) at reg 0x0f40 */
+       dpll4_m5_ck: dpll4_m5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll4_ck>;
+               ti,max-div = <63>;
+               reg = <0x0f40>;
+               ti,index-starts-at-one;
+       };
+
+       dpll4_m5x2_mul_ck: dpll4_m5x2_mul_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m5_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll4_m5x2_ck: dpll4_m5x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m5x2_mul_ck>;
+               ti,bit-shift = <0x1e>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       /* M6 path: divider field lives at bits 24.. of reg 0x1140 */
+       dpll4_m6_ck: dpll4_m6_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll4_ck>;
+               ti,bit-shift = <24>;
+               ti,max-div = <63>;
+               reg = <0x1140>;
+               ti,index-starts-at-one;
+       };
+
+       dpll4_m6x2_mul_ck: dpll4_m6x2_mul_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m6_ck>;
+               clock-mult = <2>;
+               clock-div = <1>;
+       };
+
+       dpll4_m6x2_ck: dpll4_m6x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll4_m6x2_mul_ck>;
+               ti,bit-shift = <0x1f>;
+               reg = <0x0d00>;
+               ti,set-bit-to-disable;
+       };
+
+       /* Emulation (PER) always-on clock, 1:1 from the M6x2 output */
+       emu_per_alwon_ck: emu_per_alwon_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll4_m6x2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /*
+        * sys_clkout2 pin: composite of a no-wait gate (bit 7) and a 4-way
+        * source mux, followed by a power-of-two divider (1/2/4/.../64).
+        */
+       clkout2_src_gate_ck: clkout2_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&core_ck>;
+               ti,bit-shift = <7>;
+               reg = <0x0d70>;
+       };
+
+       clkout2_src_mux_ck: clkout2_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&core_ck>, <&sys_ck>, <&cm_96m_fck>, <&omap_54m_fck>;
+               reg = <0x0d70>;
+       };
+
+       clkout2_src_ck: clkout2_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&clkout2_src_gate_ck>, <&clkout2_src_mux_ck>;
+       };
+
+       sys_clkout2: sys_clkout2 {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&clkout2_src_ck>;
+               ti,bit-shift = <3>;
+               ti,max-div = <64>;
+               reg = <0x0d70>;
+               ti,index-power-of-two;
+       };
+
+       /* MPU clock: 1:1 from the DPLL1 M2 output above */
+       mpu_ck: mpu_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll1_x2m2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /* ARM functional clock: mpu_ck optionally divided by 2 */
+       arm_fck: arm_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mpu_ck>;
+               reg = <0x0924>;
+               ti,max-div = <2>;
+       };
+
+       /* Emulation (MPU) always-on clock */
+       emu_mpu_alwon_ck: emu_mpu_alwon_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&mpu_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /* L3 interconnect clock: core_ck / {1..3} */
+       l3_ick: l3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&core_ck>;
+               ti,max-div = <3>;
+               reg = <0x0a40>;
+               ti,index-starts-at-one;
+       };
+
+       /* L4 interconnect clock: l3_ick / {1..3}, field at bits 2.. */
+       l4_ick: l4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&l3_ick>;
+               ti,bit-shift = <2>;
+               ti,max-div = <3>;
+               reg = <0x0a40>;
+               ti,index-starts-at-one;
+       };
+
+       rm_ick: rm_ick {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&l4_ick>;
+               ti,bit-shift = <1>;
+               ti,max-div = <3>;
+               reg = <0x0c40>;
+               ti,index-starts-at-one;
+       };
+
+       /*
+        * GPT10/GPT11 functional clocks: composite of an enable gate
+        * (CM_FCLKEN bits 11/12) and a 32k-vs-sysclk source mux.
+        */
+       gpt10_gate_fck: gpt10_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <11>;
+               reg = <0x0a00>;
+       };
+
+       gpt10_mux_fck: gpt10_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <6>;
+               reg = <0x0a40>;
+       };
+
+       gpt10_fck: gpt10_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt10_gate_fck>, <&gpt10_mux_fck>;
+       };
+
+       gpt11_gate_fck: gpt11_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <12>;
+               reg = <0x0a00>;
+       };
+
+       gpt11_mux_fck: gpt11_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <7>;
+               reg = <0x0a40>;
+       };
+
+       gpt11_fck: gpt11_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt11_gate_fck>, <&gpt11_mux_fck>;
+       };
+
+       /*
+        * CORE-domain functional clocks. The gate bits below all live in
+        * reg 0x0a00 (CM_FCLKEN1_CORE by offset — confirm against TRM);
+        * "ti,wait-gate-clock" waits for the module to become ready after
+        * enable, per the TI clock bindings.
+        */
+       core_96m_fck: core_96m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       mmchs2_fck: mmchs2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <25>;
+       };
+
+       mmchs1_fck: mmchs1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <24>;
+       };
+
+       i2c3_fck: i2c3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <17>;
+       };
+
+       i2c2_fck: i2c2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <16>;
+       };
+
+       i2c1_fck: i2c1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_96m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <15>;
+       };
+
+       /* McBSP1/5 gates source from the external mcbsp_clks pin */
+       mcbsp5_gate_fck: mcbsp5_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&mcbsp_clks>;
+               ti,bit-shift = <10>;
+               reg = <0x0a00>;
+       };
+
+       mcbsp1_gate_fck: mcbsp1_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&mcbsp_clks>;
+               ti,bit-shift = <9>;
+               reg = <0x0a00>;
+       };
+
+       core_48m_fck: core_48m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_48m_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /* McSPI1-4 and UART1/2 run from the 48 MHz core clock */
+       mcspi4_fck: mcspi4_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <21>;
+       };
+
+       mcspi3_fck: mcspi3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <20>;
+       };
+
+       mcspi2_fck: mcspi2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <19>;
+       };
+
+       mcspi1_fck: mcspi1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <18>;
+       };
+
+       uart2_fck: uart2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <14>;
+       };
+
+       uart1_fck: uart1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_48m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <13>;
+       };
+
+       core_12m_fck: core_12m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_12m_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /* HDQ/1-wire runs from the 12 MHz core clock */
+       hdq_fck: hdq_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_12m_fck>;
+               reg = <0x0a00>;
+               ti,bit-shift = <22>;
+       };
+
+       /*
+        * CORE interface (L3/L4) clocks. The "ti,omap3-interface-clock"
+        * gates below share reg 0x0a10, and each peripheral's ick bit
+        * number matches its fck bit number in reg 0x0a00 above.
+        */
+       core_l3_ick: core_l3_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l3_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       sdrc_ick: sdrc_ick {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&core_l3_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <1>;
+       };
+
+       /* GPMC interface clock is not gateable: plain 1:1 from L3 */
+       gpmc_fck: gpmc_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&core_l3_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       core_l4_ick: core_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       mmchs2_ick: mmchs2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <25>;
+       };
+
+       mmchs1_ick: mmchs1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <24>;
+       };
+
+       hdq_ick: hdq_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <22>;
+       };
+
+       mcspi4_ick: mcspi4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <21>;
+       };
+
+       mcspi3_ick: mcspi3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <20>;
+       };
+
+       mcspi2_ick: mcspi2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <19>;
+       };
+
+       mcspi1_ick: mcspi1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <18>;
+       };
+
+       i2c3_ick: i2c3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <17>;
+       };
+
+       i2c2_ick: i2c2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <16>;
+       };
+
+       i2c1_ick: i2c1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <15>;
+       };
+
+       uart2_ick: uart2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <14>;
+       };
+
+       uart1_ick: uart1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <13>;
+       };
+
+       gpt11_ick: gpt11_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <12>;
+       };
+
+       gpt10_ick: gpt10_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <11>;
+       };
+
+       mcbsp5_ick: mcbsp5_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <10>;
+       };
+
+       mcbsp1_ick: mcbsp1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <9>;
+       };
+
+       omapctrl_ick: omapctrl_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <6>;
+       };
+
+       /*
+        * DSS functional clocks, gate bits in reg 0x0e00. Note dss_tv_fck
+        * and dss_96m_fck intentionally share bit 2 (same enable bit, two
+        * parents) — matches upstream data, do not "deduplicate".
+        */
+       dss_tv_fck: dss_tv_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&omap_54m_fck>;
+               reg = <0x0e00>;
+               ti,bit-shift = <2>;
+       };
+
+       dss_96m_fck: dss_96m_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&omap_96m_fck>;
+               reg = <0x0e00>;
+               ti,bit-shift = <2>;
+       };
+
+       dss2_alwon_fck: dss2_alwon_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_ck>;
+               reg = <0x0e00>;
+               ti,bit-shift = <1>;
+       };
+
+       /* 0 Hz placeholder for consumers that need a clock handle */
+       dummy_ck: dummy_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       /* GPT1 (WKUP domain): gate bit 0 of 0x0c00 + 32k/sysclk mux */
+       gpt1_gate_fck: gpt1_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x0c00>;
+       };
+
+       gpt1_mux_fck: gpt1_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               reg = <0x0c40>;
+       };
+
+       gpt1_fck: gpt1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt1_gate_fck>, <&gpt1_mux_fck>;
+       };
+
+       aes2_ick: aes2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               ti,bit-shift = <28>;
+               reg = <0x0a10>;
+       };
+
+       /* WKUP-domain 32 kHz clock and its consumers (reg 0x0c00/0x0c10) */
+       wkup_32k_fck: wkup_32k_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_32k_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       /* GPIO1 debounce clock */
+       gpio1_dbck: gpio1_dbck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&wkup_32k_fck>;
+               reg = <0x0c00>;
+               ti,bit-shift = <3>;
+       };
+
+       sha12_ick: sha12_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&core_l4_ick>;
+               reg = <0x0a10>;
+               ti,bit-shift = <27>;
+       };
+
+       wdt2_fck: wdt2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&wkup_32k_fck>;
+               reg = <0x0c00>;
+               ti,bit-shift = <5>;
+       };
+
+       wdt2_ick: wdt2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <5>;
+       };
+
+       wdt1_ick: wdt1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <4>;
+       };
+
+       gpio1_ick: gpio1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <3>;
+       };
+
+       omap_32ksync_ick: omap_32ksync_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <2>;
+       };
+
+       gpt12_ick: gpt12_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <1>;
+       };
+
+       gpt1_ick: gpt1_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&wkup_l4_ick>;
+               reg = <0x0c10>;
+               ti,bit-shift = <0>;
+       };
+
+       /*
+        * PER-domain functional clocks. Gate bits live in reg 0x1000,
+        * GPT source-mux bits in reg 0x1040. GPT2..GPT9 all repeat the
+        * same gate(bit N) + 32k/sysclk-mux(bit N-3) + composite pattern.
+        */
+       per_96m_fck: per_96m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_96m_alwon_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       per_48m_fck: per_48m_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_48m_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       uart3_fck: uart3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&per_48m_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <11>;
+       };
+
+       gpt2_gate_fck: gpt2_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <3>;
+               reg = <0x1000>;
+       };
+
+       gpt2_mux_fck: gpt2_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               reg = <0x1040>;
+       };
+
+       gpt2_fck: gpt2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt2_gate_fck>, <&gpt2_mux_fck>;
+       };
+
+       gpt3_gate_fck: gpt3_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <4>;
+               reg = <0x1000>;
+       };
+
+       gpt3_mux_fck: gpt3_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x1040>;
+       };
+
+       gpt3_fck: gpt3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt3_gate_fck>, <&gpt3_mux_fck>;
+       };
+
+       gpt4_gate_fck: gpt4_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <5>;
+               reg = <0x1000>;
+       };
+
+       gpt4_mux_fck: gpt4_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x1040>;
+       };
+
+       gpt4_fck: gpt4_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt4_gate_fck>, <&gpt4_mux_fck>;
+       };
+
+       gpt5_gate_fck: gpt5_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <6>;
+               reg = <0x1000>;
+       };
+
+       gpt5_mux_fck: gpt5_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <3>;
+               reg = <0x1040>;
+       };
+
+       gpt5_fck: gpt5_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt5_gate_fck>, <&gpt5_mux_fck>;
+       };
+
+       gpt6_gate_fck: gpt6_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <7>;
+               reg = <0x1000>;
+       };
+
+       gpt6_mux_fck: gpt6_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <4>;
+               reg = <0x1040>;
+       };
+
+       gpt6_fck: gpt6_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt6_gate_fck>, <&gpt6_mux_fck>;
+       };
+
+       gpt7_gate_fck: gpt7_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1000>;
+       };
+
+       gpt7_mux_fck: gpt7_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <5>;
+               reg = <0x1040>;
+       };
+
+       gpt7_fck: gpt7_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt7_gate_fck>, <&gpt7_mux_fck>;
+       };
+
+       gpt8_gate_fck: gpt8_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <9>;
+               reg = <0x1000>;
+       };
+
+       gpt8_mux_fck: gpt8_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <6>;
+               reg = <0x1040>;
+       };
+
+       gpt8_fck: gpt8_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt8_gate_fck>, <&gpt8_mux_fck>;
+       };
+
+       gpt9_gate_fck: gpt9_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&sys_ck>;
+               ti,bit-shift = <10>;
+               reg = <0x1000>;
+       };
+
+       gpt9_mux_fck: gpt9_mux_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&omap_32k_fck>, <&sys_ck>;
+               ti,bit-shift = <7>;
+               reg = <0x1040>;
+       };
+
+       gpt9_fck: gpt9_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&gpt9_gate_fck>, <&gpt9_mux_fck>;
+       };
+
+       /*
+        * PER-domain 32 kHz always-on clock, GPIO debounce clocks (fck gate
+        * bits in reg 0x1000), and PER L4 interface clocks (ick bits in
+        * reg 0x1010 — each ick bit matches the peripheral's fck bit).
+        */
+       per_32k_alwon_fck: per_32k_alwon_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&omap_32k_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       gpio6_dbck: gpio6_dbck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&per_32k_alwon_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <17>;
+       };
+
+       gpio5_dbck: gpio5_dbck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&per_32k_alwon_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <16>;
+       };
+
+       gpio4_dbck: gpio4_dbck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&per_32k_alwon_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <15>;
+       };
+
+       gpio3_dbck: gpio3_dbck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&per_32k_alwon_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <14>;
+       };
+
+       gpio2_dbck: gpio2_dbck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&per_32k_alwon_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <13>;
+       };
+
+       wdt3_fck: wdt3_fck {
+               #clock-cells = <0>;
+               compatible = "ti,wait-gate-clock";
+               clocks = <&per_32k_alwon_fck>;
+               reg = <0x1000>;
+               ti,bit-shift = <12>;
+       };
+
+       per_l4_ick: per_l4_ick {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&l4_ick>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       gpio6_ick: gpio6_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <17>;
+       };
+
+       gpio5_ick: gpio5_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <16>;
+       };
+
+       gpio4_ick: gpio4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <15>;
+       };
+
+       gpio3_ick: gpio3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <14>;
+       };
+
+       gpio2_ick: gpio2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <13>;
+       };
+
+       wdt3_ick: wdt3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <12>;
+       };
+
+       uart3_ick: uart3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <11>;
+       };
+
+       /* UART4 (bit 18) — presumably 3630-only; confirm against TRM/SoC variant */
+       uart4_ick: uart4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <18>;
+       };
+
+       gpt9_ick: gpt9_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <10>;
+       };
+
+       gpt8_ick: gpt8_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <9>;
+       };
+
+       gpt7_ick: gpt7_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <8>;
+       };
+
+       gpt6_ick: gpt6_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <7>;
+       };
+
+       gpt5_ick: gpt5_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <6>;
+       };
+
+       gpt4_ick: gpt4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <5>;
+       };
+
+       gpt3_ick: gpt3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <4>;
+       };
+
+       gpt2_ick: gpt2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <3>;
+       };
+
+       mcbsp2_ick: mcbsp2_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <0>;
+       };
+
+       mcbsp3_ick: mcbsp3_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <1>;
+       };
+
+       mcbsp4_ick: mcbsp4_ick {
+               #clock-cells = <0>;
+               compatible = "ti,omap3-interface-clock";
+               clocks = <&per_l4_ick>;
+               reg = <0x1010>;
+               ti,bit-shift = <2>;
+       };
+
+       mcbsp2_gate_fck: mcbsp2_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&mcbsp_clks>;
+               ti,bit-shift = <0>;
+               reg = <0x1000>;
+       };
+
+       mcbsp3_gate_fck: mcbsp3_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&mcbsp_clks>;
+               ti,bit-shift = <1>;
+               reg = <0x1000>;
+       };
+
+       mcbsp4_gate_fck: mcbsp4_gate_fck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-gate-clock";
+               clocks = <&mcbsp_clks>;
+               ti,bit-shift = <2>;
+               reg = <0x1000>;
+       };
+
+       emu_src_mux_ck: emu_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_ck>, <&emu_core_alwon_ck>, <&emu_per_alwon_ck>, <&emu_mpu_alwon_ck>;
+               reg = <0x1140>;
+       };
+
+       emu_src_ck: emu_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,clkdm-gate-clock";
+               clocks = <&emu_src_mux_ck>;
+       };
+
+       pclk_fck: pclk_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&emu_src_ck>;
+               ti,bit-shift = <8>;
+               ti,max-div = <7>;
+               reg = <0x1140>;
+               ti,index-starts-at-one;
+       };
+
+       pclkx2_fck: pclkx2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&emu_src_ck>;
+               ti,bit-shift = <6>;
+               ti,max-div = <3>;
+               reg = <0x1140>;
+               ti,index-starts-at-one;
+       };
+
+       atclk_fck: atclk_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&emu_src_ck>;
+               ti,bit-shift = <4>;
+               ti,max-div = <3>;
+               reg = <0x1140>;
+               ti,index-starts-at-one;
+       };
+
+       traceclk_src_fck: traceclk_src_fck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_ck>, <&emu_core_alwon_ck>, <&emu_per_alwon_ck>, <&emu_mpu_alwon_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x1140>;
+       };
+
+       traceclk_fck: traceclk_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&traceclk_src_fck>;
+               ti,bit-shift = <11>;
+               ti,max-div = <7>;
+               reg = <0x1140>;
+               ti,index-starts-at-one;
+       };
+
+       secure_32k_fck: secure_32k_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       gpt12_fck: gpt12_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&secure_32k_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       wdt1_fck: wdt1_fck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&secure_32k_fck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+};
+
+&cm_clockdomains {
+       core_l3_clkdm: core_l3_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&sdrc_ick>;
+       };
+
+       dpll3_clkdm: dpll3_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll3_ck>;
+       };
+
+       dpll1_clkdm: dpll1_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll1_ck>;
+       };
+
+       per_clkdm: per_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&uart3_fck>, <&gpio6_dbck>, <&gpio5_dbck>,
+                        <&gpio4_dbck>, <&gpio3_dbck>, <&gpio2_dbck>,
+                        <&wdt3_fck>, <&gpio6_ick>, <&gpio5_ick>, <&gpio4_ick>,
+                        <&gpio3_ick>, <&gpio2_ick>, <&wdt3_ick>, <&uart3_ick>,
+                        <&uart4_ick>, <&gpt9_ick>, <&gpt8_ick>, <&gpt7_ick>,
+                        <&gpt6_ick>, <&gpt5_ick>, <&gpt4_ick>, <&gpt3_ick>,
+                        <&gpt2_ick>, <&mcbsp2_ick>, <&mcbsp3_ick>,
+                        <&mcbsp4_ick>;
+       };
+
+       emu_clkdm: emu_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&emu_src_ck>;
+       };
+
+       dpll4_clkdm: dpll4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll4_ck>;
+       };
+
+       wkup_clkdm: wkup_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&gpio1_dbck>, <&wdt2_fck>, <&wdt2_ick>, <&wdt1_ick>,
+                        <&gpio1_ick>, <&omap_32ksync_ick>, <&gpt12_ick>,
+                        <&gpt1_ick>;
+       };
+
+       dss_clkdm: dss_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dss_tv_fck>, <&dss_96m_fck>, <&dss2_alwon_fck>;
+       };
+
+       core_l4_clkdm: core_l4_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&mmchs2_fck>, <&mmchs1_fck>, <&i2c3_fck>, <&i2c2_fck>,
+                        <&i2c1_fck>, <&mcspi4_fck>, <&mcspi3_fck>,
+                        <&mcspi2_fck>, <&mcspi1_fck>, <&uart2_fck>,
+                        <&uart1_fck>, <&hdq_fck>, <&mmchs2_ick>, <&mmchs1_ick>,
+                        <&hdq_ick>, <&mcspi4_ick>, <&mcspi3_ick>,
+                        <&mcspi2_ick>, <&mcspi1_ick>, <&i2c3_ick>, <&i2c2_ick>,
+                        <&i2c1_ick>, <&uart2_ick>, <&uart1_ick>, <&gpt11_ick>,
+                        <&gpt10_ick>, <&mcbsp5_ick>, <&mcbsp1_ick>,
+                        <&omapctrl_ick>, <&aes2_ick>, <&sha12_ick>;
+       };
+};
index a1e05853afcd583c15da3ad259c8f715992b2d7f..d3f8a6e8ca205ef1585c279c1ce50dc8703fa6b5 100644 (file)
                interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
 
+               cm1: cm1@4a004000 {
+                       compatible = "ti,omap4-cm1";
+                       reg = <0x4a004000 0x2000>;
+
+                       cm1_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm1_clockdomains: clockdomains {
+                       };
+               };
+
+               prm: prm@4a306000 {
+                       compatible = "ti,omap4-prm";
+                       reg = <0x4a306000 0x3000>;
+
+                       prm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       prm_clockdomains: clockdomains {
+                       };
+               };
+
+               cm2: cm2@4a008000 {
+                       compatible = "ti,omap4-cm2";
+                       reg = <0x4a008000 0x3000>;
+
+                       cm2_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm2_clockdomains: clockdomains {
+                       };
+               };
+
+               scrm: scrm@4a30a000 {
+                       compatible = "ti,omap4-scrm";
+                       reg = <0x4a30a000 0x2000>;
+
+                       scrm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       scrm_clockdomains: clockdomains {
+                       };
+               };
+
                counter32k: counter@4a304000 {
                        compatible = "ti,omap-counter32k";
                        reg = <0x4a304000 0x20>;
                };
        };
 };
+
+/include/ "omap44xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap443x-clocks.dtsi b/arch/arm/boot/dts/omap443x-clocks.dtsi
new file mode 100644 (file)
index 0000000..2bd2166
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Device Tree Source for OMAP4 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&prm_clocks {
+       bandgap_fclk: bandgap_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1888>;
+       };
+};
index ab607a19a6137fe0c9f3fa779f6934131cc623f3..8c1cfad30d603714ce267820186dd8c6635ff923 100644 (file)
@@ -44,3 +44,5 @@
                };
        };
 };
+
+/include/ "omap443x-clocks.dtsi"
index 11566bed00358485634f1def11ffb980a2a48981..6b32f520741a9cb1398bf586a614a9358f04d3e5 100644 (file)
@@ -52,3 +52,5 @@
                };
        };
 };
+
+/include/ "omap446x-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap446x-clocks.dtsi b/arch/arm/boot/dts/omap446x-clocks.dtsi
new file mode 100644 (file)
index 0000000..be033e9
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Device Tree Source for OMAP4 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&prm_clocks {
+       div_ts_ck: div_ts_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&l4_wkup_clk_mux_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1888>;
+               ti,dividers = <8>, <16>, <32>;
+       };
+
+       bandgap_ts_fclk: bandgap_ts_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&div_ts_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1888>;
+       };
+};
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..c821ff5
--- /dev/null
@@ -0,0 +1,1651 @@
+/*
+ * Device Tree Source for OMAP4 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm1_clocks {
+       extalt_clkin_ck: extalt_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <59000000>;
+       };
+
+       pad_clks_src_ck: pad_clks_src_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       pad_clks_ck: pad_clks_ck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&pad_clks_src_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0108>;
+       };
+
+       pad_slimbus_core_clks_ck: pad_slimbus_core_clks_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       secure_32k_clk_src_ck: secure_32k_clk_src_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       slimbus_src_clk: slimbus_src_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       slimbus_clk: slimbus_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&slimbus_src_clk>;
+               ti,bit-shift = <10>;
+               reg = <0x0108>;
+       };
+
+       sys_32k_ck: sys_32k_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32768>;
+       };
+
+       virt_12000000_ck: virt_12000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <12000000>;
+       };
+
+       virt_13000000_ck: virt_13000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <13000000>;
+       };
+
+       virt_16800000_ck: virt_16800000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <16800000>;
+       };
+
+       virt_19200000_ck: virt_19200000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <19200000>;
+       };
+
+       virt_26000000_ck: virt_26000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <26000000>;
+       };
+
+       virt_27000000_ck: virt_27000000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <27000000>;
+       };
+
+       virt_38400000_ck: virt_38400000_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <38400000>;
+       };
+
+       tie_low_clock_ck: tie_low_clock_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+
+       utmi_phy_clkout_ck: utmi_phy_clkout_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <60000000>;
+       };
+
+       xclk60mhsp1_ck: xclk60mhsp1_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <60000000>;
+       };
+
+       xclk60mhsp2_ck: xclk60mhsp2_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <60000000>;
+       };
+
+       xclk60motg_ck: xclk60motg_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <60000000>;
+       };
+
+       dpll_abe_ck: dpll_abe_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-m4xen-clock";
+               clocks = <&abe_dpll_refclk_mux_ck>, <&abe_dpll_bypass_clk_mux_ck>;
+               reg = <0x01e0>, <0x01e4>, <0x01ec>, <0x01e8>;
+       };
+
+       dpll_abe_x2_ck: dpll_abe_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_abe_ck>;
+               reg = <0x01f0>;
+       };
+
+       dpll_abe_m2x2_ck: dpll_abe_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01f0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       abe_24m_fclk: abe_24m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       abe_clk: abe_clk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2x2_ck>;
+               ti,max-div = <4>;
+               reg = <0x0108>;
+               ti,index-power-of-two;
+       };
+
+       aess_fclk: aess_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&abe_clk>;
+               ti,bit-shift = <24>;
+               ti,max-div = <2>;
+               reg = <0x0528>;
+       };
+
+       dpll_abe_m3x2_ck: dpll_abe_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01f4>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       core_hsd_byp_clk_mux_ck: core_hsd_byp_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_abe_m3x2_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x012c>;
+       };
+
+       dpll_core_ck: dpll_core_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-core-clock";
+               clocks = <&sys_clkin_ck>, <&core_hsd_byp_clk_mux_ck>;
+               reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
+       };
+
+       dpll_core_x2_ck: dpll_core_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_core_ck>;
+       };
+
+       dpll_core_m6x2_ck: dpll_core_m6x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0140>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_core_m2_ck: dpll_core_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0130>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       ddrphy_ck: ddrphy_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       dpll_core_m5x2_ck: dpll_core_m5x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x013c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       div_core_ck: div_core_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_m5x2_ck>;
+               reg = <0x0100>;
+               ti,max-div = <2>;
+       };
+
+       div_iva_hs_clk: div_iva_hs_clk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_m5x2_ck>;
+               ti,max-div = <4>;
+               reg = <0x01dc>;
+               ti,index-power-of-two;
+       };
+
+       div_mpu_hs_clk: div_mpu_hs_clk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_m5x2_ck>;
+               ti,max-div = <4>;
+               reg = <0x019c>;
+               ti,index-power-of-two;
+       };
+
+       dpll_core_m4x2_ck: dpll_core_m4x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0138>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dll_clk_div_ck: dll_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_core_m4x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       dpll_abe_m2_ck: dpll_abe_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_ck>;
+               ti,max-div = <31>;
+               reg = <0x01f0>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_core_m3x2_gate_ck: dpll_core_m3x2_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0134>;
+       };
+
+       dpll_core_m3x2_div_ck: dpll_core_m3x2_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               reg = <0x0134>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_core_m3x2_ck: dpll_core_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&dpll_core_m3x2_gate_ck>, <&dpll_core_m3x2_div_ck>;
+       };
+
+       dpll_core_m7x2_ck: dpll_core_m7x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_core_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0144>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       iva_hsd_byp_clk_mux_ck: iva_hsd_byp_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&div_iva_hs_clk>;
+               ti,bit-shift = <23>;
+               reg = <0x01ac>;
+       };
+
+       dpll_iva_ck: dpll_iva_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&iva_hsd_byp_clk_mux_ck>;
+               reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+       };
+
+       dpll_iva_x2_ck: dpll_iva_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_iva_ck>;
+       };
+
+       dpll_iva_m4x2_ck: dpll_iva_m4x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_iva_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01b8>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_iva_m5x2_ck: dpll_iva_m5x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_iva_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01bc>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_mpu_ck: dpll_mpu_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&div_mpu_hs_clk>;
+               reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
+       };
+
+       dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_mpu_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0170>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       per_hs_clk_div_ck: per_hs_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m3x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       usb_hs_clk_div_ck: usb_hs_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m3x2_ck>;
+               clock-mult = <1>;
+               clock-div = <3>;
+       };
+
+       l3_div_ck: l3_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&div_core_ck>;
+               ti,bit-shift = <4>;
+               ti,max-div = <2>;
+               reg = <0x0100>;
+       };
+
+       l4_div_ck: l4_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&l3_div_ck>;
+               ti,bit-shift = <8>;
+               ti,max-div = <2>;
+               reg = <0x0100>;
+       };
+
+       lp_clk_div_ck: lp_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <16>;
+       };
+
+       mpu_periphclk: mpu_periphclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_mpu_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       ocp_abe_iclk: ocp_abe_iclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&aess_fclk>;
+               ti,bit-shift = <24>;
+               reg = <0x0528>;
+               ti,dividers = <2>, <1>;
+       };
+
+       per_abe_24m_fclk: per_abe_24m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_abe_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       dmic_sync_mux_ck: dmic_sync_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
+               ti,bit-shift = <25>;
+               reg = <0x0538>;
+       };
+
+       func_dmic_abe_gfclk: func_dmic_abe_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dmic_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x0538>;
+       };
+
+       mcasp_sync_mux_ck: mcasp_sync_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
+               ti,bit-shift = <25>;
+               reg = <0x0540>;
+       };
+
+       func_mcasp_abe_gfclk: func_mcasp_abe_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&mcasp_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x0540>;
+       };
+
+       mcbsp1_sync_mux_ck: mcbsp1_sync_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
+               ti,bit-shift = <25>;
+               reg = <0x0548>;
+       };
+
+       func_mcbsp1_gfclk: func_mcbsp1_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&mcbsp1_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x0548>;
+       };
+
+       mcbsp2_sync_mux_ck: mcbsp2_sync_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
+               ti,bit-shift = <25>;
+               reg = <0x0550>;
+       };
+
+       func_mcbsp2_gfclk: func_mcbsp2_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&mcbsp2_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x0550>;
+       };
+
+       mcbsp3_sync_mux_ck: mcbsp3_sync_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
+               ti,bit-shift = <25>;
+               reg = <0x0558>;
+       };
+
+       func_mcbsp3_gfclk: func_mcbsp3_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&mcbsp3_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+               ti,bit-shift = <24>;
+               reg = <0x0558>;
+       };
+
+       slimbus1_fclk_1: slimbus1_fclk_1 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_24m_clk>;
+               ti,bit-shift = <9>;
+               reg = <0x0560>;
+       };
+
+       slimbus1_fclk_0: slimbus1_fclk_0 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&abe_24m_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x0560>;
+       };
+
+       slimbus1_fclk_2: slimbus1_fclk_2 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&pad_clks_ck>;
+               ti,bit-shift = <10>;
+               reg = <0x0560>;
+       };
+
+       slimbus1_slimbus_clk: slimbus1_slimbus_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&slimbus_clk>;
+               ti,bit-shift = <11>;
+               reg = <0x0560>;
+       };
+
+       /*
+        * GP timer 5..8 source muxes: each selects between the divided
+        * system clock and the 32 kHz clock via bit 24 of its own
+        * register (0x0568/0x0570/0x0578/0x0580 — offsets per OMAP4 TRM).
+        */
+       timer5_sync_mux: timer5_sync_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0568>;
+       };
+
+       timer6_sync_mux: timer6_sync_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0570>;
+       };
+
+       timer7_sync_mux: timer7_sync_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0578>;
+       };
+
+       timer8_sync_mux: timer8_sync_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0580>;
+       };
+
+       /*
+        * 0 Hz placeholder clock, for consumers that require a clock
+        * handle where no real clock exists.
+        */
+       dummy_ck: dummy_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <0>;
+       };
+};
+/*
+ * Clocks hosted by the PRM (Power and Reset Manager) register space.
+ * Register offsets and bit positions come from the OMAP4 TRM PRCM
+ * chapter and cannot be cross-checked from this hunk.
+ */
+&prm_clocks {
+       /*
+        * Board input oscillator selector. ti,index-starts-at-one:
+        * register value 0 is invalid, so parent index 0 maps to
+        * register value 1 (per the ti,mux-clock binding).
+        */
+       sys_clkin_ck: sys_clkin_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&virt_12000000_ck>, <&virt_13000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
+               reg = <0x0110>;
+               ti,index-starts-at-one;
+       };
+
+       abe_dpll_bypass_clk_mux_ck: abe_dpll_bypass_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x0108>;
+       };
+
+       abe_dpll_refclk_mux_ck: abe_dpll_refclk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               reg = <0x010c>;
+       };
+
+       /*
+        * NOTE(review): modeled as a 1:1 fixed factor of sys_clkin_ck
+        * rather than a mux — presumably the hardware mux is not worth
+        * modeling; confirm against the TRM if this clock matters.
+        */
+       dbgclk_mux_ck: dbgclk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       l4_wkup_clk_mux_ck: l4_wkup_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&lp_clk_div_ck>;
+               reg = <0x0108>;
+       };
+
+       syc_clk_div_ck: syc_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&sys_clkin_ck>;
+               reg = <0x0100>;
+               ti,max-div = <2>;
+       };
+
+       /* GPIO1 debounce clock gate, fed from the 32 kHz clock. */
+       gpio1_dbclk: gpio1_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1838>;
+       };
+
+       dmt1_clk_mux: dmt1_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1840>;
+       };
+
+       /* USIM divider: only the two listed ratios (14, 18) are valid. */
+       usim_ck: usim_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m4x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1858>;
+               ti,dividers = <14>, <18>;
+       };
+
+       usim_fclk: usim_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&usim_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1858>;
+       };
+
+       pmd_stm_clock_mux_ck: pmd_stm_clock_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m6x2_ck>, <&tie_low_clock_ck>;
+               ti,bit-shift = <20>;
+               reg = <0x1a20>;
+       };
+
+       pmd_trace_clk_mux_ck: pmd_trace_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m6x2_ck>, <&tie_low_clock_ck>;
+               ti,bit-shift = <22>;
+               reg = <0x1a20>;
+       };
+
+       /* ti,index-power-of-two: divider value is 2^(register field). */
+       stm_clk_div_ck: stm_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&pmd_stm_clock_mux_ck>;
+               ti,bit-shift = <27>;
+               ti,max-div = <64>;
+               reg = <0x1a20>;
+               ti,index-power-of-two;
+       };
+
+       /* Divider table indexed by field value; <0> entries are invalid. */
+       trace_clk_div_div_ck: trace_clk_div_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&pmd_trace_clk_mux_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1a20>;
+               ti,dividers = <0>, <1>, <2>, <0>, <4>;
+       };
+
+       /* Gate tied to its clockdomain (see emu_sys_clkdm below). */
+       trace_clk_div_ck: trace_clk_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,clkdm-gate-clock";
+               clocks = <&trace_clk_div_div_ck>;
+       };
+};
+
+/* PRM clockdomain: associates trace_clk_div_ck with the EMU domain. */
+&prm_clockdomains {
+       emu_sys_clkdm: emu_sys_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&trace_clk_div_ck>;
+       };
+};
+
+/*
+ * Clocks hosted by the CM2 (Clock Manager instance 2) register space:
+ * the PER and USB DPLLs with their output dividers, the derived func_*
+ * rates, and per-module functional/optional clock gates and muxes.
+ * All offsets and bit positions are OMAP4 TRM data; they cannot be
+ * cross-checked from this hunk.
+ */
+&cm2_clocks {
+       per_hsd_byp_clk_mux_ck: per_hsd_byp_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&per_hs_clk_div_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x014c>;
+       };
+
+       /*
+        * PER DPLL. 'reg' lists the four PRCM DPLL registers; see the
+        * ti,omap4-dpll-clock binding for the expected ordering.
+        */
+       dpll_per_ck: dpll_per_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin_ck>, <&per_hsd_byp_clk_mux_ck>;
+               reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
+       };
+
+       dpll_per_m2_ck: dpll_per_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_ck>;
+               ti,max-div = <31>;
+               reg = <0x0150>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_per_x2_ck: dpll_per_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_per_ck>;
+               reg = <0x0150>;
+       };
+
+       /*
+        * Mx2 output dividers: ti,autoidle-shift locates the output's
+        * autoidle control bit and ti,invert-autoidle-bit flips its
+        * polarity (per the ti,divider-clock binding). Same pattern for
+        * the m4x2/m5x2/m6x2/m7x2 nodes below.
+        */
+       dpll_per_m2x2_ck: dpll_per_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0150>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       /* m3x2 output is modeled as gate + divider composed below. */
+       dpll_per_m3x2_gate_ck: dpll_per_m3x2_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0154>;
+       };
+
+       dpll_per_m3x2_div_ck: dpll_per_m3x2_div_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               reg = <0x0154>;
+               ti,index-starts-at-one;
+       };
+
+       dpll_per_m3x2_ck: dpll_per_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&dpll_per_m3x2_gate_ck>, <&dpll_per_m3x2_div_ck>;
+       };
+
+       dpll_per_m4x2_ck: dpll_per_m4x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0158>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m5x2_ck: dpll_per_m5x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x015c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m6x2_ck: dpll_per_m6x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0160>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m7x2_ck: dpll_per_m7x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0164>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       /* USB DPLL (J-type: fractional multiplier variant). */
+       dpll_usb_ck: dpll_usb_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-j-type-clock";
+               clocks = <&sys_clkin_ck>, <&usb_hs_clk_div_ck>;
+               reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
+       };
+
+       /* CLKDCOLDO output: 1:1 factor with its own autoidle control. */
+       dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&dpll_usb_ck>;
+               ti,clock-div = <1>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01b4>;
+               ti,clock-mult = <1>;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_usb_m2_ck: dpll_usb_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0190>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       ducati_clk_mux_ck: ducati_clk_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&div_core_ck>, <&dpll_per_m6x2_ck>;
+               reg = <0x0100>;
+       };
+
+       /* Fixed-rate taps off the PER DPLL outputs (func_* rates). */
+       func_12m_fclk: func_12m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <16>;
+       };
+
+       func_24m_clk: func_24m_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       func_24mc_fclk: func_24mc_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <8>;
+       };
+
+       func_48m_fclk: func_48m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               reg = <0x0108>;
+               ti,dividers = <4>, <8>;
+       };
+
+       func_48mc_fclk: func_48mc_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       func_64m_fclk: func_64m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m4x2_ck>;
+               reg = <0x0108>;
+               ti,dividers = <2>, <4>;
+       };
+
+       func_96m_fclk: func_96m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               reg = <0x0108>;
+               ti,dividers = <2>, <4>;
+       };
+
+       init_60m_fclk: init_60m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               reg = <0x0104>;
+               ti,dividers = <1>, <8>;
+       };
+
+       per_abe_nc_fclk: per_abe_nc_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_abe_m2_ck>;
+               reg = <0x0108>;
+               ti,max-div = <2>;
+       };
+
+       /* Per-module functional clock gates (CM_*_CLKCTRL offsets). */
+       aes1_fck: aes1_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3_div_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x15a0>;
+       };
+
+       aes2_fck: aes2_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3_div_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x15a8>;
+       };
+
+       dss_sys_clk: dss_sys_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&syc_clk_div_ck>;
+               ti,bit-shift = <10>;
+               reg = <0x1120>;
+       };
+
+       dss_tv_clk: dss_tv_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&extalt_clkin_ck>;
+               ti,bit-shift = <11>;
+               reg = <0x1120>;
+       };
+
+       /* ti,set-rate-parent: rate requests propagate to the PER DPLL. */
+       dss_dss_clk: dss_dss_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_per_m5x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1120>;
+               ti,set-rate-parent;
+       };
+
+       dss_48mhz_clk: dss_48mhz_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_48mc_fclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1120>;
+       };
+
+       dss_fck: dss_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3_div_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x1120>;
+       };
+
+       fdif_fck: fdif_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m4x2_ck>;
+               ti,bit-shift = <24>;
+               ti,max-div = <4>;
+               reg = <0x1028>;
+               ti,index-power-of-two;
+       };
+
+       /* GPIO2..6 debounce clock gates, all from the 32 kHz clock. */
+       gpio2_dbclk: gpio2_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1460>;
+       };
+
+       gpio3_dbclk: gpio3_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1468>;
+       };
+
+       gpio4_dbclk: gpio4_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1470>;
+       };
+
+       gpio5_dbclk: gpio5_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1478>;
+       };
+
+       gpio6_dbclk: gpio6_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1480>;
+       };
+
+       sgx_clk_mux: sgx_clk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_m7x2_ck>, <&dpll_per_m7x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1220>;
+       };
+
+       hsi_fck: hsi_fck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               ti,max-div = <4>;
+               reg = <0x1338>;
+               ti,index-power-of-two;
+       };
+
+       iss_ctrlclk: iss_ctrlclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_96m_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1020>;
+       };
+
+       /* McBSP4 clock selection: same two-stage mux pattern as McBSP3. */
+       mcbsp4_sync_mux_ck: mcbsp4_sync_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_96m_fclk>, <&per_abe_nc_fclk>;
+               ti,bit-shift = <25>;
+               reg = <0x14e0>;
+       };
+
+       per_mcbsp4_gfclk: per_mcbsp4_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&mcbsp4_sync_mux_ck>, <&pad_clks_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x14e0>;
+       };
+
+       hsmmc1_fclk: hsmmc1_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_64m_fclk>, <&func_96m_fclk>;
+               ti,bit-shift = <24>;
+               reg = <0x1328>;
+       };
+
+       hsmmc2_fclk: hsmmc2_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_64m_fclk>, <&func_96m_fclk>;
+               ti,bit-shift = <24>;
+               reg = <0x1330>;
+       };
+
+       ocp2scp_usb_phy_phy_48m: ocp2scp_usb_phy_phy_48m {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_48m_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x13e0>;
+       };
+
+       sha2md5_fck: sha2md5_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3_div_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x15c8>;
+       };
+
+       /* SLIMbus2 optional clock gates, shared register 0x1538. */
+       slimbus2_fclk_1: slimbus2_fclk_1 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&per_abe_24m_fclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1538>;
+       };
+
+       slimbus2_fclk_0: slimbus2_fclk_0 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_24mc_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1538>;
+       };
+
+       slimbus2_slimbus_clk: slimbus2_slimbus_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&pad_slimbus_core_clks_ck>;
+               ti,bit-shift = <10>;
+               reg = <0x1538>;
+       };
+
+       smartreflex_core_fck: smartreflex_core_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l4_wkup_clk_mux_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0638>;
+       };
+
+       smartreflex_iva_fck: smartreflex_iva_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l4_wkup_clk_mux_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0630>;
+       };
+
+       smartreflex_mpu_fck: smartreflex_mpu_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l4_wkup_clk_mux_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0628>;
+       };
+
+       /* CM2-side GP timer source muxes (sys clock vs. 32 kHz). */
+       cm2_dm10_mux: cm2_dm10_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1428>;
+       };
+
+       cm2_dm11_mux: cm2_dm11_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1430>;
+       };
+
+       cm2_dm2_mux: cm2_dm2_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1438>;
+       };
+
+       cm2_dm3_mux: cm2_dm3_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1440>;
+       };
+
+       cm2_dm4_mux: cm2_dm4_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1448>;
+       };
+
+       cm2_dm9_mux: cm2_dm9_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1450>;
+       };
+
+       usb_host_fs_fck: usb_host_fs_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_48mc_fclk>;
+               ti,bit-shift = <1>;
+               reg = <0x13d0>;
+       };
+
+       /*
+        * USB host HS port clocks: per-port UTMI muxes (internal 60 MHz
+        * vs. external pad clock) plus gates, all in register 0x1358.
+        */
+       utmi_p1_gfclk: utmi_p1_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&init_60m_fclk>, <&xclk60mhsp1_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_utmi_p1_clk: usb_host_hs_utmi_p1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&utmi_p1_gfclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1358>;
+       };
+
+       utmi_p2_gfclk: utmi_p2_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&init_60m_fclk>, <&xclk60mhsp2_ck>;
+               ti,bit-shift = <25>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_utmi_p2_clk: usb_host_hs_utmi_p2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&utmi_p2_gfclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_utmi_p3_clk: usb_host_hs_utmi_p3_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <10>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_hsic480m_p1_clk: usb_host_hs_hsic480m_p1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               ti,bit-shift = <13>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_hsic60m_p1_clk: usb_host_hs_hsic60m_p1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <11>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_hsic60m_p2_clk: usb_host_hs_hsic60m_p2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <12>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_hsic480m_p2_clk: usb_host_hs_hsic480m_p2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               ti,bit-shift = <14>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_func48mclk: usb_host_hs_func48mclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_48mc_fclk>;
+               ti,bit-shift = <15>;
+               reg = <0x1358>;
+       };
+
+       usb_host_hs_fck: usb_host_hs_fck {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <1>;
+               reg = <0x1358>;
+       };
+
+       otg_60m_gfclk: otg_60m_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&utmi_phy_clkout_ck>, <&xclk60motg_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1360>;
+       };
+
+       usb_otg_hs_xclk: usb_otg_hs_xclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&otg_60m_gfclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1360>;
+       };
+
+       usb_otg_hs_ick: usb_otg_hs_ick {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3_div_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x1360>;
+       };
+
+       usb_phy_cm_clk32k: usb_phy_cm_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0640>;
+       };
+
+       /* USB TLL per-channel and interface clock gates (reg 0x1368). */
+       usb_tll_hs_usb_ch2_clk: usb_tll_hs_usb_ch2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <10>;
+               reg = <0x1368>;
+       };
+
+       usb_tll_hs_usb_ch0_clk: usb_tll_hs_usb_ch0_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1368>;
+       };
+
+       usb_tll_hs_usb_ch1_clk: usb_tll_hs_usb_ch1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&init_60m_fclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1368>;
+       };
+
+       usb_tll_hs_ick: usb_tll_hs_ick {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l4_div_ck>;
+               ti,bit-shift = <0>;
+               reg = <0x1368>;
+       };
+};
+
+/* CM2 clockdomain: ties the USB DPLL and FS host clock to L3INIT. */
+&cm2_clockdomains {
+       l3_init_clkdm: l3_init_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll_usb_ck>, <&usb_host_fs_fck>;
+       };
+};
+
+&scrm_clocks {
+       /*
+        * SCRM auxiliary clocks AUXCLK0..AUXCLK5: six identical chains,
+        * each a composite of source gate (bit 8) + source mux (bit 1)
+        * feeding a divider (bits 16.., max 16), in registers
+        * 0x0310/0x0314/0x0318/0x031c/0x0320/0x0324 respectively.
+        */
+       auxclk0_src_gate_ck: auxclk0_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0310>;
+       };
+
+       auxclk0_src_mux_ck: auxclk0_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0310>;
+       };
+
+       auxclk0_src_ck: auxclk0_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk0_src_gate_ck>, <&auxclk0_src_mux_ck>;
+       };
+
+       auxclk0_ck: auxclk0_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk0_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0310>;
+       };
+
+       auxclk1_src_gate_ck: auxclk1_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0314>;
+       };
+
+       auxclk1_src_mux_ck: auxclk1_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0314>;
+       };
+
+       auxclk1_src_ck: auxclk1_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk1_src_gate_ck>, <&auxclk1_src_mux_ck>;
+       };
+
+       auxclk1_ck: auxclk1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk1_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0314>;
+       };
+
+       auxclk2_src_gate_ck: auxclk2_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0318>;
+       };
+
+       auxclk2_src_mux_ck: auxclk2_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0318>;
+       };
+
+       auxclk2_src_ck: auxclk2_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk2_src_gate_ck>, <&auxclk2_src_mux_ck>;
+       };
+
+       auxclk2_ck: auxclk2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk2_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0318>;
+       };
+
+       auxclk3_src_gate_ck: auxclk3_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x031c>;
+       };
+
+       auxclk3_src_mux_ck: auxclk3_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x031c>;
+       };
+
+       auxclk3_src_ck: auxclk3_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk3_src_gate_ck>, <&auxclk3_src_mux_ck>;
+       };
+
+       auxclk3_ck: auxclk3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk3_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x031c>;
+       };
+
+       auxclk4_src_gate_ck: auxclk4_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0320>;
+       };
+
+       auxclk4_src_mux_ck: auxclk4_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0320>;
+       };
+
+       auxclk4_src_ck: auxclk4_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk4_src_gate_ck>, <&auxclk4_src_mux_ck>;
+       };
+
+       auxclk4_ck: auxclk4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk4_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0320>;
+       };
+
+       auxclk5_src_gate_ck: auxclk5_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0324>;
+       };
+
+       auxclk5_src_mux_ck: auxclk5_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0324>;
+       };
+
+       auxclk5_src_ck: auxclk5_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk5_src_gate_ck>, <&auxclk5_src_mux_ck>;
+       };
+
+       auxclk5_ck: auxclk5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk5_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0324>;
+       };
+
+       auxclkreq0_ck: auxclkreq0_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0210>;
+       };
+
+       auxclkreq1_ck: auxclkreq1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0214>;
+       };
+
+       auxclkreq2_ck: auxclkreq2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0218>;
+       };
+
+       auxclkreq3_ck: auxclkreq3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x021c>;
+       };
+
+       auxclkreq4_ck: auxclkreq4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0220>;
+       };
+
+       auxclkreq5_ck: auxclkreq5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0224>;
+       };
+};
index ab9a21ae82f34a7a19a27529a8c8bfd04bef5610..a72813a9663eccd7075b185489cbda4694fd0003 100644 (file)
                interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
 
+               prm: prm@4ae06000 {
+                       compatible = "ti,omap5-prm";
+                       reg = <0x4ae06000 0x3000>;
+
+                       prm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       prm_clockdomains: clockdomains {
+                       };
+               };
+
+               cm_core_aon: cm_core_aon@4a004000 {
+                       compatible = "ti,omap5-cm-core-aon";
+                       reg = <0x4a004000 0x2000>;
+
+                       cm_core_aon_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm_core_aon_clockdomains: clockdomains {
+                       };
+               };
+
+               scrm: scrm@4ae0a000 {
+                       compatible = "ti,omap5-scrm";
+                       reg = <0x4ae0a000 0x2000>;
+
+                       scrm_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       scrm_clockdomains: clockdomains {
+                       };
+               };
+
+               cm_core: cm_core@4a008000 {
+                       compatible = "ti,omap5-cm-core";
+                       reg = <0x4a008000 0x3000>;
+
+                       cm_core_clocks: clocks {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                       };
+
+                       cm_core_clockdomains: clockdomains {
+                       };
+               };
+
                counter32k: counter@4ae04000 {
                        compatible = "ti,omap-counter32k";
                        reg = <0x4ae04000 0x40>;
                };
        };
 };
+
+/include/ "omap54xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
new file mode 100644 (file)
index 0000000..d487fda
--- /dev/null
@@ -0,0 +1,1399 @@
+/*
+ * Device Tree Source for OMAP5 clock data
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+&cm_core_aon_clocks {
+	/*
+	 * Clocks owned by the CM_CORE_AON module.  Every "reg" value here is
+	 * an offset into the cm_core_aon register space declared in
+	 * omap5.dtsi -- NOTE(review): offsets look like PRCM CM_CORE_AON
+	 * registers, confirm against the OMAP5 TRM.
+	 */
+	pad_clks_src_ck: pad_clks_src_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <12000000>;
+	};
+
+	pad_clks_ck: pad_clks_ck {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&pad_clks_src_ck>;
+		ti,bit-shift = <8>;
+		reg = <0x0108>;
+	};
+
+	secure_32k_clk_src_ck: secure_32k_clk_src_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
+	slimbus_src_clk: slimbus_src_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <12000000>;
+	};
+
+	slimbus_clk: slimbus_clk {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&slimbus_src_clk>;
+		ti,bit-shift = <10>;
+		reg = <0x0108>;
+	};
+
+	sys_32k_ck: sys_32k_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
+	/*
+	 * virt_*_ck: the fixed candidate rates for the board oscillator;
+	 * one of them is selected by the sys_clkin mux in &prm_clocks.
+	 */
+	virt_12000000_ck: virt_12000000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <12000000>;
+	};
+
+	virt_13000000_ck: virt_13000000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <13000000>;
+	};
+
+	virt_16800000_ck: virt_16800000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <16800000>;
+	};
+
+	virt_19200000_ck: virt_19200000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <19200000>;
+	};
+
+	virt_26000000_ck: virt_26000000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <26000000>;
+	};
+
+	virt_27000000_ck: virt_27000000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <27000000>;
+	};
+
+	virt_38400000_ck: virt_38400000_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <38400000>;
+	};
+
+	xclk60mhsp1_ck: xclk60mhsp1_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <60000000>;
+	};
+
+	xclk60mhsp2_ck: xclk60mhsp2_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <60000000>;
+	};
+
+	/* ABE DPLL (M4XEN type) and its post-dividers/derived clocks. */
+	dpll_abe_ck: dpll_abe_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-m4xen-clock";
+		clocks = <&abe_dpll_clk_mux>, <&abe_dpll_bypass_clk_mux>;
+		reg = <0x01e0>, <0x01e4>, <0x01ec>, <0x01e8>;
+	};
+
+	dpll_abe_x2_ck: dpll_abe_x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-x2-clock";
+		clocks = <&dpll_abe_ck>;
+	};
+
+	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_abe_x2_ck>;
+		ti,max-div = <31>;
+		ti,autoidle-shift = <8>;
+		reg = <0x01f0>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	abe_24m_fclk: abe_24m_fclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_abe_m2x2_ck>;
+		clock-mult = <1>;
+		clock-div = <8>;
+	};
+
+	abe_clk: abe_clk {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_abe_m2x2_ck>;
+		ti,max-div = <4>;
+		reg = <0x0108>;
+		ti,index-power-of-two;
+	};
+
+	abe_iclk: abe_iclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&abe_clk>;
+		clock-mult = <1>;
+		clock-div = <2>;
+	};
+
+	abe_lp_clk_div: abe_lp_clk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_abe_m2x2_ck>;
+		clock-mult = <1>;
+		clock-div = <16>;
+	};
+
+	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_abe_x2_ck>;
+		ti,max-div = <31>;
+		ti,autoidle-shift = <8>;
+		reg = <0x01f4>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	/* CORE DPLL and its HSDIVIDER (h11..h24) / M2 / M3x2 outputs. */
+	dpll_core_ck: dpll_core_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-core-clock";
+		clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+		reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
+	};
+
+	dpll_core_x2_ck: dpll_core_x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-x2-clock";
+		clocks = <&dpll_core_ck>;
+	};
+
+	dpll_core_h21x2_ck: dpll_core_h21x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0150>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	c2c_fclk: c2c_fclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_core_h21x2_ck>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	c2c_iclk: c2c_iclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&c2c_fclk>;
+		clock-mult = <1>;
+		clock-div = <2>;
+	};
+
+	dpll_core_h11x2_ck: dpll_core_h11x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0138>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_h12x2_ck: dpll_core_h12x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x013c>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_h13x2_ck: dpll_core_h13x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0140>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_h14x2_ck: dpll_core_h14x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0144>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_h22x2_ck: dpll_core_h22x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0154>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_h23x2_ck: dpll_core_h23x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0158>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_h24x2_ck: dpll_core_h24x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x015c>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_m2_ck: dpll_core_m2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_ck>;
+		ti,max-div = <31>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0130>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_core_m3x2_ck: dpll_core_m3x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_x2_ck>;
+		ti,max-div = <31>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0134>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	/* IVA DPLL, sourced from sys_clkin with a CORE-derived bypass. */
+	iva_dpll_hs_clk_div: iva_dpll_hs_clk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_core_h12x2_ck>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	dpll_iva_ck: dpll_iva_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-clock";
+		clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+	};
+
+	dpll_iva_x2_ck: dpll_iva_x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-x2-clock";
+		clocks = <&dpll_iva_ck>;
+	};
+
+	dpll_iva_h11x2_ck: dpll_iva_h11x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_iva_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x01b8>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	dpll_iva_h12x2_ck: dpll_iva_h12x2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_iva_x2_ck>;
+		ti,max-div = <63>;
+		ti,autoidle-shift = <8>;
+		reg = <0x01bc>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	/* MPU DPLL and the L3/L4 interconnect clocks derived from CORE. */
+	mpu_dpll_hs_clk_div: mpu_dpll_hs_clk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_core_h12x2_ck>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	dpll_mpu_ck: dpll_mpu_ck {
+		#clock-cells = <0>;
+		compatible = "ti,omap4-dpll-clock";
+		clocks = <&sys_clkin>, <&mpu_dpll_hs_clk_div>;
+		reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
+	};
+
+	dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_mpu_ck>;
+		ti,max-div = <31>;
+		ti,autoidle-shift = <8>;
+		reg = <0x0170>;
+		ti,index-starts-at-one;
+		ti,invert-autoidle-bit;
+	};
+
+	per_dpll_hs_clk_div: per_dpll_hs_clk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_abe_m3x2_ck>;
+		clock-mult = <1>;
+		clock-div = <2>;
+	};
+
+	usb_dpll_hs_clk_div: usb_dpll_hs_clk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_abe_m3x2_ck>;
+		clock-mult = <1>;
+		clock-div = <3>;
+	};
+
+	l3_iclk_div: l3_iclk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_core_h12x2_ck>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	gpu_l3_iclk: gpu_l3_iclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&l3_iclk_div>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	l4_root_clk_div: l4_root_clk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&l3_iclk_div>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	/*
+	 * ABE peripheral functional clocks: each DMIC/McASP/McBSP has a
+	 * sync mux plus a gfclk mux sharing the same register offset.
+	 */
+	slimbus1_slimbus_clk: slimbus1_slimbus_clk {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&slimbus_clk>;
+		ti,bit-shift = <11>;
+		reg = <0x0560>;
+	};
+
+	aess_fclk: aess_fclk {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&abe_clk>;
+		ti,bit-shift = <24>;
+		ti,max-div = <2>;
+		reg = <0x0528>;
+	};
+
+	dmic_sync_mux_ck: dmic_sync_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
+		ti,bit-shift = <26>;
+		reg = <0x0538>;
+	};
+
+	dmic_gfclk: dmic_gfclk {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&dmic_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+		ti,bit-shift = <24>;
+		reg = <0x0538>;
+	};
+
+	mcasp_sync_mux_ck: mcasp_sync_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
+		ti,bit-shift = <26>;
+		reg = <0x0540>;
+	};
+
+	mcasp_gfclk: mcasp_gfclk {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&mcasp_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+		ti,bit-shift = <24>;
+		reg = <0x0540>;
+	};
+
+	mcbsp1_sync_mux_ck: mcbsp1_sync_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
+		ti,bit-shift = <26>;
+		reg = <0x0548>;
+	};
+
+	mcbsp1_gfclk: mcbsp1_gfclk {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&mcbsp1_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+		ti,bit-shift = <24>;
+		reg = <0x0548>;
+	};
+
+	mcbsp2_sync_mux_ck: mcbsp2_sync_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
+		ti,bit-shift = <26>;
+		reg = <0x0550>;
+	};
+
+	mcbsp2_gfclk: mcbsp2_gfclk {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&mcbsp2_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+		ti,bit-shift = <24>;
+		reg = <0x0550>;
+	};
+
+	mcbsp3_sync_mux_ck: mcbsp3_sync_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
+		ti,bit-shift = <26>;
+		reg = <0x0558>;
+	};
+
+	mcbsp3_gfclk: mcbsp3_gfclk {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&mcbsp3_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
+		ti,bit-shift = <24>;
+		reg = <0x0558>;
+	};
+
+	/* ABE-domain GP timers: mux between the DSS sys clock and 32 kHz. */
+	timer5_gfclk_mux: timer5_gfclk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
+		ti,bit-shift = <24>;
+		reg = <0x0568>;
+	};
+
+	timer6_gfclk_mux: timer6_gfclk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
+		ti,bit-shift = <24>;
+		reg = <0x0570>;
+	};
+
+	timer7_gfclk_mux: timer7_gfclk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
+		ti,bit-shift = <24>;
+		reg = <0x0578>;
+	};
+
+	timer8_gfclk_mux: timer8_gfclk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
+		ti,bit-shift = <24>;
+		reg = <0x0580>;
+	};
+
+	/* 0 Hz placeholder used where a consumer needs a dummy parent. */
+	dummy_ck: dummy_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <0>;
+	};
+};
+&prm_clocks {
+	/*
+	 * Clocks owned by the PRM module; "reg" values are offsets into the
+	 * prm register space declared in omap5.dtsi -- NOTE(review): confirm
+	 * offsets against the OMAP5 TRM PRM register map.
+	 */
+	sys_clkin: sys_clkin {
+		/* Selects the actual board oscillator rate from the virt_* set. */
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&virt_12000000_ck>, <&virt_13000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
+		reg = <0x0110>;
+		ti,index-starts-at-one;
+	};
+
+	abe_dpll_bypass_clk_mux: abe_dpll_bypass_clk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&sys_32k_ck>;
+		reg = <0x0108>;
+	};
+
+	abe_dpll_clk_mux: abe_dpll_clk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&sys_32k_ck>;
+		reg = <0x010c>;
+	};
+
+	custefuse_sys_gfclk_div: custefuse_sys_gfclk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&sys_clkin>;
+		clock-mult = <1>;
+		clock-div = <2>;
+	};
+
+	dss_syc_gfclk_div: dss_syc_gfclk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&sys_clkin>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	wkupaon_iclk_mux: wkupaon_iclk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&abe_lp_clk_div>;
+		reg = <0x0108>;
+	};
+
+	l3instr_ts_gclk_div: l3instr_ts_gclk_div {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&wkupaon_iclk_mux>;
+		clock-mult = <1>;
+		clock-div = <1>;
+	};
+
+	/* GPIO1 32 kHz debounce clock gate. */
+	gpio1_dbclk: gpio1_dbclk {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&sys_32k_ck>;
+		ti,bit-shift = <8>;
+		reg = <0x1938>;
+	};
+
+	/* GP timer 1 functional clock: sys_clkin or 32 kHz. */
+	timer1_gfclk_mux: timer1_gfclk_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&sys_32k_ck>;
+		ti,bit-shift = <24>;
+		reg = <0x1940>;
+	};
+};
+&cm_core_clocks {
+       dpll_per_ck: dpll_per_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+               reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
+       };
+
+       dpll_per_x2_ck: dpll_per_x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-x2-clock";
+               clocks = <&dpll_per_ck>;
+       };
+
+       dpll_per_h11x2_ck: dpll_per_h11x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0158>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_h12x2_ck: dpll_per_h12x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x015c>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_h14x2_ck: dpll_per_h14x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <63>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0164>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m2_ck: dpll_per_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0150>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m2x2_ck: dpll_per_m2x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0150>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_per_m3x2_ck: dpll_per_m3x2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_x2_ck>;
+               ti,max-div = <31>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0154>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_unipro1_ck: dpll_unipro1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin>, <&sys_clkin>;
+               reg = <0x0200>, <0x0204>, <0x020c>, <0x0208>;
+       };
+
+       dpll_unipro1_clkdcoldo: dpll_unipro1_clkdcoldo {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_unipro1_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_unipro1_m2_ck: dpll_unipro1_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_unipro1_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0210>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_unipro2_ck: dpll_unipro2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-clock";
+               clocks = <&sys_clkin>, <&sys_clkin>;
+               reg = <0x01c0>, <0x01c4>, <0x01cc>, <0x01c8>;
+       };
+
+       dpll_unipro2_clkdcoldo: dpll_unipro2_clkdcoldo {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_unipro2_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_unipro2_m2_ck: dpll_unipro2_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_unipro2_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x01d0>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       dpll_usb_ck: dpll_usb_ck {
+               #clock-cells = <0>;
+               compatible = "ti,omap4-dpll-j-type-clock";
+               clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+               reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
+       };
+
+       dpll_usb_clkdcoldo: dpll_usb_clkdcoldo {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_usb_ck>;
+               clock-mult = <1>;
+               clock-div = <1>;
+       };
+
+       dpll_usb_m2_ck: dpll_usb_m2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_ck>;
+               ti,max-div = <127>;
+               ti,autoidle-shift = <8>;
+               reg = <0x0190>;
+               ti,index-starts-at-one;
+               ti,invert-autoidle-bit;
+       };
+
+       func_128m_clk: func_128m_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_h11x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       func_12m_fclk: func_12m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <16>;
+       };
+
+       func_24m_clk: func_24m_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       func_48m_fclk: func_48m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <4>;
+       };
+
+       func_96m_fclk: func_96m_fclk {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               clock-mult = <1>;
+               clock-div = <2>;
+       };
+
+       l3init_60m_fclk: l3init_60m_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               reg = <0x0104>;
+               ti,dividers = <1>, <8>;
+       };
+
+       dss_32khz_clk: dss_32khz_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <11>;
+               reg = <0x1420>;
+       };
+
+       dss_48mhz_clk: dss_48mhz_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_48m_fclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1420>;
+       };
+
+       dss_dss_clk: dss_dss_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_per_h12x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1420>;
+       };
+
+       dss_sys_clk: dss_sys_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dss_syc_gfclk_div>;
+               ti,bit-shift = <10>;
+               reg = <0x1420>;
+       };
+
+       gpio2_dbclk: gpio2_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1060>;
+       };
+
+       gpio3_dbclk: gpio3_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1068>;
+       };
+
+       gpio4_dbclk: gpio4_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1070>;
+       };
+
+       gpio5_dbclk: gpio5_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1078>;
+       };
+
+       gpio6_dbclk: gpio6_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1080>;
+       };
+
+       gpio7_dbclk: gpio7_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1110>;
+       };
+
+       gpio8_dbclk: gpio8_dbclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1118>;
+       };
+
+       iss_ctrlclk: iss_ctrlclk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&func_96m_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1320>;
+       };
+
+       lli_txphy_clk: lli_txphy_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_unipro1_clkdcoldo>;
+               ti,bit-shift = <8>;
+               reg = <0x0f20>;
+       };
+
+       lli_txphy_ls_clk: lli_txphy_ls_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_unipro1_m2_ck>;
+               ti,bit-shift = <9>;
+               reg = <0x0f20>;
+       };
+
+       mmc1_32khz_clk: mmc1_32khz_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x1628>;
+       };
+
+       sata_ref_clk: sata_ref_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_clkin>;
+               ti,bit-shift = <8>;
+               reg = <0x1688>;
+       };
+
+       usb_host_hs_hsic480m_p1_clk: usb_host_hs_hsic480m_p1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               ti,bit-shift = <13>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_hsic480m_p2_clk: usb_host_hs_hsic480m_p2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               ti,bit-shift = <14>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_hsic480m_p3_clk: usb_host_hs_hsic480m_p3_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_m2_ck>;
+               ti,bit-shift = <7>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_hsic60m_p1_clk: usb_host_hs_hsic60m_p1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <11>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_hsic60m_p2_clk: usb_host_hs_hsic60m_p2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <12>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_hsic60m_p3_clk: usb_host_hs_hsic60m_p3_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <6>;
+               reg = <0x1658>;
+       };
+
+       utmi_p1_gfclk: utmi_p1_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3init_60m_fclk>, <&xclk60mhsp1_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_utmi_p1_clk: usb_host_hs_utmi_p1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&utmi_p1_gfclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1658>;
+       };
+
+       utmi_p2_gfclk: utmi_p2_gfclk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&l3init_60m_fclk>, <&xclk60mhsp2_ck>;
+               ti,bit-shift = <25>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_utmi_p2_clk: usb_host_hs_utmi_p2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&utmi_p2_gfclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1658>;
+       };
+
+       usb_host_hs_utmi_p3_clk: usb_host_hs_utmi_p3_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <10>;
+               reg = <0x1658>;
+       };
+
+       usb_otg_ss_refclk960m: usb_otg_ss_refclk960m {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&dpll_usb_clkdcoldo>;
+               ti,bit-shift = <8>;
+               reg = <0x16f0>;
+       };
+
+       usb_phy_cm_clk32k: usb_phy_cm_clk32k {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&sys_32k_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0640>;
+       };
+
+       usb_tll_hs_usb_ch0_clk: usb_tll_hs_usb_ch0_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <8>;
+               reg = <0x1668>;
+       };
+
+       usb_tll_hs_usb_ch1_clk: usb_tll_hs_usb_ch1_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <9>;
+               reg = <0x1668>;
+       };
+
+       usb_tll_hs_usb_ch2_clk: usb_tll_hs_usb_ch2_clk {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+               clocks = <&l3init_60m_fclk>;
+               ti,bit-shift = <10>;
+               reg = <0x1668>;
+       };
+
+       fdif_fclk: fdif_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_h11x2_ck>;
+               ti,bit-shift = <24>;
+               ti,max-div = <2>;
+               reg = <0x1328>;
+       };
+
+       gpu_core_gclk_mux: gpu_core_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1520>;
+       };
+
+       gpu_hyd_gclk_mux: gpu_hyd_gclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>;
+               ti,bit-shift = <25>;
+               reg = <0x1520>;
+       };
+
+       hsi_fclk: hsi_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               ti,max-div = <2>;
+               reg = <0x1638>;
+       };
+
+       mmc1_fclk_mux: mmc1_fclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1628>;
+       };
+
+       mmc1_fclk: mmc1_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mmc1_fclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <2>;
+               reg = <0x1628>;
+       };
+
+       mmc2_fclk_mux: mmc2_fclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1630>;
+       };
+
+       mmc2_fclk: mmc2_fclk {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&mmc2_fclk_mux>;
+               ti,bit-shift = <25>;
+               ti,max-div = <2>;
+               reg = <0x1630>;
+       };
+
+       timer10_gfclk_mux: timer10_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1028>;
+       };
+
+       timer11_gfclk_mux: timer11_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1030>;
+       };
+
+       timer2_gfclk_mux: timer2_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1038>;
+       };
+
+       timer3_gfclk_mux: timer3_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1040>;
+       };
+
+       timer4_gfclk_mux: timer4_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1048>;
+       };
+
+       timer9_gfclk_mux: timer9_gfclk_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&sys_32k_ck>;
+               ti,bit-shift = <24>;
+               reg = <0x1050>;
+       };
+};
+
+&cm_core_clockdomains {
+       l3init_clkdm: l3init_clkdm {
+               compatible = "ti,clockdomain";
+               clocks = <&dpll_usb_ck>;
+       };
+};
+
+&scrm_clocks {
+       auxclk0_src_gate_ck: auxclk0_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0310>;
+       };
+
+       auxclk0_src_mux_ck: auxclk0_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0310>;
+       };
+
+       auxclk0_src_ck: auxclk0_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk0_src_gate_ck>, <&auxclk0_src_mux_ck>;
+       };
+
+       auxclk0_ck: auxclk0_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk0_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0310>;
+       };
+
+       auxclk1_src_gate_ck: auxclk1_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0314>;
+       };
+
+       auxclk1_src_mux_ck: auxclk1_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0314>;
+       };
+
+       auxclk1_src_ck: auxclk1_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk1_src_gate_ck>, <&auxclk1_src_mux_ck>;
+       };
+
+       auxclk1_ck: auxclk1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk1_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0314>;
+       };
+
+       auxclk2_src_gate_ck: auxclk2_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0318>;
+       };
+
+       auxclk2_src_mux_ck: auxclk2_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0318>;
+       };
+
+       auxclk2_src_ck: auxclk2_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk2_src_gate_ck>, <&auxclk2_src_mux_ck>;
+       };
+
+       auxclk2_ck: auxclk2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk2_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0318>;
+       };
+
+       auxclk3_src_gate_ck: auxclk3_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x031c>;
+       };
+
+       auxclk3_src_mux_ck: auxclk3_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x031c>;
+       };
+
+       auxclk3_src_ck: auxclk3_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk3_src_gate_ck>, <&auxclk3_src_mux_ck>;
+       };
+
+       auxclk3_ck: auxclk3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk3_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x031c>;
+       };
+
+       auxclk4_src_gate_ck: auxclk4_src_gate_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-no-wait-gate-clock";
+               clocks = <&dpll_core_m3x2_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x0320>;
+       };
+
+       auxclk4_src_mux_ck: auxclk4_src_mux_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-mux-clock";
+               clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
+               ti,bit-shift = <1>;
+               reg = <0x0320>;
+       };
+
+       auxclk4_src_ck: auxclk4_src_ck {
+               #clock-cells = <0>;
+               compatible = "ti,composite-clock";
+               clocks = <&auxclk4_src_gate_ck>, <&auxclk4_src_mux_ck>;
+       };
+
+       auxclk4_ck: auxclk4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,divider-clock";
+               clocks = <&auxclk4_src_ck>;
+               ti,bit-shift = <16>;
+               ti,max-div = <16>;
+               reg = <0x0320>;
+       };
+
+       auxclkreq0_ck: auxclkreq0_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0210>;
+       };
+
+       auxclkreq1_ck: auxclkreq1_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0214>;
+       };
+
+       auxclkreq2_ck: auxclkreq2_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x0218>;
+       };
+
+       auxclkreq3_ck: auxclkreq3_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
+               ti,bit-shift = <2>;
+               reg = <0x021c>;
+       };
+};
index 1105558d188b2fe485aaf0cd9c4ba2ecc482b31b..52447c17537a17c6035bf33694909e7252b78733 100644 (file)
                        watchdog@fffffe40 {
                                compatible = "atmel,at91sam9260-wdt";
                                reg = <0xfffffe40 0x10>;
+                               interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
+                               atmel,watchdog-type = "hardware";
+                               atmel,reset-type = "all";
+                               atmel,dbg-halt;
+                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index 2c38fdf1951d377ee648778a847fb3776fabc70a..2519d6de064012be7b17ca38c9e2f6e921780d8c 100644 (file)
@@ -126,3 +126,6 @@ CONFIG_CRC7=y
 CONFIG_XZ_DEC=y
 CONFIG_AVERAGE=y
 CONFIG_PINCTRL_CAPRI=y
+CONFIG_WATCHDOG=y
+CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM_KONA_WDT_DEBUG=y
index fbeb39c869e9fdcedace1d61688247c14141cab3..8aa4cca74501f394b74d35fb69657553bc54daa3 100644 (file)
 #define isa_page_to_bus page_to_phys
 #define isa_bus_to_virt phys_to_virt
 
+/*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
+
 /*
  * Generic IO read/write.  These perform native-endian accesses.  Note
  * that some architectures will want to re-define __raw_{read,write}w.
index b3fb8c9e1ff2d75f15666a9233dee2d1b3ccb6bc..1879e8dd2acc18a7837f0eee71beb8241c1aa9c0 100644 (file)
@@ -451,9 +451,11 @@ __und_usr_thumb:
        .arch   armv6t2
 #endif
 2:     ldrht   r5, [r4]
+ARM_BE8(rev16  r5, r5)                         @ little endian instruction
        cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
        blo     __und_usr_fault_16              @ 16bit undefined instruction
 3:     ldrht   r0, [r2]
+ARM_BE8(rev16  r0, r0)                         @ little endian instruction
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
        orr     r0, r0, r5, lsl #16
index 32f317e5828adafc2bdec200705a9d12c686711c..914616e0bdcd0c0108376a9e5834e9cb73219b51 100644 (file)
@@ -52,7 +52,8 @@
        .equ    swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
 
        .macro  pgtbl, rd, phys
-       add     \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
+       add     \rd, \phys, #TEXT_OFFSET
+       sub     \rd, \rd, #PG_DIR_SIZE
        .endm
 
 /*
index dcd5b4d8614374519a80eb0772c8cd12df1320e0..9203cf883330a3c64460f07d572994f5f8263a83 100644 (file)
@@ -1,6 +1,41 @@
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(__io_lock);
+
+/*
+ * Generic atomic MMIO modify.
+ *
+ * Allows thread-safe access to registers shared by unrelated subsystems.
+ * The access is protected by a single MMIO-wide lock.
+ */
+void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set)
+{
+       unsigned long flags;
+       u32 value;
+
+       raw_spin_lock_irqsave(&__io_lock, flags);
+       value = readl_relaxed(reg) & ~mask;
+       value |= (set & mask);
+       writel_relaxed(value, reg);
+       raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify_relaxed);
+
+void atomic_io_modify(void __iomem *reg, u32 mask, u32 set)
+{
+       unsigned long flags;
+       u32 value;
+
+       raw_spin_lock_irqsave(&__io_lock, flags);
+       value = readl_relaxed(reg) & ~mask;
+       value |= (set & mask);
+       writel(value, reg);
+       raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify);
 
 /*
  * Copy data from IO memory space to "real" memory space.
index d2ea6e60ea7b2042f6ff8d73503db73f95fd9770..76e5db4fce35a96eb7b5f7abeaf6c60e70fddcf3 100644 (file)
@@ -133,6 +133,39 @@ static int ar8031_phy_fixup(struct phy_device *dev)
 
 #define PHY_ID_AR8031  0x004dd074
 
+static int ar8035_phy_fixup(struct phy_device *dev)
+{
+       u16 val;
+
+       /* Ar803x phy SmartEEE feature cause link status generates glitch,
+        * which cause ethernet link down/up issue, so disable SmartEEE
+        */
+       phy_write(dev, 0xd, 0x3);
+       phy_write(dev, 0xe, 0x805d);
+       phy_write(dev, 0xd, 0x4003);
+
+       val = phy_read(dev, 0xe);
+       phy_write(dev, 0xe, val & ~(1 << 8));
+
+       /*
+        * Enable 125MHz clock from CLK_25M on the AR8031.  This
+        * is fed in to the IMX6 on the ENET_REF_CLK (V22) pad.
+        * Also, introduce a tx clock delay.
+        *
+        * This is the same as is the AR8031 fixup.
+        */
+       ar8031_phy_fixup(dev);
+
+       /*check phy power*/
+       val = phy_read(dev, 0x0);
+       if (val & BMCR_PDOWN)
+               phy_write(dev, 0x0, val & ~BMCR_PDOWN);
+
+       return 0;
+}
+
+#define PHY_ID_AR8035 0x004dd072
+
 static void __init imx6q_enet_phy_init(void)
 {
        if (IS_BUILTIN(CONFIG_PHYLIB)) {
@@ -142,6 +175,8 @@ static void __init imx6q_enet_phy_init(void)
                                ksz9031rn_phy_fixup);
                phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
                                ar8031_phy_fixup);
+               phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
+                               ar8035_phy_fixup);
        }
 }
 
index 6f424eced1816914bf59e39e48608b50bb338fe5..b3738e616f197bef538f2610bd519ad066f8cddd 100644 (file)
@@ -236,32 +236,26 @@ static struct mc13xxx_led_platform_data moboard_led[] = {
        {
                .id = MC13783_LED_R1,
                .name = "coreboard-led-4:red",
-               .max_current = 2,
        },
        {
                .id = MC13783_LED_G1,
                .name = "coreboard-led-4:green",
-               .max_current = 2,
        },
        {
                .id = MC13783_LED_B1,
                .name = "coreboard-led-4:blue",
-               .max_current = 2,
        },
        {
                .id = MC13783_LED_R2,
                .name = "coreboard-led-5:red",
-               .max_current = 3,
        },
        {
                .id = MC13783_LED_G2,
                .name = "coreboard-led-5:green",
-               .max_current = 3,
        },
        {
                .id = MC13783_LED_B2,
                .name = "coreboard-led-5:blue",
-               .max_current = 3,
        },
 };
 
@@ -271,8 +265,14 @@ static struct mc13xxx_leds_platform_data moboard_leds = {
        .led_control[0] = MC13783_LED_C0_ENABLE | MC13783_LED_C0_ABMODE(0),
        .led_control[1] = MC13783_LED_C1_SLEWLIM,
        .led_control[2] = MC13783_LED_C2_SLEWLIM,
-       .led_control[3] = MC13783_LED_C3_PERIOD(0),
-       .led_control[4] = MC13783_LED_C3_PERIOD(0),
+       .led_control[3] = MC13783_LED_C3_PERIOD(0) |
+                         MC13783_LED_C3_CURRENT_R1(2) |
+                         MC13783_LED_C3_CURRENT_G1(2) |
+                         MC13783_LED_C3_CURRENT_B1(2),
+       .led_control[4] = MC13783_LED_C4_PERIOD(0) |
+                         MC13783_LED_C4_CURRENT_R2(3) |
+                         MC13783_LED_C4_CURRENT_G2(3) |
+                         MC13783_LED_C4_CURRENT_B2(3),
 };
 
 static struct mc13xxx_buttons_platform_data moboard_buttons = {
index 4191ae08f4c81a2c95059df01fac77f883f30b70..653b489479e0ee2d4166d6d033d315a1e56430e4 100644 (file)
@@ -76,6 +76,16 @@ config SOC_AM43XX
        select ARM_GIC
        select MACH_OMAP_GENERIC
 
+config SOC_DRA7XX
+       bool "TI DRA7XX"
+       depends on ARCH_MULTI_V7
+       select ARCH_OMAP2PLUS
+       select ARM_CPU_SUSPEND if PM
+       select ARM_GIC
+       select CPU_V7
+       select HAVE_SMP
+       select HAVE_ARM_ARCH_TIMER
+
 config ARCH_OMAP2PLUS
        bool
        select ARCH_HAS_BANDGAP
@@ -128,14 +138,6 @@ config SOC_HAS_REALTIME_COUNTER
        depends on SOC_OMAP5 || SOC_DRA7XX
        default y
 
-config SOC_DRA7XX
-       bool "TI DRA7XX"
-       select ARM_ARCH_TIMER
-       select CPU_V7
-       select ARM_GIC
-       select HAVE_SMP
-       select COMMON_CLK
-
 comment "OMAP Core Type"
        depends on ARCH_OMAP2
 
index f78b177e8f4fd17849c3265200b319fc17d52198..e6eec6f72fd3ed76b30af03bb4923fbacc62af7c 100644 (file)
@@ -130,6 +130,7 @@ obj-$(CONFIG_SOC_AM33XX)            += $(voltagedomain-common)
 obj-$(CONFIG_SOC_AM43XX)               += $(voltagedomain-common)
 obj-$(CONFIG_SOC_OMAP5)                        += $(voltagedomain-common)
 obj-$(CONFIG_SOC_OMAP5)                += voltagedomains54xx_data.o
+obj-$(CONFIG_SOC_DRA7XX)               += $(voltagedomain-common)
 
 # OMAP powerdomain framework
 powerdomain-common                     += powerdomain.o powerdomain-common.o
@@ -184,12 +185,14 @@ obj-$(CONFIG_ARCH_OMAP3)          += clock34xx.o clkt34xx_dpll3m2.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock3517.o clock36xx.o
 obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o cclock3xxx_data.o
 obj-$(CONFIG_ARCH_OMAP3)               += clkt_iclk.o
-obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common) cclock44xx_data.o
+obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common)
 obj-$(CONFIG_ARCH_OMAP4)               += dpll3xxx.o dpll44xx.o
 obj-$(CONFIG_SOC_AM33XX)               += $(clock-common) dpll3xxx.o
-obj-$(CONFIG_SOC_AM33XX)               += cclock33xx_data.o
 obj-$(CONFIG_SOC_OMAP5)                        += $(clock-common)
 obj-$(CONFIG_SOC_OMAP5)                        += dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_DRA7XX)               += $(clock-common)
+obj-$(CONFIG_SOC_DRA7XX)               += dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_AM43XX)               += $(clock-common) dpll3xxx.o
 
 # OMAP2 clock rate set data (old "OPP" data)
 obj-$(CONFIG_SOC_OMAP2420)             += opp2420_data.o
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
deleted file mode 100644 (file)
index 865d30e..0000000
+++ /dev/null
@@ -1,1064 +0,0 @@
-/*
- * AM33XX Clock data
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- * Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk-private.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-
-#include "am33xx.h"
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "control.h"
-#include "cm.h"
-#include "cm33xx.h"
-#include "cm-regbits-33xx.h"
-#include "prm.h"
-
-/* Modulemode control */
-#define AM33XX_MODULEMODE_HWCTRL_SHIFT         0
-#define AM33XX_MODULEMODE_SWCTRL_SHIFT         1
-
-/*LIST_HEAD(clocks);*/
-
-/* Root clocks */
-
-/* RTC 32k */
-DEFINE_CLK_FIXED_RATE(clk_32768_ck, CLK_IS_ROOT, 32768, 0x0);
-
-/* On-Chip 32KHz RC OSC */
-DEFINE_CLK_FIXED_RATE(clk_rc32k_ck, CLK_IS_ROOT, 32000, 0x0);
-
-/* Crystal input clks */
-DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_24000000_ck, CLK_IS_ROOT, 24000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_25000000_ck, CLK_IS_ROOT, 25000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
-
-/* Oscillator clock */
-/* 19.2, 24, 25 or 26 MHz */
-static const char *sys_clkin_ck_parents[] = {
-       "virt_19200000_ck", "virt_24000000_ck", "virt_25000000_ck",
-       "virt_26000000_ck",
-};
-
-/*
- * sys_clk in: input to the dpll and also used as funtional clock for,
- *   adc_tsc, smartreflex0-1, timer1-7, mcasp0-1, dcan0-1, cefuse
- *
- */
-DEFINE_CLK_MUX(sys_clkin_ck, sys_clkin_ck_parents, NULL, 0x0,
-              AM33XX_CTRL_REGADDR(AM33XX_CONTROL_STATUS),
-              AM33XX_CONTROL_STATUS_SYSBOOT1_SHIFT,
-              AM33XX_CONTROL_STATUS_SYSBOOT1_WIDTH,
-              0, NULL);
-
-/* External clock - 12 MHz */
-DEFINE_CLK_FIXED_RATE(tclkin_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-/* Module clocks and DPLL outputs */
-
-/* DPLL_CORE */
-static struct dpll_data dpll_core_dd = {
-       .mult_div1_reg  = AM33XX_CM_CLKSEL_DPLL_CORE,
-       .clk_bypass     = &sys_clkin_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = AM33XX_CM_CLKMODE_DPLL_CORE,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .idlest_reg     = AM33XX_CM_IDLEST_DPLL_CORE,
-       .mult_mask      = AM33XX_DPLL_MULT_MASK,
-       .div1_mask      = AM33XX_DPLL_DIV_MASK,
-       .enable_mask    = AM33XX_DPLL_EN_MASK,
-       .idlest_mask    = AM33XX_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-/* CLKDCOLDO output */
-static const char *dpll_core_ck_parents[] = {
-       "sys_clkin_ck",
-};
-
-static struct clk dpll_core_ck;
-
-static const struct clk_ops dpll_core_ck_ops = {
-       .recalc_rate    = &omap3_dpll_recalc,
-       .get_parent     = &omap2_init_dpll_parent,
-};
-
-static struct clk_hw_omap dpll_core_ck_hw = {
-       .hw     = {
-               .clk    = &dpll_core_ck,
-       },
-       .dpll_data      = &dpll_core_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_core_ck, dpll_core_ck_parents, dpll_core_ck_ops);
-
-static const char *dpll_core_x2_ck_parents[] = {
-       "dpll_core_ck",
-};
-
-static struct clk dpll_core_x2_ck;
-
-static const struct clk_ops dpll_x2_ck_ops = {
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-};
-
-static struct clk_hw_omap dpll_core_x2_ck_hw = {
-       .hw     = {
-               .clk    = &dpll_core_x2_ck,
-       },
-       .flags          = CLOCK_CLKOUTX2,
-};
-
-DEFINE_STRUCT_CLK(dpll_core_x2_ck, dpll_core_x2_ck_parents, dpll_x2_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll_core_m4_ck, "dpll_core_x2_ck", &dpll_core_x2_ck,
-                  0x0, AM33XX_CM_DIV_M4_DPLL_CORE,
-                  AM33XX_HSDIVIDER_CLKOUT1_DIV_SHIFT,
-                  AM33XX_HSDIVIDER_CLKOUT1_DIV_WIDTH, CLK_DIVIDER_ONE_BASED,
-                  NULL);
-
-DEFINE_CLK_DIVIDER(dpll_core_m5_ck, "dpll_core_x2_ck", &dpll_core_x2_ck,
-                  0x0, AM33XX_CM_DIV_M5_DPLL_CORE,
-                  AM33XX_HSDIVIDER_CLKOUT2_DIV_SHIFT,
-                  AM33XX_HSDIVIDER_CLKOUT2_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll_core_m6_ck, "dpll_core_x2_ck", &dpll_core_x2_ck,
-                  0x0, AM33XX_CM_DIV_M6_DPLL_CORE,
-                  AM33XX_HSDIVIDER_CLKOUT3_DIV_SHIFT,
-                  AM33XX_HSDIVIDER_CLKOUT3_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-
-/* DPLL_MPU */
-static struct dpll_data dpll_mpu_dd = {
-       .mult_div1_reg  = AM33XX_CM_CLKSEL_DPLL_MPU,
-       .clk_bypass     = &sys_clkin_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = AM33XX_CM_CLKMODE_DPLL_MPU,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .idlest_reg     = AM33XX_CM_IDLEST_DPLL_MPU,
-       .mult_mask      = AM33XX_DPLL_MULT_MASK,
-       .div1_mask      = AM33XX_DPLL_DIV_MASK,
-       .enable_mask    = AM33XX_DPLL_EN_MASK,
-       .idlest_mask    = AM33XX_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-/* CLKOUT: fdpll/M2 */
-static struct clk dpll_mpu_ck;
-
-static const struct clk_ops dpll_mpu_ck_ops = {
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .get_parent     = &omap2_init_dpll_parent,
-};
-
-static struct clk_hw_omap dpll_mpu_ck_hw = {
-       .hw = {
-               .clk    = &dpll_mpu_ck,
-       },
-       .dpll_data      = &dpll_mpu_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_core_ck_parents, dpll_mpu_ck_ops);
-
-/*
- * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
- * and ALT_CLK1/2)
- */
-DEFINE_CLK_DIVIDER(dpll_mpu_m2_ck, "dpll_mpu_ck", &dpll_mpu_ck,
-                  0x0, AM33XX_CM_DIV_M2_DPLL_MPU, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
-                  AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-/* DPLL_DDR */
-static struct dpll_data dpll_ddr_dd = {
-       .mult_div1_reg  = AM33XX_CM_CLKSEL_DPLL_DDR,
-       .clk_bypass     = &sys_clkin_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = AM33XX_CM_CLKMODE_DPLL_DDR,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .idlest_reg     = AM33XX_CM_IDLEST_DPLL_DDR,
-       .mult_mask      = AM33XX_DPLL_MULT_MASK,
-       .div1_mask      = AM33XX_DPLL_DIV_MASK,
-       .enable_mask    = AM33XX_DPLL_EN_MASK,
-       .idlest_mask    = AM33XX_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-/* CLKOUT: fdpll/M2 */
-static struct clk dpll_ddr_ck;
-
-static const struct clk_ops dpll_ddr_ck_ops = {
-       .recalc_rate    = &omap3_dpll_recalc,
-       .get_parent     = &omap2_init_dpll_parent,
-       .round_rate     = &omap2_dpll_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-};
-
-static struct clk_hw_omap dpll_ddr_ck_hw = {
-       .hw = {
-               .clk    = &dpll_ddr_ck,
-       },
-       .dpll_data      = &dpll_ddr_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_ddr_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
-
-/*
- * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
- * and ALT_CLK1/2)
- */
-DEFINE_CLK_DIVIDER(dpll_ddr_m2_ck, "dpll_ddr_ck", &dpll_ddr_ck,
-                  0x0, AM33XX_CM_DIV_M2_DPLL_DDR,
-                  AM33XX_DPLL_CLKOUT_DIV_SHIFT, AM33XX_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-/* emif_fck functional clock */
-DEFINE_CLK_FIXED_FACTOR(dpll_ddr_m2_div2_ck, "dpll_ddr_m2_ck", &dpll_ddr_m2_ck,
-                       0x0, 1, 2);
-
-/* DPLL_DISP */
-static struct dpll_data dpll_disp_dd = {
-       .mult_div1_reg  = AM33XX_CM_CLKSEL_DPLL_DISP,
-       .clk_bypass     = &sys_clkin_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = AM33XX_CM_CLKMODE_DPLL_DISP,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .idlest_reg     = AM33XX_CM_IDLEST_DPLL_DISP,
-       .mult_mask      = AM33XX_DPLL_MULT_MASK,
-       .div1_mask      = AM33XX_DPLL_DIV_MASK,
-       .enable_mask    = AM33XX_DPLL_EN_MASK,
-       .idlest_mask    = AM33XX_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-/* CLKOUT: fdpll/M2 */
-static struct clk dpll_disp_ck;
-
-static struct clk_hw_omap dpll_disp_ck_hw = {
-       .hw = {
-               .clk    = &dpll_disp_ck,
-       },
-       .dpll_data      = &dpll_disp_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_disp_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
-
-/*
- * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
- * and ALT_CLK1/2)
- */
-DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck,
-                  CLK_SET_RATE_PARENT, AM33XX_CM_DIV_M2_DPLL_DISP,
-                  AM33XX_DPLL_CLKOUT_DIV_SHIFT, AM33XX_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-/* DPLL_PER */
-static struct dpll_data dpll_per_dd = {
-       .mult_div1_reg  = AM33XX_CM_CLKSEL_DPLL_PERIPH,
-       .clk_bypass     = &sys_clkin_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = AM33XX_CM_CLKMODE_DPLL_PER,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .idlest_reg     = AM33XX_CM_IDLEST_DPLL_PER,
-       .mult_mask      = AM33XX_DPLL_MULT_PERIPH_MASK,
-       .div1_mask      = AM33XX_DPLL_PER_DIV_MASK,
-       .enable_mask    = AM33XX_DPLL_EN_MASK,
-       .idlest_mask    = AM33XX_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-       .flags          = DPLL_J_TYPE,
-};
-
-/* CLKDCOLDO */
-static struct clk dpll_per_ck;
-
-static struct clk_hw_omap dpll_per_ck_hw = {
-       .hw     = {
-               .clk    = &dpll_per_ck,
-       },
-       .dpll_data      = &dpll_per_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_per_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
-
-/* CLKOUT: fdpll/M2 */
-DEFINE_CLK_DIVIDER(dpll_per_m2_ck, "dpll_per_ck", &dpll_per_ck, 0x0,
-                  AM33XX_CM_DIV_M2_DPLL_PER, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
-                  AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED,
-                  NULL);
-
-DEFINE_CLK_FIXED_FACTOR(dpll_per_m2_div4_wkupdm_ck, "dpll_per_m2_ck",
-                       &dpll_per_m2_ck, 0x0, 1, 4);
-
-DEFINE_CLK_FIXED_FACTOR(dpll_per_m2_div4_ck, "dpll_per_m2_ck",
-                       &dpll_per_m2_ck, 0x0, 1, 4);
-
-DEFINE_CLK_FIXED_FACTOR(dpll_core_m4_div2_ck, "dpll_core_m4_ck",
-                       &dpll_core_m4_ck, 0x0, 1, 2);
-
-DEFINE_CLK_FIXED_FACTOR(l4_rtc_gclk, "dpll_core_m4_ck", &dpll_core_m4_ck, 0x0,
-                       1, 2);
-
-DEFINE_CLK_FIXED_FACTOR(clk_24mhz, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1,
-                       8);
-
-/*
- * Below clock nodes describes clockdomains derived out
- * of core clock.
- */
-static const struct clk_ops clk_ops_null = {
-};
-
-static const char *l3_gclk_parents[] = {
-       "dpll_core_m4_ck"
-};
-
-static struct clk l3_gclk;
-DEFINE_STRUCT_CLK_HW_OMAP(l3_gclk, NULL);
-DEFINE_STRUCT_CLK(l3_gclk, l3_gclk_parents, clk_ops_null);
-
-static struct clk l4hs_gclk;
-DEFINE_STRUCT_CLK_HW_OMAP(l4hs_gclk, NULL);
-DEFINE_STRUCT_CLK(l4hs_gclk, l3_gclk_parents, clk_ops_null);
-
-static const char *l3s_gclk_parents[] = {
-       "dpll_core_m4_div2_ck"
-};
-
-static struct clk l3s_gclk;
-DEFINE_STRUCT_CLK_HW_OMAP(l3s_gclk, NULL);
-DEFINE_STRUCT_CLK(l3s_gclk, l3s_gclk_parents, clk_ops_null);
-
-static struct clk l4fw_gclk;
-DEFINE_STRUCT_CLK_HW_OMAP(l4fw_gclk, NULL);
-DEFINE_STRUCT_CLK(l4fw_gclk, l3s_gclk_parents, clk_ops_null);
-
-static struct clk l4ls_gclk;
-DEFINE_STRUCT_CLK_HW_OMAP(l4ls_gclk, NULL);
-DEFINE_STRUCT_CLK(l4ls_gclk, l3s_gclk_parents, clk_ops_null);
-
-static struct clk sysclk_div_ck;
-DEFINE_STRUCT_CLK_HW_OMAP(sysclk_div_ck, NULL);
-DEFINE_STRUCT_CLK(sysclk_div_ck, l3_gclk_parents, clk_ops_null);
-
-/*
- * In order to match the clock domain with hwmod clockdomain entry,
- * separate clock nodes is required for the modules which are
- * directly getting their funtioncal clock from sys_clkin.
- */
-static struct clk adc_tsc_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(adc_tsc_fck, NULL);
-DEFINE_STRUCT_CLK(adc_tsc_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk dcan0_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(dcan0_fck, NULL);
-DEFINE_STRUCT_CLK(dcan0_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk dcan1_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(dcan1_fck, NULL);
-DEFINE_STRUCT_CLK(dcan1_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk mcasp0_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(mcasp0_fck, NULL);
-DEFINE_STRUCT_CLK(mcasp0_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk mcasp1_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(mcasp1_fck, NULL);
-DEFINE_STRUCT_CLK(mcasp1_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk smartreflex0_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(smartreflex0_fck, NULL);
-DEFINE_STRUCT_CLK(smartreflex0_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk smartreflex1_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(smartreflex1_fck, NULL);
-DEFINE_STRUCT_CLK(smartreflex1_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk sha0_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(sha0_fck, NULL);
-DEFINE_STRUCT_CLK(sha0_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk aes0_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(aes0_fck, NULL);
-DEFINE_STRUCT_CLK(aes0_fck, dpll_core_ck_parents, clk_ops_null);
-
-static struct clk rng_fck;
-DEFINE_STRUCT_CLK_HW_OMAP(rng_fck, NULL);
-DEFINE_STRUCT_CLK(rng_fck, dpll_core_ck_parents, clk_ops_null);
-
-/*
- * Modules clock nodes
- *
- * The following clock leaf nodes are added for the moment because:
- *
- *  - hwmod data is not present for these modules, either hwmod
- *    control is not required or its not populated.
- *  - Driver code is not yet migrated to use hwmod/runtime pm
- *  - Modules outside kernel access (to disable them by default)
- *
- *     - mmu (gfx domain)
- *     - cefuse
- *     - usbotg_fck (its additional clock and not really a modulemode)
- *     - ieee5000
- */
-
-DEFINE_CLK_GATE(mmu_fck, "dpll_core_m4_ck", &dpll_core_m4_ck, 0x0,
-               AM33XX_CM_GFX_MMUDATA_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
-               AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
-               0x0, NULL);
-
-/*
- * clkdiv32 is generated from fixed division of 732.4219
- */
-DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
-
-static struct clk clkdiv32k_ick;
-
-static const char *clkdiv32k_ick_parent_names[] = {
-       "clkdiv32k_ck",
-};
-
-static const struct clk_ops clkdiv32k_ick_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .init           = &omap2_init_clk_clkdm,
-};
-
-static struct clk_hw_omap clkdiv32k_ick_hw = {
-       .hw     = {
-               .clk    = &clkdiv32k_ick,
-       },
-       .enable_reg     = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
-       .enable_bit     = AM33XX_MODULEMODE_SWCTRL_SHIFT,
-       .clkdm_name     = "clk_24mhz_clkdm",
-};
-
-DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);
-
-/* "usbotg_fck" is an additional clock and not really a modulemode */
-DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
-               AM33XX_CM_CLKDCOLDO_DPLL_PER, AM33XX_ST_DPLL_CLKDCOLDO_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(ieee5000_fck, "dpll_core_m4_div2_ck", &dpll_core_m4_div2_ck,
-               0x0, AM33XX_CM_PER_IEEE5000_CLKCTRL,
-               AM33XX_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-/* Timers */
-static const struct clksel timer1_clkmux_sel[] = {
-       { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
-       { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
-       { .parent = &tclkin_ck, .rates = div_1_2_rates },
-       { .parent = &clk_rc32k_ck, .rates = div_1_3_rates },
-       { .parent = &clk_32768_ck, .rates = div_1_4_rates },
-       { .parent = NULL },
-};
-
-static const char *timer1_ck_parents[] = {
-       "sys_clkin_ck", "clkdiv32k_ick", "tclkin_ck", "clk_rc32k_ck",
-       "clk_32768_ck",
-};
-
-static struct clk timer1_fck;
-
-static const struct clk_ops timer1_fck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-       .init           = &omap2_init_clk_clkdm,
-};
-
-static struct clk_hw_omap timer1_fck_hw = {
-       .hw     = {
-               .clk    = &timer1_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer1_clkmux_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER1MS_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_2_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer1_fck, timer1_ck_parents, timer1_fck_ops);
-
-static const struct clksel timer2_to_7_clk_sel[] = {
-       { .parent = &tclkin_ck, .rates = div_1_0_rates },
-       { .parent = &sys_clkin_ck, .rates = div_1_1_rates },
-       { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *timer2_to_7_ck_parents[] = {
-       "tclkin_ck", "sys_clkin_ck", "clkdiv32k_ick",
-};
-
-static struct clk timer2_fck;
-
-static struct clk_hw_omap timer2_fck_hw = {
-       .hw     = {
-               .clk    = &timer2_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer2_to_7_clk_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER2_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer2_fck, timer2_to_7_ck_parents, timer1_fck_ops);
-
-static struct clk timer3_fck;
-
-static struct clk_hw_omap timer3_fck_hw = {
-       .hw     = {
-               .clk    = &timer3_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer2_to_7_clk_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER3_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer3_fck, timer2_to_7_ck_parents, timer1_fck_ops);
-
-static struct clk timer4_fck;
-
-static struct clk_hw_omap timer4_fck_hw = {
-       .hw     = {
-               .clk    = &timer4_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer2_to_7_clk_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER4_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer4_fck, timer2_to_7_ck_parents, timer1_fck_ops);
-
-static struct clk timer5_fck;
-
-static struct clk_hw_omap timer5_fck_hw = {
-       .hw     = {
-               .clk    = &timer5_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer2_to_7_clk_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER5_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer5_fck, timer2_to_7_ck_parents, timer1_fck_ops);
-
-static struct clk timer6_fck;
-
-static struct clk_hw_omap timer6_fck_hw = {
-       .hw     = {
-               .clk    = &timer6_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer2_to_7_clk_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER6_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer6_fck, timer2_to_7_ck_parents, timer1_fck_ops);
-
-static struct clk timer7_fck;
-
-static struct clk_hw_omap timer7_fck_hw = {
-       .hw     = {
-               .clk    = &timer7_fck,
-       },
-       .clkdm_name     = "l4ls_clkdm",
-       .clksel         = timer2_to_7_clk_sel,
-       .clksel_reg     = AM33XX_CLKSEL_TIMER7_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(timer7_fck, timer2_to_7_ck_parents, timer1_fck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(cpsw_125mhz_gclk,
-                       "dpll_core_m5_ck",
-                       &dpll_core_m5_ck,
-                       0x0,
-                       1, 2);
-
-static const struct clk_ops cpsw_fck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-static const struct clksel cpsw_cpts_rft_clkmux_sel[] = {
-       { .parent = &dpll_core_m5_ck, .rates = div_1_0_rates },
-       { .parent = &dpll_core_m4_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static const char *cpsw_cpts_rft_ck_parents[] = {
-       "dpll_core_m5_ck", "dpll_core_m4_ck",
-};
-
-static struct clk cpsw_cpts_rft_clk;
-
-static struct clk_hw_omap cpsw_cpts_rft_clk_hw = {
-       .hw     = {
-               .clk    = &cpsw_cpts_rft_clk,
-       },
-       .clkdm_name     = "cpsw_125mhz_clkdm",
-       .clksel         = cpsw_cpts_rft_clkmux_sel,
-       .clksel_reg     = AM33XX_CM_CPTS_RFT_CLKSEL,
-       .clksel_mask    = AM33XX_CLKSEL_0_0_MASK,
-};
-
-DEFINE_STRUCT_CLK(cpsw_cpts_rft_clk, cpsw_cpts_rft_ck_parents, cpsw_fck_ops);
-
-
-/* gpio */
-static const char *gpio0_ck_parents[] = {
-       "clk_rc32k_ck", "clk_32768_ck", "clkdiv32k_ick",
-};
-
-static const struct clksel gpio0_dbclk_mux_sel[] = {
-       { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
-       { .parent = &clk_32768_ck, .rates = div_1_1_rates },
-       { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const struct clk_ops gpio_fck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-       .init           = &omap2_init_clk_clkdm,
-};
-
-static struct clk gpio0_dbclk_mux_ck;
-
-static struct clk_hw_omap gpio0_dbclk_mux_ck_hw = {
-       .hw     = {
-               .clk    = &gpio0_dbclk_mux_ck,
-       },
-       .clkdm_name     = "l4_wkup_clkdm",
-       .clksel         = gpio0_dbclk_mux_sel,
-       .clksel_reg     = AM33XX_CLKSEL_GPIO0_DBCLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(gpio0_dbclk_mux_ck, gpio0_ck_parents, gpio_fck_ops);
-
-DEFINE_CLK_GATE(gpio0_dbclk, "gpio0_dbclk_mux_ck", &gpio0_dbclk_mux_ck, 0x0,
-               AM33XX_CM_WKUP_GPIO0_CLKCTRL,
-               AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(gpio1_dbclk, "clkdiv32k_ick", &clkdiv32k_ick, 0x0,
-               AM33XX_CM_PER_GPIO1_CLKCTRL,
-               AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(gpio2_dbclk, "clkdiv32k_ick", &clkdiv32k_ick, 0x0,
-               AM33XX_CM_PER_GPIO2_CLKCTRL,
-               AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(gpio3_dbclk, "clkdiv32k_ick", &clkdiv32k_ick, 0x0,
-               AM33XX_CM_PER_GPIO3_CLKCTRL,
-               AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT, 0x0, NULL);
-
-
-static const char *pruss_ck_parents[] = {
-       "l3_gclk", "dpll_disp_m2_ck",
-};
-
-static const struct clksel pruss_ocp_clk_mux_sel[] = {
-       { .parent = &l3_gclk, .rates = div_1_0_rates },
-       { .parent = &dpll_disp_m2_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static struct clk pruss_ocp_gclk;
-
-static struct clk_hw_omap pruss_ocp_gclk_hw = {
-       .hw     = {
-               .clk    = &pruss_ocp_gclk,
-       },
-       .clkdm_name     = "pruss_ocp_clkdm",
-       .clksel         = pruss_ocp_clk_mux_sel,
-       .clksel_reg     = AM33XX_CLKSEL_PRUSS_OCP_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_0_MASK,
-};
-
-DEFINE_STRUCT_CLK(pruss_ocp_gclk, pruss_ck_parents, gpio_fck_ops);
-
-static const char *lcd_ck_parents[] = {
-       "dpll_disp_m2_ck", "dpll_core_m5_ck", "dpll_per_m2_ck",
-};
-
-static const struct clksel lcd_clk_mux_sel[] = {
-       { .parent = &dpll_disp_m2_ck, .rates = div_1_0_rates },
-       { .parent = &dpll_core_m5_ck, .rates = div_1_1_rates },
-       { .parent = &dpll_per_m2_ck, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static struct clk lcd_gclk;
-
-static struct clk_hw_omap lcd_gclk_hw = {
-       .hw     = {
-               .clk    = &lcd_gclk,
-       },
-       .clkdm_name     = "lcdc_clkdm",
-       .clksel         = lcd_clk_mux_sel,
-       .clksel_reg     = AM33XX_CLKSEL_LCDC_PIXEL_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK_FLAGS(lcd_gclk, lcd_ck_parents,
-                       gpio_fck_ops, CLK_SET_RATE_PARENT);
-
-DEFINE_CLK_FIXED_FACTOR(mmc_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1, 2);
-
-static const char *gfx_ck_parents[] = {
-       "dpll_core_m4_ck", "dpll_per_m2_ck",
-};
-
-static const struct clksel gfx_clksel_sel[] = {
-       { .parent = &dpll_core_m4_ck, .rates = div_1_0_rates },
-       { .parent = &dpll_per_m2_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static struct clk gfx_fclk_clksel_ck;
-
-static struct clk_hw_omap gfx_fclk_clksel_ck_hw = {
-       .hw     = {
-               .clk    = &gfx_fclk_clksel_ck,
-       },
-       .clksel         = gfx_clksel_sel,
-       .clksel_reg     = AM33XX_CLKSEL_GFX_FCLK,
-       .clksel_mask    = AM33XX_CLKSEL_GFX_FCLK_MASK,
-};
-
-DEFINE_STRUCT_CLK(gfx_fclk_clksel_ck, gfx_ck_parents, gpio_fck_ops);
-
-static const struct clk_div_table div_1_0_2_1_rates[] = {
-       { .div = 1, .val = 0, },
-       { .div = 2, .val = 1, },
-       { .div = 0 },
-};
-
-DEFINE_CLK_DIVIDER_TABLE(gfx_fck_div_ck, "gfx_fclk_clksel_ck",
-                        &gfx_fclk_clksel_ck, 0x0, AM33XX_CLKSEL_GFX_FCLK,
-                        AM33XX_CLKSEL_0_0_SHIFT, AM33XX_CLKSEL_0_0_WIDTH,
-                        0x0, div_1_0_2_1_rates, NULL);
-
-static const char *sysclkout_ck_parents[] = {
-       "clk_32768_ck", "l3_gclk", "dpll_ddr_m2_ck", "dpll_per_m2_ck",
-       "lcd_gclk",
-};
-
-static const struct clksel sysclkout_pre_sel[] = {
-       { .parent = &clk_32768_ck, .rates = div_1_0_rates },
-       { .parent = &l3_gclk, .rates = div_1_1_rates },
-       { .parent = &dpll_ddr_m2_ck, .rates = div_1_2_rates },
-       { .parent = &dpll_per_m2_ck, .rates = div_1_3_rates },
-       { .parent = &lcd_gclk, .rates = div_1_4_rates },
-       { .parent = NULL },
-};
-
-static struct clk sysclkout_pre_ck;
-
-static struct clk_hw_omap sysclkout_pre_ck_hw = {
-       .hw     = {
-               .clk    = &sysclkout_pre_ck,
-       },
-       .clksel         = sysclkout_pre_sel,
-       .clksel_reg     = AM33XX_CM_CLKOUT_CTRL,
-       .clksel_mask    = AM33XX_CLKOUT2SOURCE_MASK,
-};
-
-DEFINE_STRUCT_CLK(sysclkout_pre_ck, sysclkout_ck_parents, gpio_fck_ops);
-
-/* Divide by 8 clock rates with default clock is 1/1*/
-static const struct clk_div_table div8_rates[] = {
-       { .div = 1, .val = 0, },
-       { .div = 2, .val = 1, },
-       { .div = 3, .val = 2, },
-       { .div = 4, .val = 3, },
-       { .div = 5, .val = 4, },
-       { .div = 6, .val = 5, },
-       { .div = 7, .val = 6, },
-       { .div = 8, .val = 7, },
-       { .div = 0 },
-};
-
-DEFINE_CLK_DIVIDER_TABLE(clkout2_div_ck, "sysclkout_pre_ck", &sysclkout_pre_ck,
-                        0x0, AM33XX_CM_CLKOUT_CTRL, AM33XX_CLKOUT2DIV_SHIFT,
-                        AM33XX_CLKOUT2DIV_WIDTH, 0x0, div8_rates, NULL);
-
-DEFINE_CLK_GATE(clkout2_ck, "clkout2_div_ck", &clkout2_div_ck, 0x0,
-               AM33XX_CM_CLKOUT_CTRL, AM33XX_CLKOUT2EN_SHIFT, 0x0, NULL);
-
-static const char *wdt_ck_parents[] = {
-       "clk_rc32k_ck", "clkdiv32k_ick",
-};
-
-static const struct clksel wdt_clkmux_sel[] = {
-       { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
-       { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static struct clk wdt1_fck;
-
-static struct clk_hw_omap wdt1_fck_hw = {
-       .hw     = {
-               .clk    = &wdt1_fck,
-       },
-       .clkdm_name     = "l4_wkup_clkdm",
-       .clksel         = wdt_clkmux_sel,
-       .clksel_reg     = AM33XX_CLKSEL_WDT1_CLK,
-       .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
-};
-
-DEFINE_STRUCT_CLK(wdt1_fck, wdt_ck_parents, gpio_fck_ops);
-
-static const char *pwmss_clk_parents[] = {
-       "dpll_per_m2_ck",
-};
-
-static const struct clk_ops ehrpwm_tbclk_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(ehrpwm0_tbclk, "l4ls_clkdm",
-                        NULL, NULL, 0,
-                        AM33XX_CTRL_REGADDR(AM33XX_PWMSS_TBCLK_CLKCTRL),
-                        AM33XX_PWMSS0_TBCLKEN_SHIFT,
-                        NULL, pwmss_clk_parents, ehrpwm_tbclk_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(ehrpwm1_tbclk, "l4ls_clkdm",
-                        NULL, NULL, 0,
-                        AM33XX_CTRL_REGADDR(AM33XX_PWMSS_TBCLK_CLKCTRL),
-                        AM33XX_PWMSS1_TBCLKEN_SHIFT,
-                        NULL, pwmss_clk_parents, ehrpwm_tbclk_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(ehrpwm2_tbclk, "l4ls_clkdm",
-                        NULL, NULL, 0,
-                        AM33XX_CTRL_REGADDR(AM33XX_PWMSS_TBCLK_CLKCTRL),
-                        AM33XX_PWMSS2_TBCLKEN_SHIFT,
-                        NULL, pwmss_clk_parents, ehrpwm_tbclk_ops);
-
-/*
- * debugss optional clocks
- */
-DEFINE_CLK_GATE(dbg_sysclk_ck, "sys_clkin_ck", &sys_clkin_ck,
-               0x0, AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
-               AM33XX_OPTFCLKEN_DBGSYSCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(dbg_clka_ck, "dpll_core_m4_ck", &dpll_core_m4_ck,
-               0x0, AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
-               AM33XX_OPTCLK_DEBUG_CLKA_SHIFT, 0x0, NULL);
-
-static const char *stm_pmd_clock_mux_ck_parents[] = {
-       "dbg_sysclk_ck", "dbg_clka_ck",
-};
-
-DEFINE_CLK_MUX(stm_pmd_clock_mux_ck, stm_pmd_clock_mux_ck_parents, NULL, 0x0,
-              AM33XX_CM_WKUP_DEBUGSS_CLKCTRL, AM33XX_STM_PMD_CLKSEL_SHIFT,
-              AM33XX_STM_PMD_CLKSEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_MUX(trace_pmd_clk_mux_ck, stm_pmd_clock_mux_ck_parents, NULL, 0x0,
-              AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
-              AM33XX_TRC_PMD_CLKSEL_SHIFT,
-              AM33XX_TRC_PMD_CLKSEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(stm_clk_div_ck, "stm_pmd_clock_mux_ck",
-                  &stm_pmd_clock_mux_ck, 0x0, AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
-                  AM33XX_STM_PMD_CLKDIVSEL_SHIFT,
-                  AM33XX_STM_PMD_CLKDIVSEL_WIDTH, CLK_DIVIDER_POWER_OF_TWO,
-                  NULL);
-
-DEFINE_CLK_DIVIDER(trace_clk_div_ck, "trace_pmd_clk_mux_ck",
-                  &trace_pmd_clk_mux_ck, 0x0, AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
-                  AM33XX_TRC_PMD_CLKDIVSEL_SHIFT,
-                  AM33XX_TRC_PMD_CLKDIVSEL_WIDTH, CLK_DIVIDER_POWER_OF_TWO,
-                  NULL);
-
-/*
- * clkdev
- */
-static struct omap_clk am33xx_clks[] = {
-       CLK(NULL,       "clk_32768_ck",         &clk_32768_ck),
-       CLK(NULL,       "clk_rc32k_ck",         &clk_rc32k_ck),
-       CLK(NULL,       "virt_19200000_ck",     &virt_19200000_ck),
-       CLK(NULL,       "virt_24000000_ck",     &virt_24000000_ck),
-       CLK(NULL,       "virt_25000000_ck",     &virt_25000000_ck),
-       CLK(NULL,       "virt_26000000_ck",     &virt_26000000_ck),
-       CLK(NULL,       "sys_clkin_ck",         &sys_clkin_ck),
-       CLK(NULL,       "tclkin_ck",            &tclkin_ck),
-       CLK(NULL,       "dpll_core_ck",         &dpll_core_ck),
-       CLK(NULL,       "dpll_core_x2_ck",      &dpll_core_x2_ck),
-       CLK(NULL,       "dpll_core_m4_ck",      &dpll_core_m4_ck),
-       CLK(NULL,       "dpll_core_m5_ck",      &dpll_core_m5_ck),
-       CLK(NULL,       "dpll_core_m6_ck",      &dpll_core_m6_ck),
-       CLK(NULL,       "dpll_mpu_ck",          &dpll_mpu_ck),
-       CLK("cpu0",     NULL,                   &dpll_mpu_ck),
-       CLK(NULL,       "dpll_mpu_m2_ck",       &dpll_mpu_m2_ck),
-       CLK(NULL,       "dpll_ddr_ck",          &dpll_ddr_ck),
-       CLK(NULL,       "dpll_ddr_m2_ck",       &dpll_ddr_m2_ck),
-       CLK(NULL,       "dpll_ddr_m2_div2_ck",  &dpll_ddr_m2_div2_ck),
-       CLK(NULL,       "dpll_disp_ck",         &dpll_disp_ck),
-       CLK(NULL,       "dpll_disp_m2_ck",      &dpll_disp_m2_ck),
-       CLK(NULL,       "dpll_per_ck",          &dpll_per_ck),
-       CLK(NULL,       "dpll_per_m2_ck",       &dpll_per_m2_ck),
-       CLK(NULL,       "dpll_per_m2_div4_wkupdm_ck",   &dpll_per_m2_div4_wkupdm_ck),
-       CLK(NULL,       "dpll_per_m2_div4_ck",  &dpll_per_m2_div4_ck),
-       CLK(NULL,       "adc_tsc_fck",          &adc_tsc_fck),
-       CLK(NULL,       "cefuse_fck",           &cefuse_fck),
-       CLK(NULL,       "clkdiv32k_ck",         &clkdiv32k_ck),
-       CLK(NULL,       "clkdiv32k_ick",        &clkdiv32k_ick),
-       CLK(NULL,       "dcan0_fck",            &dcan0_fck),
-       CLK("481cc000.d_can",   NULL,           &dcan0_fck),
-       CLK(NULL,       "dcan1_fck",            &dcan1_fck),
-       CLK("481d0000.d_can",   NULL,           &dcan1_fck),
-       CLK(NULL,       "pruss_ocp_gclk",       &pruss_ocp_gclk),
-       CLK(NULL,       "mcasp0_fck",           &mcasp0_fck),
-       CLK(NULL,       "mcasp1_fck",           &mcasp1_fck),
-       CLK(NULL,       "mmu_fck",              &mmu_fck),
-       CLK(NULL,       "smartreflex0_fck",     &smartreflex0_fck),
-       CLK(NULL,       "smartreflex1_fck",     &smartreflex1_fck),
-       CLK(NULL,       "sha0_fck",             &sha0_fck),
-       CLK(NULL,       "aes0_fck",             &aes0_fck),
-       CLK(NULL,       "rng_fck",              &rng_fck),
-       CLK(NULL,       "timer1_fck",           &timer1_fck),
-       CLK(NULL,       "timer2_fck",           &timer2_fck),
-       CLK(NULL,       "timer3_fck",           &timer3_fck),
-       CLK(NULL,       "timer4_fck",           &timer4_fck),
-       CLK(NULL,       "timer5_fck",           &timer5_fck),
-       CLK(NULL,       "timer6_fck",           &timer6_fck),
-       CLK(NULL,       "timer7_fck",           &timer7_fck),
-       CLK(NULL,       "usbotg_fck",           &usbotg_fck),
-       CLK(NULL,       "ieee5000_fck",         &ieee5000_fck),
-       CLK(NULL,       "wdt1_fck",             &wdt1_fck),
-       CLK(NULL,       "l4_rtc_gclk",          &l4_rtc_gclk),
-       CLK(NULL,       "l3_gclk",              &l3_gclk),
-       CLK(NULL,       "dpll_core_m4_div2_ck", &dpll_core_m4_div2_ck),
-       CLK(NULL,       "l4hs_gclk",            &l4hs_gclk),
-       CLK(NULL,       "l3s_gclk",             &l3s_gclk),
-       CLK(NULL,       "l4fw_gclk",            &l4fw_gclk),
-       CLK(NULL,       "l4ls_gclk",            &l4ls_gclk),
-       CLK(NULL,       "clk_24mhz",            &clk_24mhz),
-       CLK(NULL,       "sysclk_div_ck",        &sysclk_div_ck),
-       CLK(NULL,       "cpsw_125mhz_gclk",     &cpsw_125mhz_gclk),
-       CLK(NULL,       "cpsw_cpts_rft_clk",    &cpsw_cpts_rft_clk),
-       CLK(NULL,       "gpio0_dbclk_mux_ck",   &gpio0_dbclk_mux_ck),
-       CLK(NULL,       "gpio0_dbclk",          &gpio0_dbclk),
-       CLK(NULL,       "gpio1_dbclk",          &gpio1_dbclk),
-       CLK(NULL,       "gpio2_dbclk",          &gpio2_dbclk),
-       CLK(NULL,       "gpio3_dbclk",          &gpio3_dbclk),
-       CLK(NULL,       "lcd_gclk",             &lcd_gclk),
-       CLK(NULL,       "mmc_clk",              &mmc_clk),
-       CLK(NULL,       "gfx_fclk_clksel_ck",   &gfx_fclk_clksel_ck),
-       CLK(NULL,       "gfx_fck_div_ck",       &gfx_fck_div_ck),
-       CLK(NULL,       "sysclkout_pre_ck",     &sysclkout_pre_ck),
-       CLK(NULL,       "clkout2_div_ck",       &clkout2_div_ck),
-       CLK(NULL,       "timer_32k_ck",         &clkdiv32k_ick),
-       CLK(NULL,       "timer_sys_ck",         &sys_clkin_ck),
-       CLK(NULL,       "dbg_sysclk_ck",        &dbg_sysclk_ck),
-       CLK(NULL,       "dbg_clka_ck",          &dbg_clka_ck),
-       CLK(NULL,       "stm_pmd_clock_mux_ck", &stm_pmd_clock_mux_ck),
-       CLK(NULL,       "trace_pmd_clk_mux_ck", &trace_pmd_clk_mux_ck),
-       CLK(NULL,       "stm_clk_div_ck",       &stm_clk_div_ck),
-       CLK(NULL,       "trace_clk_div_ck",     &trace_clk_div_ck),
-       CLK(NULL,       "clkout2_ck",           &clkout2_ck),
-       CLK("48300200.ehrpwm",  "tbclk",        &ehrpwm0_tbclk),
-       CLK("48302200.ehrpwm",  "tbclk",        &ehrpwm1_tbclk),
-       CLK("48304200.ehrpwm",  "tbclk",        &ehrpwm2_tbclk),
-};
-
-
-static const char *enable_init_clks[] = {
-       "dpll_ddr_m2_ck",
-       "dpll_mpu_m2_ck",
-       "l3_gclk",
-       "l4hs_gclk",
-       "l4fw_gclk",
-       "l4ls_gclk",
-       "clkout2_ck",   /* Required for external peripherals like, Audio codecs */
-};
-
-int __init am33xx_clk_init(void)
-{
-       if (soc_is_am33xx())
-               cpu_mask = RATE_IN_AM33XX;
-
-       omap_clocks_register(am33xx_clks, ARRAY_SIZE(am33xx_clks));
-
-       omap2_clk_disable_autoidle_all();
-
-       omap2_clk_enable_init_clocks(enable_init_clks,
-                                    ARRAY_SIZE(enable_init_clks));
-
-       /* TRM ERRATA: Timer 3 & 6 default parent (TCLKIN) may not be always
-        *    physically present, in such a case HWMOD enabling of
-        *    clock would be failure with default parent. And timer
-        *    probe thinks clock is already enabled, this leads to
-        *    crash upon accessing timer 3 & 6 registers in probe.
-        *    Fix by setting parent of both these timers to master
-        *    oscillator clock.
-        */
-
-       clk_set_parent(&timer3_fck, &sys_clkin_ck);
-       clk_set_parent(&timer6_fck, &sys_clkin_ck);
-       /*
-        * The On-Chip 32K RC Osc clock is not an accurate clock-source as per
-        * the design/spec, so as a result, for example, timer which supposed
-        * to get expired @60Sec, but will expire somewhere ~@40Sec, which is
-        * not expected by any use-case, so change WDT1 clock source to PRCM
-        * 32KHz clock.
-        */
-       clk_set_parent(&wdt1_fck, &clkdiv32k_ick);
-
-       return 0;
-}
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
deleted file mode 100644 (file)
index ec0dc0b..0000000
+++ /dev/null
@@ -1,1735 +0,0 @@
-/*
- * OMAP4 Clock data
- *
- * Copyright (C) 2009-2012 Texas Instruments, Inc.
- * Copyright (C) 2009-2010 Nokia Corporation
- *
- * Paul Walmsley (paul@pwsan.com)
- * Rajendra Nayak (rnayak@ti.com)
- * Benoit Cousson (b-cousson@ti.com)
- * Mike Turquette (mturquette@ti.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * XXX Some of the ES1 clocks have been removed/changed; once support
- * is added for discriminating clocks by ES level, these should be added back
- * in.
- *
- * XXX All of the remaining MODULEMODE clock nodes should be removed
- * once the drivers are updated to use pm_runtime or to use the appropriate
- * upstream clock node for rate/parent selection.
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk-private.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock44xx.h"
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
-#include "cm-regbits-44xx.h"
-#include "prm44xx.h"
-#include "prm-regbits-44xx.h"
-#include "control.h"
-#include "scrm44xx.h"
-
-/* OMAP4 modulemode control */
-#define OMAP4430_MODULEMODE_HWCTRL_SHIFT               0
-#define OMAP4430_MODULEMODE_SWCTRL_SHIFT               1
-
-/*
- * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
- * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
- * must be set to 196.608 MHz" and hence, the DPLL locked frequency is
- * half of this value.
- */
-#define OMAP4_DPLL_ABE_DEFFREQ                         98304000
-
-/*
- * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
- * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
- * locked frequency for the USB DPLL is 960MHz.
- */
-#define OMAP4_DPLL_USB_DEFFREQ                         960000000
-
-/* Root clocks */
-
-DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(pad_clks_src_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_GATE(pad_clks_ck, "pad_clks_src_ck", &pad_clks_src_ck, 0x0,
-               OMAP4430_CM_CLKSEL_ABE, OMAP4430_PAD_CLKS_GATE_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_FIXED_RATE(pad_slimbus_core_clks_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(secure_32k_clk_src_ck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(slimbus_src_clk, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_GATE(slimbus_clk, "slimbus_src_clk", &slimbus_src_clk, 0x0,
-               OMAP4430_CM_CLKSEL_ABE, OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_FIXED_RATE(sys_32k_ck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_12000000_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_13000000_ck, CLK_IS_ROOT, 13000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_16800000_ck, CLK_IS_ROOT, 16800000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_27000000_ck, CLK_IS_ROOT, 27000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_38400000_ck, CLK_IS_ROOT, 38400000, 0x0);
-
-static const char *sys_clkin_ck_parents[] = {
-       "virt_12000000_ck", "virt_13000000_ck", "virt_16800000_ck",
-       "virt_19200000_ck", "virt_26000000_ck", "virt_27000000_ck",
-       "virt_38400000_ck",
-};
-
-DEFINE_CLK_MUX(sys_clkin_ck, sys_clkin_ck_parents, NULL, 0x0,
-              OMAP4430_CM_SYS_CLKSEL, OMAP4430_SYS_CLKSEL_SHIFT,
-              OMAP4430_SYS_CLKSEL_WIDTH, CLK_MUX_INDEX_ONE, NULL);
-
-DEFINE_CLK_FIXED_RATE(tie_low_clock_ck, CLK_IS_ROOT, 0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(utmi_phy_clkout_ck, CLK_IS_ROOT, 60000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(xclk60mhsp1_ck, CLK_IS_ROOT, 60000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(xclk60mhsp2_ck, CLK_IS_ROOT, 60000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(xclk60motg_ck, CLK_IS_ROOT, 60000000, 0x0);
-
-/* Module clocks and DPLL outputs */
-
-static const char *abe_dpll_bypass_clk_mux_ck_parents[] = {
-       "sys_clkin_ck", "sys_32k_ck",
-};
-
-DEFINE_CLK_MUX(abe_dpll_bypass_clk_mux_ck, abe_dpll_bypass_clk_mux_ck_parents,
-              NULL, 0x0, OMAP4430_CM_L4_WKUP_CLKSEL, OMAP4430_CLKSEL_SHIFT,
-              OMAP4430_CLKSEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_MUX(abe_dpll_refclk_mux_ck, abe_dpll_bypass_clk_mux_ck_parents, NULL,
-              0x0, OMAP4430_CM_ABE_PLL_REF_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
-              OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
-
-/* DPLL_ABE */
-static struct dpll_data dpll_abe_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_ABE,
-       .clk_bypass     = &abe_dpll_bypass_clk_mux_ck,
-       .clk_ref        = &abe_dpll_refclk_mux_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_ABE,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_ABE,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_ABE,
-       .mult_mask      = OMAP4430_DPLL_MULT_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .m4xen_mask     = OMAP4430_DPLL_REGM4XEN_MASK,
-       .lpmode_mask    = OMAP4430_DPLL_LPMODE_EN_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-
-static const char *dpll_abe_ck_parents[] = {
-       "abe_dpll_refclk_mux_ck",
-};
-
-static struct clk dpll_abe_ck;
-
-static const struct clk_ops dpll_abe_ck_ops = {
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .recalc_rate    = &omap4_dpll_regm4xen_recalc,
-       .round_rate     = &omap4_dpll_regm4xen_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .get_parent     = &omap2_init_dpll_parent,
-};
-
-static struct clk_hw_omap dpll_abe_ck_hw = {
-       .hw = {
-               .clk = &dpll_abe_ck,
-       },
-       .dpll_data      = &dpll_abe_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_abe_ck, dpll_abe_ck_parents, dpll_abe_ck_ops);
-
-static const char *dpll_abe_x2_ck_parents[] = {
-       "dpll_abe_ck",
-};
-
-static struct clk dpll_abe_x2_ck;
-
-static const struct clk_ops dpll_abe_x2_ck_ops = {
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-};
-
-static struct clk_hw_omap dpll_abe_x2_ck_hw = {
-       .hw = {
-               .clk = &dpll_abe_x2_ck,
-       },
-       .flags          = CLOCK_CLKOUTX2,
-       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_ABE,
-       .ops            = &clkhwops_omap4_dpllmx,
-};
-
-DEFINE_STRUCT_CLK(dpll_abe_x2_ck, dpll_abe_x2_ck_parents, dpll_abe_x2_ck_ops);
-
-static const struct clk_ops omap_hsdivider_ops = {
-       .set_rate       = &omap2_clksel_set_rate,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_abe_m2x2_ck, "dpll_abe_x2_ck", &dpll_abe_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M2_DPLL_ABE,
-                         OMAP4430_DPLL_CLKOUT_DIV_MASK);
-
-DEFINE_CLK_FIXED_FACTOR(abe_24m_fclk, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck,
-                       0x0, 1, 8);
-
-DEFINE_CLK_DIVIDER(abe_clk, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, 0x0,
-                  OMAP4430_CM_CLKSEL_ABE, OMAP4430_CLKSEL_OPP_SHIFT,
-                  OMAP4430_CLKSEL_OPP_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_DIVIDER(aess_fclk, "abe_clk", &abe_clk, 0x0,
-                  OMAP4430_CM1_ABE_AESS_CLKCTRL,
-                  OMAP4430_CLKSEL_AESS_FCLK_SHIFT,
-                  OMAP4430_CLKSEL_AESS_FCLK_WIDTH,
-                  0x0, NULL);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_abe_m3x2_ck, "dpll_abe_x2_ck", &dpll_abe_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M3_DPLL_ABE,
-                         OMAP4430_DPLL_CLKOUTHIF_DIV_MASK);
-
-static const char *core_hsd_byp_clk_mux_ck_parents[] = {
-       "sys_clkin_ck", "dpll_abe_m3x2_ck",
-};
-
-DEFINE_CLK_MUX(core_hsd_byp_clk_mux_ck, core_hsd_byp_clk_mux_ck_parents, NULL,
-              0x0, OMAP4430_CM_CLKSEL_DPLL_CORE,
-              OMAP4430_DPLL_BYP_CLKSEL_SHIFT, OMAP4430_DPLL_BYP_CLKSEL_WIDTH,
-              0x0, NULL);
-
-/* DPLL_CORE */
-static struct dpll_data dpll_core_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_CORE,
-       .clk_bypass     = &core_hsd_byp_clk_mux_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_CORE,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_CORE,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_CORE,
-       .mult_mask      = OMAP4430_DPLL_MULT_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-
-static const char *dpll_core_ck_parents[] = {
-       "sys_clkin_ck", "core_hsd_byp_clk_mux_ck"
-};
-
-static struct clk dpll_core_ck;
-
-static const struct clk_ops dpll_core_ck_ops = {
-       .recalc_rate    = &omap3_dpll_recalc,
-       .get_parent     = &omap2_init_dpll_parent,
-};
-
-static struct clk_hw_omap dpll_core_ck_hw = {
-       .hw = {
-               .clk = &dpll_core_ck,
-       },
-       .dpll_data      = &dpll_core_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_core_ck, dpll_core_ck_parents, dpll_core_ck_ops);
-
-static const char *dpll_core_x2_ck_parents[] = {
-       "dpll_core_ck",
-};
-
-static struct clk dpll_core_x2_ck;
-
-static struct clk_hw_omap dpll_core_x2_ck_hw = {
-       .hw = {
-               .clk = &dpll_core_x2_ck,
-       },
-};
-
-DEFINE_STRUCT_CLK(dpll_core_x2_ck, dpll_core_x2_ck_parents, dpll_abe_x2_ck_ops);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m6x2_ck, "dpll_core_x2_ck",
-                         &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M6_DPLL_CORE,
-                         OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m2_ck, "dpll_core_ck", &dpll_core_ck, 0x0,
-                         OMAP4430_CM_DIV_M2_DPLL_CORE,
-                         OMAP4430_DPLL_CLKOUT_DIV_MASK);
-
-DEFINE_CLK_FIXED_FACTOR(ddrphy_ck, "dpll_core_m2_ck", &dpll_core_m2_ck, 0x0, 1,
-                       2);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m5x2_ck, "dpll_core_x2_ck",
-                         &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M5_DPLL_CORE,
-                         OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK);
-
-DEFINE_CLK_DIVIDER(div_core_ck, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, 0x0,
-                  OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_CORE_SHIFT,
-                  OMAP4430_CLKSEL_CORE_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(div_iva_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
-                  0x0, OMAP4430_CM_BYPCLK_DPLL_IVA, OMAP4430_CLKSEL_0_1_SHIFT,
-                  OMAP4430_CLKSEL_0_1_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_DIVIDER(div_mpu_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
-                  0x0, OMAP4430_CM_BYPCLK_DPLL_MPU, OMAP4430_CLKSEL_0_1_SHIFT,
-                  OMAP4430_CLKSEL_0_1_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m4x2_ck, "dpll_core_x2_ck",
-                         &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M4_DPLL_CORE,
-                         OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK);
-
-DEFINE_CLK_FIXED_FACTOR(dll_clk_div_ck, "dpll_core_m4x2_ck", &dpll_core_m4x2_ck,
-                       0x0, 1, 2);
-
-DEFINE_CLK_DIVIDER(dpll_abe_m2_ck, "dpll_abe_ck", &dpll_abe_ck, 0x0,
-                  OMAP4430_CM_DIV_M2_DPLL_ABE, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP4430_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static const struct clk_ops dpll_hsd_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-       .init           = &omap2_init_clk_clkdm,
-};
-
-static const struct clk_ops func_dmic_abe_gfclk_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-static const char *dpll_core_m3x2_ck_parents[] = {
-       "dpll_core_x2_ck",
-};
-
-static const struct clksel dpll_core_m3x2_div[] = {
-       { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
-       { .parent = NULL },
-};
-
-/* XXX Missing round_rate, set_rate in ops */
-DEFINE_CLK_OMAP_MUX_GATE(dpll_core_m3x2_ck, NULL, dpll_core_m3x2_div,
-                        OMAP4430_CM_DIV_M3_DPLL_CORE,
-                        OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
-                        OMAP4430_CM_DIV_M3_DPLL_CORE,
-                        OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT, NULL,
-                        dpll_core_m3x2_ck_parents, dpll_hsd_ops);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m7x2_ck, "dpll_core_x2_ck",
-                         &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M7_DPLL_CORE,
-                         OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK);
-
-static const char *iva_hsd_byp_clk_mux_ck_parents[] = {
-       "sys_clkin_ck", "div_iva_hs_clk",
-};
-
-DEFINE_CLK_MUX(iva_hsd_byp_clk_mux_ck, iva_hsd_byp_clk_mux_ck_parents, NULL,
-              0x0, OMAP4430_CM_CLKSEL_DPLL_IVA, OMAP4430_DPLL_BYP_CLKSEL_SHIFT,
-              OMAP4430_DPLL_BYP_CLKSEL_WIDTH, 0x0, NULL);
-
-/* DPLL_IVA */
-static struct dpll_data dpll_iva_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_IVA,
-       .clk_bypass     = &iva_hsd_byp_clk_mux_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_IVA,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_IVA,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_IVA,
-       .mult_mask      = OMAP4430_DPLL_MULT_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-static const char *dpll_iva_ck_parents[] = {
-       "sys_clkin_ck", "iva_hsd_byp_clk_mux_ck"
-};
-
-static struct clk dpll_iva_ck;
-
-static const struct clk_ops dpll_ck_ops = {
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .get_parent     = &omap2_init_dpll_parent,
-};
-
-static struct clk_hw_omap dpll_iva_ck_hw = {
-       .hw = {
-               .clk = &dpll_iva_ck,
-       },
-       .dpll_data      = &dpll_iva_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_iva_ck, dpll_iva_ck_parents, dpll_ck_ops);
-
-static const char *dpll_iva_x2_ck_parents[] = {
-       "dpll_iva_ck",
-};
-
-static struct clk dpll_iva_x2_ck;
-
-static struct clk_hw_omap dpll_iva_x2_ck_hw = {
-       .hw = {
-               .clk = &dpll_iva_x2_ck,
-       },
-};
-
-DEFINE_STRUCT_CLK(dpll_iva_x2_ck, dpll_iva_x2_ck_parents, dpll_abe_x2_ck_ops);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_iva_m4x2_ck, "dpll_iva_x2_ck", &dpll_iva_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M4_DPLL_IVA,
-                         OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_iva_m5x2_ck, "dpll_iva_x2_ck", &dpll_iva_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M5_DPLL_IVA,
-                         OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK);
-
-/* DPLL_MPU */
-static struct dpll_data dpll_mpu_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_MPU,
-       .clk_bypass     = &div_mpu_hs_clk,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_MPU,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_MPU,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_MPU,
-       .mult_mask      = OMAP4430_DPLL_MULT_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-static const char *dpll_mpu_ck_parents[] = {
-       "sys_clkin_ck", "div_mpu_hs_clk"
-};
-
-static struct clk dpll_mpu_ck;
-
-static struct clk_hw_omap dpll_mpu_ck_hw = {
-       .hw = {
-               .clk = &dpll_mpu_ck,
-       },
-       .dpll_data      = &dpll_mpu_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_mpu_ck_parents, dpll_ck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(mpu_periphclk, "dpll_mpu_ck", &dpll_mpu_ck, 0x0, 1, 2);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_mpu_m2_ck, "dpll_mpu_ck", &dpll_mpu_ck, 0x0,
-                         OMAP4430_CM_DIV_M2_DPLL_MPU,
-                         OMAP4430_DPLL_CLKOUT_DIV_MASK);
-
-DEFINE_CLK_FIXED_FACTOR(per_hs_clk_div_ck, "dpll_abe_m3x2_ck",
-                       &dpll_abe_m3x2_ck, 0x0, 1, 2);
-
-static const char *per_hsd_byp_clk_mux_ck_parents[] = {
-       "sys_clkin_ck", "per_hs_clk_div_ck",
-};
-
-DEFINE_CLK_MUX(per_hsd_byp_clk_mux_ck, per_hsd_byp_clk_mux_ck_parents, NULL,
-              0x0, OMAP4430_CM_CLKSEL_DPLL_PER, OMAP4430_DPLL_BYP_CLKSEL_SHIFT,
-              OMAP4430_DPLL_BYP_CLKSEL_WIDTH, 0x0, NULL);
-
-/* DPLL_PER */
-static struct dpll_data dpll_per_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_PER,
-       .clk_bypass     = &per_hsd_byp_clk_mux_ck,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_PER,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_PER,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_PER,
-       .mult_mask      = OMAP4430_DPLL_MULT_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .max_multiplier = 2047,
-       .max_divider    = 128,
-       .min_divider    = 1,
-};
-
-static const char *dpll_per_ck_parents[] = {
-       "sys_clkin_ck", "per_hsd_byp_clk_mux_ck"
-};
-
-static struct clk dpll_per_ck;
-
-static struct clk_hw_omap dpll_per_ck_hw = {
-       .hw = {
-               .clk = &dpll_per_ck,
-       },
-       .dpll_data      = &dpll_per_dd,
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_per_ck, dpll_per_ck_parents, dpll_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll_per_m2_ck, "dpll_per_ck", &dpll_per_ck, 0x0,
-                  OMAP4430_CM_DIV_M2_DPLL_PER, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP4430_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static const char *dpll_per_x2_ck_parents[] = {
-       "dpll_per_ck",
-};
-
-static struct clk dpll_per_x2_ck;
-
-static struct clk_hw_omap dpll_per_x2_ck_hw = {
-       .hw = {
-               .clk = &dpll_per_x2_ck,
-       },
-       .flags          = CLOCK_CLKOUTX2,
-       .clksel_reg     = OMAP4430_CM_DIV_M2_DPLL_PER,
-       .ops            = &clkhwops_omap4_dpllmx,
-};
-
-DEFINE_STRUCT_CLK(dpll_per_x2_ck, dpll_per_x2_ck_parents, dpll_abe_x2_ck_ops);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m2x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M2_DPLL_PER,
-                         OMAP4430_DPLL_CLKOUT_DIV_MASK);
-
-static const char *dpll_per_m3x2_ck_parents[] = {
-       "dpll_per_x2_ck",
-};
-
-static const struct clksel dpll_per_m3x2_div[] = {
-       { .parent = &dpll_per_x2_ck, .rates = div31_1to31_rates },
-       { .parent = NULL },
-};
-
-/* XXX Missing round_rate, set_rate in ops */
-DEFINE_CLK_OMAP_MUX_GATE(dpll_per_m3x2_ck, NULL, dpll_per_m3x2_div,
-                        OMAP4430_CM_DIV_M3_DPLL_PER,
-                        OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
-                        OMAP4430_CM_DIV_M3_DPLL_PER,
-                        OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT, NULL,
-                        dpll_per_m3x2_ck_parents, dpll_hsd_ops);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m4x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M4_DPLL_PER,
-                         OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m5x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M5_DPLL_PER,
-                         OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m6x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M6_DPLL_PER,
-                         OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m7x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
-                         0x0, OMAP4430_CM_DIV_M7_DPLL_PER,
-                         OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK);
-
-DEFINE_CLK_FIXED_FACTOR(usb_hs_clk_div_ck, "dpll_abe_m3x2_ck",
-                       &dpll_abe_m3x2_ck, 0x0, 1, 3);
-
-/* DPLL_USB */
-static struct dpll_data dpll_usb_dd = {
-       .mult_div1_reg  = OMAP4430_CM_CLKSEL_DPLL_USB,
-       .clk_bypass     = &usb_hs_clk_div_ck,
-       .flags          = DPLL_J_TYPE,
-       .clk_ref        = &sys_clkin_ck,
-       .control_reg    = OMAP4430_CM_CLKMODE_DPLL_USB,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .autoidle_reg   = OMAP4430_CM_AUTOIDLE_DPLL_USB,
-       .idlest_reg     = OMAP4430_CM_IDLEST_DPLL_USB,
-       .mult_mask      = OMAP4430_DPLL_MULT_USB_MASK,
-       .div1_mask      = OMAP4430_DPLL_DIV_0_7_MASK,
-       .enable_mask    = OMAP4430_DPLL_EN_MASK,
-       .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
-       .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
-       .sddiv_mask     = OMAP4430_DPLL_SD_DIV_MASK,
-       .max_multiplier = 4095,
-       .max_divider    = 256,
-       .min_divider    = 1,
-};
-
-static const char *dpll_usb_ck_parents[] = {
-       "sys_clkin_ck", "usb_hs_clk_div_ck"
-};
-
-static struct clk dpll_usb_ck;
-
-static const struct clk_ops dpll_usb_ck_ops = {
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .get_parent     = &omap2_init_dpll_parent,
-       .init           = &omap2_init_clk_clkdm,
-};
-
-static struct clk_hw_omap dpll_usb_ck_hw = {
-       .hw = {
-               .clk = &dpll_usb_ck,
-       },
-       .dpll_data      = &dpll_usb_dd,
-       .clkdm_name     = "l3_init_clkdm",
-       .ops            = &clkhwops_omap3_dpll,
-};
-
-DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_usb_ck_ops);
-
-static const char *dpll_usb_clkdcoldo_ck_parents[] = {
-       "dpll_usb_ck",
-};
-
-static struct clk dpll_usb_clkdcoldo_ck;
-
-static const struct clk_ops dpll_usb_clkdcoldo_ck_ops = {
-};
-
-static struct clk_hw_omap dpll_usb_clkdcoldo_ck_hw = {
-       .hw = {
-               .clk = &dpll_usb_clkdcoldo_ck,
-       },
-       .clksel_reg     = OMAP4430_CM_CLKDCOLDO_DPLL_USB,
-       .ops            = &clkhwops_omap4_dpllmx,
-};
-
-DEFINE_STRUCT_CLK(dpll_usb_clkdcoldo_ck, dpll_usb_clkdcoldo_ck_parents,
-                 dpll_usb_clkdcoldo_ck_ops);
-
-DEFINE_CLK_OMAP_HSDIVIDER(dpll_usb_m2_ck, "dpll_usb_ck", &dpll_usb_ck, 0x0,
-                         OMAP4430_CM_DIV_M2_DPLL_USB,
-                         OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK);
-
-static const char *ducati_clk_mux_ck_parents[] = {
-       "div_core_ck", "dpll_per_m6x2_ck",
-};
-
-DEFINE_CLK_MUX(ducati_clk_mux_ck, ducati_clk_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT, OMAP4430_CLKSEL_0_0_SHIFT,
-              OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_FIXED_FACTOR(func_12m_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
-                       0x0, 1, 16);
-
-DEFINE_CLK_FIXED_FACTOR(func_24m_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0,
-                       1, 4);
-
-DEFINE_CLK_FIXED_FACTOR(func_24mc_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
-                       0x0, 1, 8);
-
-static const struct clk_div_table func_48m_fclk_rates[] = {
-       { .div = 4, .val = 0 },
-       { .div = 8, .val = 1 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(func_48m_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
-                        0x0, OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
-                        OMAP4430_SCALE_FCLK_WIDTH, 0x0, func_48m_fclk_rates,
-                        NULL);
-
-DEFINE_CLK_FIXED_FACTOR(func_48mc_fclk,        "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
-                       0x0, 1, 4);
-
-static const struct clk_div_table func_64m_fclk_rates[] = {
-       { .div = 2, .val = 0 },
-       { .div = 4, .val = 1 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(func_64m_fclk, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck,
-                        0x0, OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
-                        OMAP4430_SCALE_FCLK_WIDTH, 0x0, func_64m_fclk_rates,
-                        NULL);
-
-static const struct clk_div_table func_96m_fclk_rates[] = {
-       { .div = 2, .val = 0 },
-       { .div = 4, .val = 1 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(func_96m_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
-                        0x0, OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
-                        OMAP4430_SCALE_FCLK_WIDTH, 0x0, func_96m_fclk_rates,
-                        NULL);
-
-static const struct clk_div_table init_60m_fclk_rates[] = {
-       { .div = 1, .val = 0 },
-       { .div = 8, .val = 1 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(init_60m_fclk, "dpll_usb_m2_ck", &dpll_usb_m2_ck,
-                        0x0, OMAP4430_CM_CLKSEL_USB_60MHZ,
-                        OMAP4430_CLKSEL_0_0_SHIFT, OMAP4430_CLKSEL_0_0_WIDTH,
-                        0x0, init_60m_fclk_rates, NULL);
-
-DEFINE_CLK_DIVIDER(l3_div_ck, "div_core_ck", &div_core_ck, 0x0,
-                  OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_L3_SHIFT,
-                  OMAP4430_CLKSEL_L3_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(l4_div_ck, "l3_div_ck", &l3_div_ck, 0x0,
-                  OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_L4_SHIFT,
-                  OMAP4430_CLKSEL_L4_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_FIXED_FACTOR(lp_clk_div_ck, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck,
-                       0x0, 1, 16);
-
-static const char *l4_wkup_clk_mux_ck_parents[] = {
-       "sys_clkin_ck", "lp_clk_div_ck",
-};
-
-DEFINE_CLK_MUX(l4_wkup_clk_mux_ck, l4_wkup_clk_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM_L4_WKUP_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
-              OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
-
-static const struct clk_div_table ocp_abe_iclk_rates[] = {
-       { .div = 2, .val = 0 },
-       { .div = 1, .val = 1 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(ocp_abe_iclk, "aess_fclk", &aess_fclk, 0x0,
-                        OMAP4430_CM1_ABE_AESS_CLKCTRL,
-                        OMAP4430_CLKSEL_AESS_FCLK_SHIFT,
-                        OMAP4430_CLKSEL_AESS_FCLK_WIDTH,
-                        0x0, ocp_abe_iclk_rates, NULL);
-
-DEFINE_CLK_FIXED_FACTOR(per_abe_24m_fclk, "dpll_abe_m2_ck", &dpll_abe_m2_ck,
-                       0x0, 1, 4);
-
-DEFINE_CLK_DIVIDER(per_abe_nc_fclk, "dpll_abe_m2_ck", &dpll_abe_m2_ck, 0x0,
-                  OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
-                  OMAP4430_SCALE_FCLK_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(syc_clk_div_ck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
-                  OMAP4430_CM_ABE_DSS_SYS_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
-                  OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
-
-static const char *dbgclk_mux_ck_parents[] = {
-       "sys_clkin_ck"
-};
-
-static struct clk dbgclk_mux_ck;
-DEFINE_STRUCT_CLK_HW_OMAP(dbgclk_mux_ck, NULL);
-DEFINE_STRUCT_CLK(dbgclk_mux_ck, dbgclk_mux_ck_parents,
-                 dpll_usb_clkdcoldo_ck_ops);
-
-/* Leaf clocks controlled by modules */
-
-DEFINE_CLK_GATE(aes1_fck, "l3_div_ck", &l3_div_ck, 0x0,
-               OMAP4430_CM_L4SEC_AES1_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(aes2_fck, "l3_div_ck", &l3_div_ck, 0x0,
-               OMAP4430_CM_L4SEC_AES2_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(bandgap_fclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
-               OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT, 0x0, NULL);
-
-static const struct clk_div_table div_ts_ck_rates[] = {
-       { .div = 8, .val = 0 },
-       { .div = 16, .val = 1 },
-       { .div = 32, .val = 2 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(div_ts_ck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
-                        0x0, OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
-                        OMAP4430_CLKSEL_24_25_SHIFT,
-                        OMAP4430_CLKSEL_24_25_WIDTH, 0x0, div_ts_ck_rates,
-                        NULL);
-
-DEFINE_CLK_GATE(bandgap_ts_fclk, "div_ts_ck", &div_ts_ck, 0x0,
-               OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
-               OMAP4460_OPTFCLKEN_TS_FCLK_SHIFT,
-               0x0, NULL);
-
-static const char *dmic_sync_mux_ck_parents[] = {
-       "abe_24m_fclk", "syc_clk_div_ck", "func_24m_clk",
-};
-
-DEFINE_CLK_MUX(dmic_sync_mux_ck, dmic_sync_mux_ck_parents, NULL,
-              0x0, OMAP4430_CM1_ABE_DMIC_CLKCTRL,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
-
-static const struct clksel func_dmic_abe_gfclk_sel[] = {
-       { .parent = &dmic_sync_mux_ck, .rates = div_1_0_rates },
-       { .parent = &pad_clks_ck, .rates = div_1_1_rates },
-       { .parent = &slimbus_clk, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *func_dmic_abe_gfclk_parents[] = {
-       "dmic_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
-};
-
-DEFINE_CLK_OMAP_MUX(func_dmic_abe_gfclk, "abe_clkdm", func_dmic_abe_gfclk_sel,
-                   OMAP4430_CM1_ABE_DMIC_CLKCTRL, OMAP4430_CLKSEL_SOURCE_MASK,
-                   func_dmic_abe_gfclk_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_GATE(dss_sys_clk, "syc_clk_div_ck", &syc_clk_div_ck, 0x0,
-               OMAP4430_CM_DSS_DSS_CLKCTRL,
-               OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(dss_tv_clk, "extalt_clkin_ck", &extalt_clkin_ck, 0x0,
-               OMAP4430_CM_DSS_DSS_CLKCTRL,
-               OMAP4430_OPTFCLKEN_TV_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(dss_dss_clk, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck,
-               CLK_SET_RATE_PARENT,
-               OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_OPTFCLKEN_DSSCLK_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(dss_48mhz_clk, "func_48mc_fclk", &func_48mc_fclk, 0x0,
-               OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(dss_fck, "l3_div_ck", &l3_div_ck, 0x0,
-               OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_DIVIDER(fdif_fck, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, 0x0,
-                  OMAP4430_CM_CAM_FDIF_CLKCTRL, OMAP4430_CLKSEL_FCLK_SHIFT,
-                  OMAP4430_CLKSEL_FCLK_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_GATE(gpio1_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
-               OMAP4430_OPTFCLKEN_DBCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(gpio2_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_L4PER_GPIO2_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(gpio3_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
-               OMAP4430_OPTFCLKEN_DBCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(gpio4_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_L4PER_GPIO4_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(gpio5_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_L4PER_GPIO5_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_GATE(gpio6_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_L4PER_GPIO6_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
-               0x0, NULL);
-
-static const struct clksel sgx_clk_mux_sel[] = {
-       { .parent = &dpll_core_m7x2_ck, .rates = div_1_0_rates },
-       { .parent = &dpll_per_m7x2_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static const char *sgx_clk_mux_parents[] = {
-       "dpll_core_m7x2_ck", "dpll_per_m7x2_ck",
-};
-
-DEFINE_CLK_OMAP_MUX(sgx_clk_mux, "l3_gfx_clkdm", sgx_clk_mux_sel,
-                   OMAP4430_CM_GFX_GFX_CLKCTRL, OMAP4430_CLKSEL_SGX_FCLK_MASK,
-                   sgx_clk_mux_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_DIVIDER(hsi_fck, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, 0x0,
-                  OMAP4430_CM_L3INIT_HSI_CLKCTRL, OMAP4430_CLKSEL_24_25_SHIFT,
-                  OMAP4430_CLKSEL_24_25_WIDTH, CLK_DIVIDER_POWER_OF_TWO,
-                  NULL);
-
-DEFINE_CLK_GATE(iss_ctrlclk, "func_96m_fclk", &func_96m_fclk, 0x0,
-               OMAP4430_CM_CAM_ISS_CLKCTRL, OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT,
-               0x0, NULL);
-
-DEFINE_CLK_MUX(mcasp_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM1_ABE_MCASP_CLKCTRL,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
-
-static const struct clksel func_mcasp_abe_gfclk_sel[] = {
-       { .parent = &mcasp_sync_mux_ck, .rates = div_1_0_rates },
-       { .parent = &pad_clks_ck, .rates = div_1_1_rates },
-       { .parent = &slimbus_clk, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *func_mcasp_abe_gfclk_parents[] = {
-       "mcasp_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
-};
-
-DEFINE_CLK_OMAP_MUX(func_mcasp_abe_gfclk, "abe_clkdm", func_mcasp_abe_gfclk_sel,
-                   OMAP4430_CM1_ABE_MCASP_CLKCTRL, OMAP4430_CLKSEL_SOURCE_MASK,
-                   func_mcasp_abe_gfclk_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_MUX(mcbsp1_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
-
-static const struct clksel func_mcbsp1_gfclk_sel[] = {
-       { .parent = &mcbsp1_sync_mux_ck, .rates = div_1_0_rates },
-       { .parent = &pad_clks_ck, .rates = div_1_1_rates },
-       { .parent = &slimbus_clk, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *func_mcbsp1_gfclk_parents[] = {
-       "mcbsp1_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
-};
-
-DEFINE_CLK_OMAP_MUX(func_mcbsp1_gfclk, "abe_clkdm", func_mcbsp1_gfclk_sel,
-                   OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
-                   OMAP4430_CLKSEL_SOURCE_MASK, func_mcbsp1_gfclk_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_MUX(mcbsp2_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
-
-static const struct clksel func_mcbsp2_gfclk_sel[] = {
-       { .parent = &mcbsp2_sync_mux_ck, .rates = div_1_0_rates },
-       { .parent = &pad_clks_ck, .rates = div_1_1_rates },
-       { .parent = &slimbus_clk, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *func_mcbsp2_gfclk_parents[] = {
-       "mcbsp2_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
-};
-
-DEFINE_CLK_OMAP_MUX(func_mcbsp2_gfclk, "abe_clkdm", func_mcbsp2_gfclk_sel,
-                   OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
-                   OMAP4430_CLKSEL_SOURCE_MASK, func_mcbsp2_gfclk_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_MUX(mcbsp3_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
-
-static const struct clksel func_mcbsp3_gfclk_sel[] = {
-       { .parent = &mcbsp3_sync_mux_ck, .rates = div_1_0_rates },
-       { .parent = &pad_clks_ck, .rates = div_1_1_rates },
-       { .parent = &slimbus_clk, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *func_mcbsp3_gfclk_parents[] = {
-       "mcbsp3_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
-};
-
-DEFINE_CLK_OMAP_MUX(func_mcbsp3_gfclk, "abe_clkdm", func_mcbsp3_gfclk_sel,
-                   OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
-                   OMAP4430_CLKSEL_SOURCE_MASK, func_mcbsp3_gfclk_parents,
-                   func_dmic_abe_gfclk_ops);
-
-static const char *mcbsp4_sync_mux_ck_parents[] = {
-       "func_96m_fclk", "per_abe_nc_fclk",
-};
-
-DEFINE_CLK_MUX(mcbsp4_sync_mux_ck, mcbsp4_sync_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
-              OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
-
-static const struct clksel per_mcbsp4_gfclk_sel[] = {
-       { .parent = &mcbsp4_sync_mux_ck, .rates = div_1_0_rates },
-       { .parent = &pad_clks_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static const char *per_mcbsp4_gfclk_parents[] = {
-       "mcbsp4_sync_mux_ck", "pad_clks_ck",
-};
-
-DEFINE_CLK_OMAP_MUX(per_mcbsp4_gfclk, "l4_per_clkdm", per_mcbsp4_gfclk_sel,
-                   OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
-                   OMAP4430_CLKSEL_SOURCE_24_24_MASK, per_mcbsp4_gfclk_parents,
-                   func_dmic_abe_gfclk_ops);
-
-static const struct clksel hsmmc1_fclk_sel[] = {
-       { .parent = &func_64m_fclk, .rates = div_1_0_rates },
-       { .parent = &func_96m_fclk, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static const char *hsmmc1_fclk_parents[] = {
-       "func_64m_fclk", "func_96m_fclk",
-};
-
-DEFINE_CLK_OMAP_MUX(hsmmc1_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
-                   OMAP4430_CM_L3INIT_MMC1_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
-                   OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
-               OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
-               OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus1_fclk_1, "func_24m_clk", &func_24m_clk, 0x0,
-               OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
-               OMAP4430_OPTFCLKEN_FCLK1_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus1_fclk_0, "abe_24m_fclk", &abe_24m_fclk, 0x0,
-               OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
-               OMAP4430_OPTFCLKEN_FCLK0_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus1_fclk_2, "pad_clks_ck", &pad_clks_ck, 0x0,
-               OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
-               OMAP4430_OPTFCLKEN_FCLK2_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus1_slimbus_clk, "slimbus_clk", &slimbus_clk, 0x0,
-               OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
-               OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus2_fclk_1, "per_abe_24m_fclk", &per_abe_24m_fclk, 0x0,
-               OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
-               OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus2_fclk_0, "func_24mc_fclk", &func_24mc_fclk, 0x0,
-               OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
-               OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(slimbus2_slimbus_clk, "pad_slimbus_core_clks_ck",
-               &pad_slimbus_core_clks_ck, 0x0,
-               OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
-               OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(smartreflex_core_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
-               0x0, OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(smartreflex_iva_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
-               0x0, OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(smartreflex_mpu_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
-               0x0, OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-static const struct clksel dmt1_clk_mux_sel[] = {
-       { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
-       { .parent = &sys_32k_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-DEFINE_CLK_OMAP_MUX(dmt1_clk_mux, "l4_wkup_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_WKUP_TIMER1_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(cm2_dm10_mux, "l4_per_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(cm2_dm11_mux, "l4_per_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(cm2_dm2_mux, "l4_per_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(cm2_dm3_mux, "l4_per_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(cm2_dm4_mux, "l4_per_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-static const struct clksel timer5_sync_mux_sel[] = {
-       { .parent = &syc_clk_div_ck, .rates = div_1_0_rates },
-       { .parent = &sys_32k_ck, .rates = div_1_1_rates },
-       { .parent = NULL },
-};
-
-static const char *timer5_sync_mux_parents[] = {
-       "syc_clk_div_ck", "sys_32k_ck",
-};
-
-DEFINE_CLK_OMAP_MUX(timer5_sync_mux, "abe_clkdm", timer5_sync_mux_sel,
-                   OMAP4430_CM1_ABE_TIMER5_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   timer5_sync_mux_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(timer6_sync_mux, "abe_clkdm", timer5_sync_mux_sel,
-                   OMAP4430_CM1_ABE_TIMER6_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   timer5_sync_mux_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(timer7_sync_mux, "abe_clkdm", timer5_sync_mux_sel,
-                   OMAP4430_CM1_ABE_TIMER7_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   timer5_sync_mux_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(timer8_sync_mux, "abe_clkdm", timer5_sync_mux_sel,
-                   OMAP4430_CM1_ABE_TIMER8_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   timer5_sync_mux_parents, func_dmic_abe_gfclk_ops);
-
-DEFINE_CLK_OMAP_MUX(cm2_dm9_mux, "l4_per_clkdm", dmt1_clk_mux_sel,
-                   OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL, OMAP4430_CLKSEL_MASK,
-                   abe_dpll_bypass_clk_mux_ck_parents,
-                   func_dmic_abe_gfclk_ops);
-
-static struct clk usb_host_fs_fck;
-
-static const char *usb_host_fs_fck_parent_names[] = {
-       "func_48mc_fclk",
-};
-
-static const struct clk_ops usb_host_fs_fck_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap usb_host_fs_fck_hw = {
-       .hw = {
-               .clk = &usb_host_fs_fck,
-       },
-       .enable_reg     = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
-       .enable_bit     = OMAP4430_MODULEMODE_SWCTRL_SHIFT,
-       .clkdm_name     = "l3_init_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usb_host_fs_fck, usb_host_fs_fck_parent_names,
-                 usb_host_fs_fck_ops);
-
-static const char *utmi_p1_gfclk_parents[] = {
-       "init_60m_fclk", "xclk60mhsp1_ck",
-};
-
-DEFINE_CLK_MUX(utmi_p1_gfclk, utmi_p1_gfclk_parents, NULL, 0x0,
-              OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-              OMAP4430_CLKSEL_UTMI_P1_SHIFT, OMAP4430_CLKSEL_UTMI_P1_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_utmi_p1_clk, "utmi_p1_gfclk", &utmi_p1_gfclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT, 0x0, NULL);
-
-static const char *utmi_p2_gfclk_parents[] = {
-       "init_60m_fclk", "xclk60mhsp2_ck",
-};
-
-DEFINE_CLK_MUX(utmi_p2_gfclk, utmi_p2_gfclk_parents, NULL, 0x0,
-              OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-              OMAP4430_CLKSEL_UTMI_P2_SHIFT, OMAP4430_CLKSEL_UTMI_P2_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_utmi_p2_clk, "utmi_p2_gfclk", &utmi_p2_gfclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_utmi_p3_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_hsic480m_p1_clk, "dpll_usb_m2_ck",
-               &dpll_usb_m2_ck, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_hsic60m_p1_clk, "init_60m_fclk",
-               &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_hsic60m_p2_clk, "init_60m_fclk",
-               &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_hsic480m_p2_clk, "dpll_usb_m2_ck",
-               &dpll_usb_m2_ck, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_func48mclk, "func_48mc_fclk", &func_48mc_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_host_hs_fck, "init_60m_fclk", &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-               OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
-
-static const char *otg_60m_gfclk_parents[] = {
-       "utmi_phy_clkout_ck", "xclk60motg_ck",
-};
-
-DEFINE_CLK_MUX(otg_60m_gfclk, otg_60m_gfclk_parents, NULL, 0x0,
-              OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL, OMAP4430_CLKSEL_60M_SHIFT,
-              OMAP4430_CLKSEL_60M_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_otg_hs_xclk, "otg_60m_gfclk", &otg_60m_gfclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
-               OMAP4430_OPTFCLKEN_XCLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_otg_hs_ick, "l3_div_ck", &l3_div_ck, 0x0,
-               OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
-               OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_phy_cm_clk32k, "sys_32k_ck", &sys_32k_ck, 0x0,
-               OMAP4430_CM_ALWON_USBPHY_CLKCTRL,
-               OMAP4430_OPTFCLKEN_CLK32K_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_tll_hs_usb_ch2_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
-               OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_tll_hs_usb_ch0_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
-               OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_tll_hs_usb_ch1_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
-               OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
-               OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT, 0x0, NULL);
-
-DEFINE_CLK_GATE(usb_tll_hs_ick, "l4_div_ck", &l4_div_ck, 0x0,
-               OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
-               OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
-
-static const struct clk_div_table usim_ck_rates[] = {
-       { .div = 14, .val = 0 },
-       { .div = 18, .val = 1 },
-       { .div = 0 },
-};
-DEFINE_CLK_DIVIDER_TABLE(usim_ck, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, 0x0,
-                        OMAP4430_CM_WKUP_USIM_CLKCTRL,
-                        OMAP4430_CLKSEL_DIV_SHIFT, OMAP4430_CLKSEL_DIV_WIDTH,
-                        0x0, usim_ck_rates, NULL);
-
-DEFINE_CLK_GATE(usim_fclk, "usim_ck", &usim_ck, 0x0,
-               OMAP4430_CM_WKUP_USIM_CLKCTRL, OMAP4430_OPTFCLKEN_FCLK_SHIFT,
-               0x0, NULL);
-
-/* Remaining optional clocks */
-static const char *pmd_stm_clock_mux_ck_parents[] = {
-       "sys_clkin_ck", "dpll_core_m6x2_ck", "tie_low_clock_ck",
-};
-
-DEFINE_CLK_MUX(pmd_stm_clock_mux_ck, pmd_stm_clock_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM_EMU_DEBUGSS_CLKCTRL, OMAP4430_PMD_STM_MUX_CTRL_SHIFT,
-              OMAP4430_PMD_STM_MUX_CTRL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_MUX(pmd_trace_clk_mux_ck, pmd_stm_clock_mux_ck_parents, NULL, 0x0,
-              OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
-              OMAP4430_PMD_TRACE_MUX_CTRL_SHIFT,
-              OMAP4430_PMD_TRACE_MUX_CTRL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(stm_clk_div_ck, "pmd_stm_clock_mux_ck",
-                  &pmd_stm_clock_mux_ck, 0x0, OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
-                  OMAP4430_CLKSEL_PMD_STM_CLK_SHIFT,
-                  OMAP4430_CLKSEL_PMD_STM_CLK_WIDTH, CLK_DIVIDER_POWER_OF_TWO,
-                  NULL);
-
-static const char *trace_clk_div_ck_parents[] = {
-       "pmd_trace_clk_mux_ck",
-};
-
-static const struct clksel trace_clk_div_div[] = {
-       { .parent = &pmd_trace_clk_mux_ck, .rates = div3_1to4_rates },
-       { .parent = NULL },
-};
-
-static struct clk trace_clk_div_ck;
-
-static const struct clk_ops trace_clk_div_ck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_clkops_enable_clkdm,
-       .disable        = &omap2_clkops_disable_clkdm,
-};
-
-static struct clk_hw_omap trace_clk_div_ck_hw = {
-       .hw = {
-               .clk = &trace_clk_div_ck,
-       },
-       .clkdm_name     = "emu_sys_clkdm",
-       .clksel         = trace_clk_div_div,
-       .clksel_reg     = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
-       .clksel_mask    = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
-};
-
-DEFINE_STRUCT_CLK(trace_clk_div_ck, trace_clk_div_ck_parents,
-                 trace_clk_div_ck_ops);
-
-/* SCRM aux clk nodes */
-
-static const struct clksel auxclk_src_sel[] = {
-       { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
-       { .parent = &dpll_core_m3x2_ck, .rates = div_1_1_rates },
-       { .parent = &dpll_per_m3x2_ck, .rates = div_1_2_rates },
-       { .parent = NULL },
-};
-
-static const char *auxclk_src_ck_parents[] = {
-       "sys_clkin_ck", "dpll_core_m3x2_ck", "dpll_per_m3x2_ck",
-};
-
-static const struct clk_ops auxclk_src_ck_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(auxclk0_src_ck, NULL, auxclk_src_sel,
-                        OMAP4_SCRM_AUXCLK0, OMAP4_SRCSELECT_MASK,
-                        OMAP4_SCRM_AUXCLK0, OMAP4_ENABLE_SHIFT, NULL,
-                        auxclk_src_ck_parents, auxclk_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(auxclk0_ck, "auxclk0_src_ck", &auxclk0_src_ck, 0x0,
-                  OMAP4_SCRM_AUXCLK0, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
-                  0x0, NULL);
-
-DEFINE_CLK_OMAP_MUX_GATE(auxclk1_src_ck, NULL, auxclk_src_sel,
-                        OMAP4_SCRM_AUXCLK1, OMAP4_SRCSELECT_MASK,
-                        OMAP4_SCRM_AUXCLK1, OMAP4_ENABLE_SHIFT, NULL,
-                        auxclk_src_ck_parents, auxclk_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(auxclk1_ck, "auxclk1_src_ck", &auxclk1_src_ck, 0x0,
-                  OMAP4_SCRM_AUXCLK1, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
-                  0x0, NULL);
-
-DEFINE_CLK_OMAP_MUX_GATE(auxclk2_src_ck, NULL, auxclk_src_sel,
-                        OMAP4_SCRM_AUXCLK2, OMAP4_SRCSELECT_MASK,
-                        OMAP4_SCRM_AUXCLK2, OMAP4_ENABLE_SHIFT, NULL,
-                        auxclk_src_ck_parents, auxclk_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(auxclk2_ck, "auxclk2_src_ck", &auxclk2_src_ck, 0x0,
-                  OMAP4_SCRM_AUXCLK2, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
-                  0x0, NULL);
-
-DEFINE_CLK_OMAP_MUX_GATE(auxclk3_src_ck, NULL, auxclk_src_sel,
-                        OMAP4_SCRM_AUXCLK3, OMAP4_SRCSELECT_MASK,
-                        OMAP4_SCRM_AUXCLK3, OMAP4_ENABLE_SHIFT, NULL,
-                        auxclk_src_ck_parents, auxclk_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(auxclk3_ck, "auxclk3_src_ck", &auxclk3_src_ck, 0x0,
-                  OMAP4_SCRM_AUXCLK3, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
-                  0x0, NULL);
-
-DEFINE_CLK_OMAP_MUX_GATE(auxclk4_src_ck, NULL, auxclk_src_sel,
-                        OMAP4_SCRM_AUXCLK4, OMAP4_SRCSELECT_MASK,
-                        OMAP4_SCRM_AUXCLK4, OMAP4_ENABLE_SHIFT, NULL,
-                        auxclk_src_ck_parents, auxclk_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(auxclk4_ck, "auxclk4_src_ck", &auxclk4_src_ck, 0x0,
-                  OMAP4_SCRM_AUXCLK4, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
-                  0x0, NULL);
-
-DEFINE_CLK_OMAP_MUX_GATE(auxclk5_src_ck, NULL, auxclk_src_sel,
-                        OMAP4_SCRM_AUXCLK5, OMAP4_SRCSELECT_MASK,
-                        OMAP4_SCRM_AUXCLK5, OMAP4_ENABLE_SHIFT, NULL,
-                        auxclk_src_ck_parents, auxclk_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(auxclk5_ck, "auxclk5_src_ck", &auxclk5_src_ck, 0x0,
-                  OMAP4_SCRM_AUXCLK5, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
-                  0x0, NULL);
-
-static const char *auxclkreq_ck_parents[] = {
-       "auxclk0_ck", "auxclk1_ck", "auxclk2_ck", "auxclk3_ck", "auxclk4_ck",
-       "auxclk5_ck",
-};
-
-DEFINE_CLK_MUX(auxclkreq0_ck, auxclkreq_ck_parents, NULL, 0x0,
-              OMAP4_SCRM_AUXCLKREQ0, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_MUX(auxclkreq1_ck, auxclkreq_ck_parents, NULL, 0x0,
-              OMAP4_SCRM_AUXCLKREQ1, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_MUX(auxclkreq2_ck, auxclkreq_ck_parents, NULL, 0x0,
-              OMAP4_SCRM_AUXCLKREQ2, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_MUX(auxclkreq3_ck, auxclkreq_ck_parents, NULL, 0x0,
-              OMAP4_SCRM_AUXCLKREQ3, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_MUX(auxclkreq4_ck, auxclkreq_ck_parents, NULL, 0x0,
-              OMAP4_SCRM_AUXCLKREQ4, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_MUX(auxclkreq5_ck, auxclkreq_ck_parents, NULL, 0x0,
-              OMAP4_SCRM_AUXCLKREQ5, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
-              0x0, NULL);
-
-/*
- * clocks specific to omap4460
- */
-static struct omap_clk omap446x_clks[] = {
-       CLK(NULL,       "div_ts_ck",                    &div_ts_ck),
-       CLK(NULL,       "bandgap_ts_fclk",              &bandgap_ts_fclk),
-};
-
-/*
- * clocks specific to omap4430
- */
-static struct omap_clk omap443x_clks[] = {
-       CLK(NULL,       "bandgap_fclk",                 &bandgap_fclk),
-};
-
-/*
- * clocks common to omap44xx
- */
-static struct omap_clk omap44xx_clks[] = {
-       CLK(NULL,       "extalt_clkin_ck",              &extalt_clkin_ck),
-       CLK(NULL,       "pad_clks_src_ck",              &pad_clks_src_ck),
-       CLK(NULL,       "pad_clks_ck",                  &pad_clks_ck),
-       CLK(NULL,       "pad_slimbus_core_clks_ck",     &pad_slimbus_core_clks_ck),
-       CLK(NULL,       "secure_32k_clk_src_ck",        &secure_32k_clk_src_ck),
-       CLK(NULL,       "slimbus_src_clk",              &slimbus_src_clk),
-       CLK(NULL,       "slimbus_clk",                  &slimbus_clk),
-       CLK(NULL,       "sys_32k_ck",                   &sys_32k_ck),
-       CLK(NULL,       "virt_12000000_ck",             &virt_12000000_ck),
-       CLK(NULL,       "virt_13000000_ck",             &virt_13000000_ck),
-       CLK(NULL,       "virt_16800000_ck",             &virt_16800000_ck),
-       CLK(NULL,       "virt_19200000_ck",             &virt_19200000_ck),
-       CLK(NULL,       "virt_26000000_ck",             &virt_26000000_ck),
-       CLK(NULL,       "virt_27000000_ck",             &virt_27000000_ck),
-       CLK(NULL,       "virt_38400000_ck",             &virt_38400000_ck),
-       CLK(NULL,       "sys_clkin_ck",                 &sys_clkin_ck),
-       CLK(NULL,       "tie_low_clock_ck",             &tie_low_clock_ck),
-       CLK(NULL,       "utmi_phy_clkout_ck",           &utmi_phy_clkout_ck),
-       CLK(NULL,       "xclk60mhsp1_ck",               &xclk60mhsp1_ck),
-       CLK(NULL,       "xclk60mhsp2_ck",               &xclk60mhsp2_ck),
-       CLK(NULL,       "xclk60motg_ck",                &xclk60motg_ck),
-       CLK(NULL,       "abe_dpll_bypass_clk_mux_ck",   &abe_dpll_bypass_clk_mux_ck),
-       CLK(NULL,       "abe_dpll_refclk_mux_ck",       &abe_dpll_refclk_mux_ck),
-       CLK(NULL,       "dpll_abe_ck",                  &dpll_abe_ck),
-       CLK(NULL,       "dpll_abe_x2_ck",               &dpll_abe_x2_ck),
-       CLK(NULL,       "dpll_abe_m2x2_ck",             &dpll_abe_m2x2_ck),
-       CLK(NULL,       "abe_24m_fclk",                 &abe_24m_fclk),
-       CLK(NULL,       "abe_clk",                      &abe_clk),
-       CLK(NULL,       "aess_fclk",                    &aess_fclk),
-       CLK(NULL,       "dpll_abe_m3x2_ck",             &dpll_abe_m3x2_ck),
-       CLK(NULL,       "core_hsd_byp_clk_mux_ck",      &core_hsd_byp_clk_mux_ck),
-       CLK(NULL,       "dpll_core_ck",                 &dpll_core_ck),
-       CLK(NULL,       "dpll_core_x2_ck",              &dpll_core_x2_ck),
-       CLK(NULL,       "dpll_core_m6x2_ck",            &dpll_core_m6x2_ck),
-       CLK(NULL,       "dbgclk_mux_ck",                &dbgclk_mux_ck),
-       CLK(NULL,       "dpll_core_m2_ck",              &dpll_core_m2_ck),
-       CLK(NULL,       "ddrphy_ck",                    &ddrphy_ck),
-       CLK(NULL,       "dpll_core_m5x2_ck",            &dpll_core_m5x2_ck),
-       CLK(NULL,       "div_core_ck",                  &div_core_ck),
-       CLK(NULL,       "div_iva_hs_clk",               &div_iva_hs_clk),
-       CLK(NULL,       "div_mpu_hs_clk",               &div_mpu_hs_clk),
-       CLK(NULL,       "dpll_core_m4x2_ck",            &dpll_core_m4x2_ck),
-       CLK(NULL,       "dll_clk_div_ck",               &dll_clk_div_ck),
-       CLK(NULL,       "dpll_abe_m2_ck",               &dpll_abe_m2_ck),
-       CLK(NULL,       "dpll_core_m3x2_ck",            &dpll_core_m3x2_ck),
-       CLK(NULL,       "dpll_core_m7x2_ck",            &dpll_core_m7x2_ck),
-       CLK(NULL,       "iva_hsd_byp_clk_mux_ck",       &iva_hsd_byp_clk_mux_ck),
-       CLK(NULL,       "dpll_iva_ck",                  &dpll_iva_ck),
-       CLK(NULL,       "dpll_iva_x2_ck",               &dpll_iva_x2_ck),
-       CLK(NULL,       "dpll_iva_m4x2_ck",             &dpll_iva_m4x2_ck),
-       CLK(NULL,       "dpll_iva_m5x2_ck",             &dpll_iva_m5x2_ck),
-       CLK(NULL,       "dpll_mpu_ck",                  &dpll_mpu_ck),
-       CLK(NULL,       "dpll_mpu_m2_ck",               &dpll_mpu_m2_ck),
-       CLK(NULL,       "per_hs_clk_div_ck",            &per_hs_clk_div_ck),
-       CLK(NULL,       "per_hsd_byp_clk_mux_ck",       &per_hsd_byp_clk_mux_ck),
-       CLK(NULL,       "dpll_per_ck",                  &dpll_per_ck),
-       CLK(NULL,       "dpll_per_m2_ck",               &dpll_per_m2_ck),
-       CLK(NULL,       "dpll_per_x2_ck",               &dpll_per_x2_ck),
-       CLK(NULL,       "dpll_per_m2x2_ck",             &dpll_per_m2x2_ck),
-       CLK(NULL,       "dpll_per_m3x2_ck",             &dpll_per_m3x2_ck),
-       CLK(NULL,       "dpll_per_m4x2_ck",             &dpll_per_m4x2_ck),
-       CLK(NULL,       "dpll_per_m5x2_ck",             &dpll_per_m5x2_ck),
-       CLK(NULL,       "dpll_per_m6x2_ck",             &dpll_per_m6x2_ck),
-       CLK(NULL,       "dpll_per_m7x2_ck",             &dpll_per_m7x2_ck),
-       CLK(NULL,       "usb_hs_clk_div_ck",            &usb_hs_clk_div_ck),
-       CLK(NULL,       "dpll_usb_ck",                  &dpll_usb_ck),
-       CLK(NULL,       "dpll_usb_clkdcoldo_ck",        &dpll_usb_clkdcoldo_ck),
-       CLK(NULL,       "dpll_usb_m2_ck",               &dpll_usb_m2_ck),
-       CLK(NULL,       "ducati_clk_mux_ck",            &ducati_clk_mux_ck),
-       CLK(NULL,       "func_12m_fclk",                &func_12m_fclk),
-       CLK(NULL,       "func_24m_clk",                 &func_24m_clk),
-       CLK(NULL,       "func_24mc_fclk",               &func_24mc_fclk),
-       CLK(NULL,       "func_48m_fclk",                &func_48m_fclk),
-       CLK(NULL,       "func_48mc_fclk",               &func_48mc_fclk),
-       CLK(NULL,       "func_64m_fclk",                &func_64m_fclk),
-       CLK(NULL,       "func_96m_fclk",                &func_96m_fclk),
-       CLK(NULL,       "init_60m_fclk",                &init_60m_fclk),
-       CLK(NULL,       "l3_div_ck",                    &l3_div_ck),
-       CLK(NULL,       "l4_div_ck",                    &l4_div_ck),
-       CLK(NULL,       "lp_clk_div_ck",                &lp_clk_div_ck),
-       CLK(NULL,       "l4_wkup_clk_mux_ck",           &l4_wkup_clk_mux_ck),
-       CLK("smp_twd",  NULL,                           &mpu_periphclk),
-       CLK(NULL,       "ocp_abe_iclk",                 &ocp_abe_iclk),
-       CLK(NULL,       "per_abe_24m_fclk",             &per_abe_24m_fclk),
-       CLK(NULL,       "per_abe_nc_fclk",              &per_abe_nc_fclk),
-       CLK(NULL,       "syc_clk_div_ck",               &syc_clk_div_ck),
-       CLK(NULL,       "aes1_fck",                     &aes1_fck),
-       CLK(NULL,       "aes2_fck",                     &aes2_fck),
-       CLK(NULL,       "dmic_sync_mux_ck",             &dmic_sync_mux_ck),
-       CLK(NULL,       "func_dmic_abe_gfclk",          &func_dmic_abe_gfclk),
-       CLK(NULL,       "dss_sys_clk",                  &dss_sys_clk),
-       CLK(NULL,       "dss_tv_clk",                   &dss_tv_clk),
-       CLK(NULL,       "dss_dss_clk",                  &dss_dss_clk),
-       CLK(NULL,       "dss_48mhz_clk",                &dss_48mhz_clk),
-       CLK(NULL,       "dss_fck",                      &dss_fck),
-       CLK("omapdss_dss",      "ick",                  &dss_fck),
-       CLK(NULL,       "fdif_fck",                     &fdif_fck),
-       CLK(NULL,       "gpio1_dbclk",                  &gpio1_dbclk),
-       CLK(NULL,       "gpio2_dbclk",                  &gpio2_dbclk),
-       CLK(NULL,       "gpio3_dbclk",                  &gpio3_dbclk),
-       CLK(NULL,       "gpio4_dbclk",                  &gpio4_dbclk),
-       CLK(NULL,       "gpio5_dbclk",                  &gpio5_dbclk),
-       CLK(NULL,       "gpio6_dbclk",                  &gpio6_dbclk),
-       CLK(NULL,       "sgx_clk_mux",                  &sgx_clk_mux),
-       CLK(NULL,       "hsi_fck",                      &hsi_fck),
-       CLK(NULL,       "iss_ctrlclk",                  &iss_ctrlclk),
-       CLK(NULL,       "mcasp_sync_mux_ck",            &mcasp_sync_mux_ck),
-       CLK(NULL,       "func_mcasp_abe_gfclk",         &func_mcasp_abe_gfclk),
-       CLK(NULL,       "mcbsp1_sync_mux_ck",           &mcbsp1_sync_mux_ck),
-       CLK(NULL,       "func_mcbsp1_gfclk",            &func_mcbsp1_gfclk),
-       CLK(NULL,       "mcbsp2_sync_mux_ck",           &mcbsp2_sync_mux_ck),
-       CLK(NULL,       "func_mcbsp2_gfclk",            &func_mcbsp2_gfclk),
-       CLK(NULL,       "mcbsp3_sync_mux_ck",           &mcbsp3_sync_mux_ck),
-       CLK(NULL,       "func_mcbsp3_gfclk",            &func_mcbsp3_gfclk),
-       CLK(NULL,       "mcbsp4_sync_mux_ck",           &mcbsp4_sync_mux_ck),
-       CLK(NULL,       "per_mcbsp4_gfclk",             &per_mcbsp4_gfclk),
-       CLK(NULL,       "hsmmc1_fclk",                  &hsmmc1_fclk),
-       CLK(NULL,       "hsmmc2_fclk",                  &hsmmc2_fclk),
-       CLK(NULL,       "ocp2scp_usb_phy_phy_48m",      &ocp2scp_usb_phy_phy_48m),
-       CLK(NULL,       "sha2md5_fck",                  &sha2md5_fck),
-       CLK(NULL,       "slimbus1_fclk_1",              &slimbus1_fclk_1),
-       CLK(NULL,       "slimbus1_fclk_0",              &slimbus1_fclk_0),
-       CLK(NULL,       "slimbus1_fclk_2",              &slimbus1_fclk_2),
-       CLK(NULL,       "slimbus1_slimbus_clk",         &slimbus1_slimbus_clk),
-       CLK(NULL,       "slimbus2_fclk_1",              &slimbus2_fclk_1),
-       CLK(NULL,       "slimbus2_fclk_0",              &slimbus2_fclk_0),
-       CLK(NULL,       "slimbus2_slimbus_clk",         &slimbus2_slimbus_clk),
-       CLK(NULL,       "smartreflex_core_fck",         &smartreflex_core_fck),
-       CLK(NULL,       "smartreflex_iva_fck",          &smartreflex_iva_fck),
-       CLK(NULL,       "smartreflex_mpu_fck",          &smartreflex_mpu_fck),
-       CLK(NULL,       "dmt1_clk_mux",                 &dmt1_clk_mux),
-       CLK(NULL,       "cm2_dm10_mux",                 &cm2_dm10_mux),
-       CLK(NULL,       "cm2_dm11_mux",                 &cm2_dm11_mux),
-       CLK(NULL,       "cm2_dm2_mux",                  &cm2_dm2_mux),
-       CLK(NULL,       "cm2_dm3_mux",                  &cm2_dm3_mux),
-       CLK(NULL,       "cm2_dm4_mux",                  &cm2_dm4_mux),
-       CLK(NULL,       "timer5_sync_mux",              &timer5_sync_mux),
-       CLK(NULL,       "timer6_sync_mux",              &timer6_sync_mux),
-       CLK(NULL,       "timer7_sync_mux",              &timer7_sync_mux),
-       CLK(NULL,       "timer8_sync_mux",              &timer8_sync_mux),
-       CLK(NULL,       "cm2_dm9_mux",                  &cm2_dm9_mux),
-       CLK(NULL,       "usb_host_fs_fck",              &usb_host_fs_fck),
-       CLK("usbhs_omap",       "fs_fck",               &usb_host_fs_fck),
-       CLK(NULL,       "utmi_p1_gfclk",                &utmi_p1_gfclk),
-       CLK(NULL,       "usb_host_hs_utmi_p1_clk",      &usb_host_hs_utmi_p1_clk),
-       CLK(NULL,       "utmi_p2_gfclk",                &utmi_p2_gfclk),
-       CLK(NULL,       "usb_host_hs_utmi_p2_clk",      &usb_host_hs_utmi_p2_clk),
-       CLK(NULL,       "usb_host_hs_utmi_p3_clk",      &usb_host_hs_utmi_p3_clk),
-       CLK(NULL,       "usb_host_hs_hsic480m_p1_clk",  &usb_host_hs_hsic480m_p1_clk),
-       CLK(NULL,       "usb_host_hs_hsic60m_p1_clk",   &usb_host_hs_hsic60m_p1_clk),
-       CLK(NULL,       "usb_host_hs_hsic60m_p2_clk",   &usb_host_hs_hsic60m_p2_clk),
-       CLK(NULL,       "usb_host_hs_hsic480m_p2_clk",  &usb_host_hs_hsic480m_p2_clk),
-       CLK(NULL,       "usb_host_hs_func48mclk",       &usb_host_hs_func48mclk),
-       CLK(NULL,       "usb_host_hs_fck",              &usb_host_hs_fck),
-       CLK("usbhs_omap",       "hs_fck",               &usb_host_hs_fck),
-       CLK(NULL,       "otg_60m_gfclk",                &otg_60m_gfclk),
-       CLK(NULL,       "usb_otg_hs_xclk",              &usb_otg_hs_xclk),
-       CLK(NULL,       "usb_otg_hs_ick",               &usb_otg_hs_ick),
-       CLK("musb-omap2430",    "ick",                  &usb_otg_hs_ick),
-       CLK(NULL,       "usb_phy_cm_clk32k",            &usb_phy_cm_clk32k),
-       CLK(NULL,       "usb_tll_hs_usb_ch2_clk",       &usb_tll_hs_usb_ch2_clk),
-       CLK(NULL,       "usb_tll_hs_usb_ch0_clk",       &usb_tll_hs_usb_ch0_clk),
-       CLK(NULL,       "usb_tll_hs_usb_ch1_clk",       &usb_tll_hs_usb_ch1_clk),
-       CLK(NULL,       "usb_tll_hs_ick",               &usb_tll_hs_ick),
-       CLK("usbhs_omap",       "usbtll_ick",           &usb_tll_hs_ick),
-       CLK("usbhs_tll",        "usbtll_ick",           &usb_tll_hs_ick),
-       CLK(NULL,       "usim_ck",                      &usim_ck),
-       CLK(NULL,       "usim_fclk",                    &usim_fclk),
-       CLK(NULL,       "pmd_stm_clock_mux_ck",         &pmd_stm_clock_mux_ck),
-       CLK(NULL,       "pmd_trace_clk_mux_ck",         &pmd_trace_clk_mux_ck),
-       CLK(NULL,       "stm_clk_div_ck",               &stm_clk_div_ck),
-       CLK(NULL,       "trace_clk_div_ck",             &trace_clk_div_ck),
-       CLK(NULL,       "auxclk0_src_ck",               &auxclk0_src_ck),
-       CLK(NULL,       "auxclk0_ck",                   &auxclk0_ck),
-       CLK(NULL,       "auxclkreq0_ck",                &auxclkreq0_ck),
-       CLK(NULL,       "auxclk1_src_ck",               &auxclk1_src_ck),
-       CLK(NULL,       "auxclk1_ck",                   &auxclk1_ck),
-       CLK(NULL,       "auxclkreq1_ck",                &auxclkreq1_ck),
-       CLK(NULL,       "auxclk2_src_ck",               &auxclk2_src_ck),
-       CLK(NULL,       "auxclk2_ck",                   &auxclk2_ck),
-       CLK(NULL,       "auxclkreq2_ck",                &auxclkreq2_ck),
-       CLK(NULL,       "auxclk3_src_ck",               &auxclk3_src_ck),
-       CLK(NULL,       "auxclk3_ck",                   &auxclk3_ck),
-       CLK(NULL,       "auxclkreq3_ck",                &auxclkreq3_ck),
-       CLK(NULL,       "auxclk4_src_ck",               &auxclk4_src_ck),
-       CLK(NULL,       "auxclk4_ck",                   &auxclk4_ck),
-       CLK(NULL,       "auxclkreq4_ck",                &auxclkreq4_ck),
-       CLK(NULL,       "auxclk5_src_ck",               &auxclk5_src_ck),
-       CLK(NULL,       "auxclk5_ck",                   &auxclk5_ck),
-       CLK(NULL,       "auxclkreq5_ck",                &auxclkreq5_ck),
-       CLK("50000000.gpmc",    "fck",                  &dummy_ck),
-       CLK("omap_i2c.1",       "ick",                  &dummy_ck),
-       CLK("omap_i2c.2",       "ick",                  &dummy_ck),
-       CLK("omap_i2c.3",       "ick",                  &dummy_ck),
-       CLK("omap_i2c.4",       "ick",                  &dummy_ck),
-       CLK(NULL,       "mailboxes_ick",                &dummy_ck),
-       CLK("omap_hsmmc.0",     "ick",                  &dummy_ck),
-       CLK("omap_hsmmc.1",     "ick",                  &dummy_ck),
-       CLK("omap_hsmmc.2",     "ick",                  &dummy_ck),
-       CLK("omap_hsmmc.3",     "ick",                  &dummy_ck),
-       CLK("omap_hsmmc.4",     "ick",                  &dummy_ck),
-       CLK("omap-mcbsp.1",     "ick",                  &dummy_ck),
-       CLK("omap-mcbsp.2",     "ick",                  &dummy_ck),
-       CLK("omap-mcbsp.3",     "ick",                  &dummy_ck),
-       CLK("omap-mcbsp.4",     "ick",                  &dummy_ck),
-       CLK("omap2_mcspi.1",    "ick",                  &dummy_ck),
-       CLK("omap2_mcspi.2",    "ick",                  &dummy_ck),
-       CLK("omap2_mcspi.3",    "ick",                  &dummy_ck),
-       CLK("omap2_mcspi.4",    "ick",                  &dummy_ck),
-       CLK(NULL,       "uart1_ick",                    &dummy_ck),
-       CLK(NULL,       "uart2_ick",                    &dummy_ck),
-       CLK(NULL,       "uart3_ick",                    &dummy_ck),
-       CLK(NULL,       "uart4_ick",                    &dummy_ck),
-       CLK("usbhs_omap",       "usbhost_ick",          &dummy_ck),
-       CLK("usbhs_omap",       "usbtll_fck",           &dummy_ck),
-       CLK("usbhs_tll",        "usbtll_fck",           &dummy_ck),
-       CLK("omap_wdt", "ick",                          &dummy_ck),
-       CLK(NULL,       "timer_32k_ck", &sys_32k_ck),
-       /* TODO: Remove "omap_timer.X" aliases once DT migration is complete */
-       CLK("omap_timer.1",     "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.2",     "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.3",     "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.4",     "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.9",     "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.10",    "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.11",    "timer_sys_ck", &sys_clkin_ck),
-       CLK("omap_timer.5",     "timer_sys_ck", &syc_clk_div_ck),
-       CLK("omap_timer.6",     "timer_sys_ck", &syc_clk_div_ck),
-       CLK("omap_timer.7",     "timer_sys_ck", &syc_clk_div_ck),
-       CLK("omap_timer.8",     "timer_sys_ck", &syc_clk_div_ck),
-       CLK("4a318000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("48032000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("48034000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("48036000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("4803e000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("48086000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("48088000.timer",   "timer_sys_ck", &sys_clkin_ck),
-       CLK("40138000.timer",   "timer_sys_ck", &syc_clk_div_ck),
-       CLK("4013a000.timer",   "timer_sys_ck", &syc_clk_div_ck),
-       CLK("4013c000.timer",   "timer_sys_ck", &syc_clk_div_ck),
-       CLK("4013e000.timer",   "timer_sys_ck", &syc_clk_div_ck),
-       CLK(NULL,       "cpufreq_ck",   &dpll_mpu_ck),
-};
-
-int __init omap4xxx_clk_init(void)
-{
-       int rc;
-
-       if (cpu_is_omap443x()) {
-               cpu_mask = RATE_IN_4430;
-               omap_clocks_register(omap443x_clks, ARRAY_SIZE(omap443x_clks));
-       } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
-               cpu_mask = RATE_IN_4460 | RATE_IN_4430;
-               omap_clocks_register(omap446x_clks, ARRAY_SIZE(omap446x_clks));
-               if (cpu_is_omap447x())
-                       pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
-       } else {
-               return 0;
-       }
-
-       omap_clocks_register(omap44xx_clks, ARRAY_SIZE(omap44xx_clks));
-
-       omap2_clk_disable_autoidle_all();
-
-       /*
-        * A set rate of ABE DPLL inturn triggers a set rate of USB DPLL
-        * when its in bypass. So always lock USB before ABE DPLL.
-        */
-       /*
-        * Lock USB DPLL on OMAP4 devices so that the L3INIT power
-        * domain can transition to retention state when not in use.
-        */
-       rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
-       if (rc)
-               pr_err("%s: failed to configure USB DPLL!\n", __func__);
-
-       /*
-        * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
-        * state when turning the ABE clock domain. Workaround this by
-        * locking the ABE DPLL on boot.
-        * Lock the ABE DPLL in any case to avoid issues with audio.
-        */
-       rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
-       if (!rc)
-               rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
-       if (rc)
-               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-
-       return 0;
-}
index 0ec9f6fdf0463bb6ff435f250c748db075c62ca4..7ee26108ac0d36b76b156b93b619b2cbee0b89d7 100644 (file)
@@ -97,12 +97,12 @@ static void _write_clksel_reg(struct clk_hw_omap *clk, u32 field_val)
 {
        u32 v;
 
-       v = __raw_readl(clk->clksel_reg);
+       v = omap2_clk_readl(clk, clk->clksel_reg);
        v &= ~clk->clksel_mask;
        v |= field_val << __ffs(clk->clksel_mask);
-       __raw_writel(v, clk->clksel_reg);
+       omap2_clk_writel(v, clk, clk->clksel_reg);
 
-       v = __raw_readl(clk->clksel_reg); /* OCP barrier */
+       v = omap2_clk_readl(clk, clk->clksel_reg); /* OCP barrier */
 }
 
 /**
@@ -204,7 +204,7 @@ static u32 _read_divisor(struct clk_hw_omap *clk)
        if (!clk->clksel || !clk->clksel_mask)
                return 0;
 
-       v = __raw_readl(clk->clksel_reg);
+       v = omap2_clk_readl(clk, clk->clksel_reg);
        v &= clk->clksel_mask;
        v >>= __ffs(clk->clksel_mask);
 
@@ -320,7 +320,7 @@ u8 omap2_clksel_find_parent_index(struct clk_hw *hw)
        WARN((!clk->clksel || !clk->clksel_mask),
             "clock: %s: attempt to call on a non-clksel clock", clk_name);
 
-       r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
+       r = omap2_clk_readl(clk, clk->clksel_reg) & clk->clksel_mask;
        r >>= __ffs(clk->clksel_mask);
 
        for (clks = clk->clksel; clks->parent && !found; clks++) {
index 924c230f89484473f057246fb7f3a8b53a28c2bd..47f9562ca7aa0f3007cfac811ffceb90b7cce80e 100644 (file)
@@ -196,7 +196,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
        if (!dd)
                return -EINVAL;
 
-       v = __raw_readl(dd->control_reg);
+       v = omap2_clk_readl(clk, dd->control_reg);
        v &= dd->enable_mask;
        v >>= __ffs(dd->enable_mask);
 
@@ -243,7 +243,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
                return 0;
 
        /* Return bypass rate if DPLL is bypassed */
-       v = __raw_readl(dd->control_reg);
+       v = omap2_clk_readl(clk, dd->control_reg);
        v &= dd->enable_mask;
        v >>= __ffs(dd->enable_mask);
 
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
                        return __clk_get_rate(dd->clk_bypass);
        }
 
-       v = __raw_readl(dd->mult_div1_reg);
+       v = omap2_clk_readl(clk, dd->mult_div1_reg);
        dpll_mult = v & dd->mult_mask;
        dpll_mult >>= __ffs(dd->mult_mask);
        dpll_div = v & dd->div1_mask;
index f10eb03ce3e27493ee1d52b18444ff52d4cf1507..333f0a66617165fa23448e19cd6b59c8856c78d3 100644 (file)
 /* XXX */
 void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
 {
-       u32 v, r;
+       u32 v;
+       void __iomem *r;
 
-       r = ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
+       r = (__force void __iomem *)
+               ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
 
-       v = __raw_readl((__force void __iomem *)r);
+       v = omap2_clk_readl(clk, r);
        v |= (1 << clk->enable_bit);
-       __raw_writel(v, (__force void __iomem *)r);
+       omap2_clk_writel(v, clk, r);
 }
 
 /* XXX */
 void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
 {
-       u32 v, r;
+       u32 v;
+       void __iomem *r;
 
-       r = ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
+       r = (__force void __iomem *)
+               ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
 
-       v = __raw_readl((__force void __iomem *)r);
+       v = omap2_clk_readl(clk, r);
        v &= ~(1 << clk->enable_bit);
-       __raw_writel(v, (__force void __iomem *)r);
+       omap2_clk_writel(v, clk, r);
 }
 
 /* Public data */
index c7c5d31e90829141373662f32ffb39ce6c32637a..591581a665321c09fafbe78fd9c5bdbcae53c5a4 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/clk-private.h>
 #include <asm/cpu.h>
 
-
 #include <trace/events/power.h>
 
 #include "soc.h"
@@ -56,6 +55,31 @@ u16 cpu_mask;
 static bool clkdm_control = true;
 
 static LIST_HEAD(clk_hw_omap_clocks);
+void __iomem *clk_memmaps[CLK_MAX_MEMMAPS];
+
+void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg)
+{
+       if (clk->flags & MEMMAP_ADDRESSING) {
+               struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+               writel_relaxed(val, clk_memmaps[r->index] + r->offset);
+       } else {
+               writel_relaxed(val, reg);
+       }
+}
+
+u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg)
+{
+       u32 val;
+
+       if (clk->flags & MEMMAP_ADDRESSING) {
+               struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+               val = readl_relaxed(clk_memmaps[r->index] + r->offset);
+       } else {
+               val = readl_relaxed(reg);
+       }
+
+       return val;
+}
 
 /*
  * Used for clocks that have the same value as the parent clock,
@@ -87,6 +111,7 @@ unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
 
 /**
  * _wait_idlest_generic - wait for a module to leave the idle state
+ * @clk: module clock to wait for (needed for register offsets)
  * @reg: virtual address of module IDLEST register
  * @mask: value to mask against to determine if the module is active
  * @idlest: idle state indicator (0 or 1) for the clock
@@ -98,14 +123,14 @@ unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
  * elapsed.  XXX Deprecated - should be moved into drivers for the
  * individual IP block that the IDLEST register exists in.
  */
-static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
-                               const char *name)
+static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg,
+                               u32 mask, u8 idlest, const char *name)
 {
        int i = 0, ena = 0;
 
        ena = (idlest) ? 0 : mask;
 
-       omap_test_timeout(((__raw_readl(reg) & mask) == ena),
+       omap_test_timeout(((omap2_clk_readl(clk, reg) & mask) == ena),
                          MAX_MODULE_ENABLE_WAIT, i);
 
        if (i < MAX_MODULE_ENABLE_WAIT)
@@ -138,7 +163,7 @@ static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
        /* Not all modules have multiple clocks that their IDLEST depends on */
        if (clk->ops->find_companion) {
                clk->ops->find_companion(clk, &companion_reg, &other_bit);
-               if (!(__raw_readl(companion_reg) & (1 << other_bit)))
+               if (!(omap2_clk_readl(clk, companion_reg) & (1 << other_bit)))
                        return;
        }
 
@@ -146,8 +171,8 @@ static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
        r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
        if (r) {
                /* IDLEST register not in the CM module */
-               _wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
-                                    __clk_get_name(clk->hw.clk));
+               _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
+                                    idlest_val, __clk_get_name(clk->hw.clk));
        } else {
                cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
        };
@@ -309,13 +334,13 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
        }
 
        /* FIXME should not have INVERT_ENABLE bit here */
-       v = __raw_readl(clk->enable_reg);
+       v = omap2_clk_readl(clk, clk->enable_reg);
        if (clk->flags & INVERT_ENABLE)
                v &= ~(1 << clk->enable_bit);
        else
                v |= (1 << clk->enable_bit);
-       __raw_writel(v, clk->enable_reg);
-       v = __raw_readl(clk->enable_reg); /* OCP barrier */
+       omap2_clk_writel(v, clk, clk->enable_reg);
+       v = omap2_clk_readl(clk, clk->enable_reg); /* OCP barrier */
 
        if (clk->ops && clk->ops->find_idlest)
                _omap2_module_wait_ready(clk);
@@ -353,12 +378,12 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
                return;
        }
 
-       v = __raw_readl(clk->enable_reg);
+       v = omap2_clk_readl(clk, clk->enable_reg);
        if (clk->flags & INVERT_ENABLE)
                v |= (1 << clk->enable_bit);
        else
                v &= ~(1 << clk->enable_bit);
-       __raw_writel(v, clk->enable_reg);
+       omap2_clk_writel(v, clk, clk->enable_reg);
        /* No OCP barrier needed here since it is a disable operation */
 
        if (clkdm_control && clk->clkdm)
@@ -454,7 +479,7 @@ int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        u32 v;
 
-       v = __raw_readl(clk->enable_reg);
+       v = omap2_clk_readl(clk, clk->enable_reg);
 
        if (clk->flags & INVERT_ENABLE)
                v ^= BIT(clk->enable_bit);
@@ -520,6 +545,9 @@ int omap2_clk_enable_autoidle_all(void)
        list_for_each_entry(c, &clk_hw_omap_clocks, node)
                if (c->ops && c->ops->allow_idle)
                        c->ops->allow_idle(c);
+
+       of_ti_clk_allow_autoidle_all();
+
        return 0;
 }
 
@@ -539,6 +567,9 @@ int omap2_clk_disable_autoidle_all(void)
        list_for_each_entry(c, &clk_hw_omap_clocks, node)
                if (c->ops && c->ops->deny_idle)
                        c->ops->deny_idle(c);
+
+       of_ti_clk_deny_autoidle_all();
+
        return 0;
 }
 
index 82916cc82c920d6fb7d08dcec444dded07ede0bb..bda767a9dea862d7223d86d1a59c0b1505e091f9 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
 
 struct omap_clk {
        u16                             cpu;
@@ -37,7 +38,6 @@ struct omap_clk {
        }
 
 struct clockdomain;
-#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
 
 #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name)     \
        static struct clk _name = {                             \
@@ -178,141 +178,6 @@ struct clksel {
        const struct clksel_rate *rates;
 };
 
-/**
- * struct dpll_data - DPLL registers and integration data
- * @mult_div1_reg: register containing the DPLL M and N bitfields
- * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
- * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
- * @clk_bypass: struct clk pointer to the clock's bypass clock input
- * @clk_ref: struct clk pointer to the clock's reference clock input
- * @control_reg: register containing the DPLL mode bitfield
- * @enable_mask: mask of the DPLL mode bitfield in @control_reg
- * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
- * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
- * @last_rounded_m4xen: cache of the last M4X result of
- *                     omap4_dpll_regm4xen_round_rate()
- * @last_rounded_lpmode: cache of the last lpmode result of
- *                      omap4_dpll_lpmode_recalc()
- * @max_multiplier: maximum valid non-bypass multiplier value (actual)
- * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
- * @min_divider: minimum valid non-bypass divider value (actual)
- * @max_divider: maximum valid non-bypass divider value (actual)
- * @modes: possible values of @enable_mask
- * @autoidle_reg: register containing the DPLL autoidle mode bitfield
- * @idlest_reg: register containing the DPLL idle status bitfield
- * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
- * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
- * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
- * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
- * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
- * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
- * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
- * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
- * @flags: DPLL type/features (see below)
- *
- * Possible values for @flags:
- * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs)
- *
- * @freqsel_mask is only used on the OMAP34xx family and AM35xx.
- *
- * XXX Some DPLLs have multiple bypass inputs, so it's not technically
- * correct to only have one @clk_bypass pointer.
- *
- * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m,
- * @last_rounded_n) should be separated from the runtime-fixed fields
- * and placed into a different structure, so that the runtime-fixed data
- * can be placed into read-only space.
- */
-struct dpll_data {
-       void __iomem            *mult_div1_reg;
-       u32                     mult_mask;
-       u32                     div1_mask;
-       struct clk              *clk_bypass;
-       struct clk              *clk_ref;
-       void __iomem            *control_reg;
-       u32                     enable_mask;
-       unsigned long           last_rounded_rate;
-       u16                     last_rounded_m;
-       u8                      last_rounded_m4xen;
-       u8                      last_rounded_lpmode;
-       u16                     max_multiplier;
-       u8                      last_rounded_n;
-       u8                      min_divider;
-       u16                     max_divider;
-       u8                      modes;
-       void __iomem            *autoidle_reg;
-       void __iomem            *idlest_reg;
-       u32                     autoidle_mask;
-       u32                     freqsel_mask;
-       u32                     idlest_mask;
-       u32                     dco_mask;
-       u32                     sddiv_mask;
-       u32                     lpmode_mask;
-       u32                     m4xen_mask;
-       u8                      auto_recal_bit;
-       u8                      recal_en_bit;
-       u8                      recal_st_bit;
-       u8                      flags;
-};
-
-/*
- * struct clk.flags possibilities
- *
- * XXX document the rest of the clock flags here
- *
- * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL
- *     bits share the same register.  This flag allows the
- *     omap4_dpllmx*() code to determine which GATE_CTRL bit field
- *     should be used.  This is a temporary solution - a better approach
- *     would be to associate clock type-specific data with the clock,
- *     similar to the struct dpll_data approach.
- */
-#define ENABLE_REG_32BIT       (1 << 0)        /* Use 32-bit access */
-#define CLOCK_IDLE_CONTROL     (1 << 1)
-#define CLOCK_NO_IDLE_PARENT   (1 << 2)
-#define ENABLE_ON_INIT         (1 << 3)        /* Enable upon framework init */
-#define INVERT_ENABLE          (1 << 4)        /* 0 enables, 1 disables */
-#define CLOCK_CLKOUTX2         (1 << 5)
-
-/**
- * struct clk_hw_omap - OMAP struct clk
- * @node: list_head connecting this clock into the full clock list
- * @enable_reg: register to write to enable the clock (see @enable_bit)
- * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
- * @flags: see "struct clk.flags possibilities" above
- * @clksel_reg: for clksel clks, register va containing src/divisor select
- * @clksel_mask: bitmask in @clksel_reg for the src/divisor selector
- * @clksel: for clksel clks, pointer to struct clksel for this clock
- * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock
- * @clkdm_name: clockdomain name that this clock is contained in
- * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime
- * @rate_offset: bitshift for rate selection bitfield (OMAP1 only)
- * @src_offset: bitshift for source selection bitfield (OMAP1 only)
- *
- * XXX @rate_offset, @src_offset should probably be removed and OMAP1
- * clock code converted to use clksel.
- *
- */
-
-struct clk_hw_omap_ops;
-
-struct clk_hw_omap {
-       struct clk_hw           hw;
-       struct list_head        node;
-       unsigned long           fixed_rate;
-       u8                      fixed_div;
-       void __iomem            *enable_reg;
-       u8                      enable_bit;
-       u8                      flags;
-       void __iomem            *clksel_reg;
-       u32                     clksel_mask;
-       const struct clksel     *clksel;
-       struct dpll_data        *dpll_data;
-       const char              *clkdm_name;
-       struct clockdomain      *clkdm;
-       const struct clk_hw_omap_ops    *ops;
-};
-
 struct clk_hw_omap_ops {
        void                    (*find_idlest)(struct clk_hw_omap *oclk,
                                        void __iomem **idlest_reg,
@@ -348,36 +213,13 @@ unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
 #define OMAP4XXX_EN_DPLL_FRBYPASS              0x6
 #define OMAP4XXX_EN_DPLL_LOCKED                        0x7
 
-/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
-#define DPLL_LOW_POWER_STOP    0x1
-#define DPLL_LOW_POWER_BYPASS  0x5
-#define DPLL_LOCKED            0x7
-
-/* DPLL Type and DCO Selection Flags */
-#define DPLL_J_TYPE            0x1
-
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
-                       unsigned long *parent_rate);
-unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
-int omap3_noncore_dpll_enable(struct clk_hw *hw);
-void omap3_noncore_dpll_disable(struct clk_hw *hw);
-int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long parent_rate);
 u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
 void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
 void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
-                                   unsigned long parent_rate);
 int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk);
 void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk);
 void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk);
-unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
-                               unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
-                                   unsigned long target_rate,
-                                   unsigned long *parent_rate);
 
-void omap2_init_clk_clkdm(struct clk_hw *clk);
 void __init omap2_clk_disable_clkdm_control(void);
 
 /* clkt_clksel.c public functions */
@@ -396,29 +238,25 @@ int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val);
 extern void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk);
 extern void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk);
 
-u8 omap2_init_dpll_parent(struct clk_hw *hw);
 unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
 
-int omap2_dflt_clk_enable(struct clk_hw *hw);
-void omap2_dflt_clk_disable(struct clk_hw *hw);
-int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
 void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
                                   void __iomem **other_reg,
                                   u8 *other_bit);
 void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
                                void __iomem **idlest_reg,
                                u8 *idlest_bit, u8 *idlest_val);
-void omap2_init_clk_hw_omap_clocks(struct clk *clk);
 int omap2_clk_enable_autoidle_all(void);
-int omap2_clk_disable_autoidle_all(void);
 int omap2_clk_allow_idle(struct clk *clk);
 int omap2_clk_deny_idle(struct clk *clk);
-void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
 int omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name);
 void omap2_clk_print_new_rates(const char *hfclkin_ck_name,
                               const char *core_ck_name,
                               const char *mpu_ck_name);
 
+u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg);
+void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg);
+
 extern u16 cpu_mask;
 
 extern const struct clkops clkops_omap2_dflt_wait;
@@ -433,19 +271,12 @@ extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
 extern struct clk dummy_ck;
 
-extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
 extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
 extern const struct clk_hw_omap_ops clkhwops_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
-extern const struct clk_hw_omap_ops clkhwops_iclk;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait;
 extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
 extern const struct clk_hw_omap_ops clkhwops_apll54;
 extern const struct clk_hw_omap_ops clkhwops_apll96;
 extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
@@ -460,6 +291,8 @@ extern const struct clksel_rate div_1_3_rates[];
 extern const struct clksel_rate div_1_4_rates[];
 extern const struct clksel_rate div31_1to31_rates[];
 
+extern void __iomem *clk_memmaps[];
+
 extern int am33xx_clk_init(void);
 
 extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
index bbd6a3f717e64e5b23cce67591f374d4dcb7b2f3..91ccb962e09e9ed7a59e45c1399de556048724d7 100644 (file)
@@ -43,6 +43,7 @@ int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
        struct clk_divider *parent;
        struct clk_hw *parent_hw;
        u32 dummy_v, orig_v;
+       struct clk_hw_omap *omap_clk = to_clk_hw_omap(clk);
        int ret;
 
        /* Clear PWRDN bit of HSDIVIDER */
@@ -53,15 +54,15 @@ int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
 
        /* Restore the dividers */
        if (!ret) {
-               orig_v = __raw_readl(parent->reg);
+               orig_v = omap2_clk_readl(omap_clk, parent->reg);
                dummy_v = orig_v;
 
                /* Write any other value different from the Read value */
                dummy_v ^= (1 << parent->shift);
-               __raw_writel(dummy_v, parent->reg);
+               omap2_clk_writel(dummy_v, omap_clk, parent->reg);
 
                /* Write the original divider */
-               __raw_writel(orig_v, parent->reg);
+               omap2_clk_writel(orig_v, omap_clk, parent->reg);
        }
 
        return ret;
index 8cd4b0a882aec39418c8adcd4e41daab0a010c0d..78d9f562e3ce796b76821c812ea3a7cf56050fb0 100644 (file)
@@ -9,11 +9,8 @@
 #define __ARCH_ARM_MACH_OMAP2_CLOCK3XXX_H
 
 int omap3xxx_clk_init(void);
-int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
-                                       unsigned long parent_rate);
 int omap3_core_dpll_m2_set_rate(struct clk_hw *clk, unsigned long rate,
                                        unsigned long parent_rate);
-void omap3_clk_lock_dpll5(void);
 
 extern struct clk *sdrc_ick_p;
 extern struct clk *arm_fck_p;
index 240db38f232c66226bca15be02aa144a8021acce..e515278351600436a691a77770153690f29348d4 100644 (file)
@@ -306,7 +306,7 @@ struct omap_hwmod;
 extern int omap_dss_reset(struct omap_hwmod *);
 
 /* SoC specific clock initializer */
-extern int (*omap_clk_init)(void);
+int omap_clk_init(void);
 
 #endif /* __ASSEMBLER__ */
 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
index 3a0296cfcace879a322a6c414b4b9de9bb0cce62..3185ced807c952804d50199fd3ff7bc8ea68fb8f 100644 (file)
@@ -50,10 +50,10 @@ static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
 
        dd = clk->dpll_data;
 
-       v = __raw_readl(dd->control_reg);
+       v = omap2_clk_readl(clk, dd->control_reg);
        v &= ~dd->enable_mask;
        v |= clken_bits << __ffs(dd->enable_mask);
-       __raw_writel(v, dd->control_reg);
+       omap2_clk_writel(v, clk, dd->control_reg);
 }
 
 /* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
@@ -69,8 +69,8 @@ static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
 
        state <<= __ffs(dd->idlest_mask);
 
-       while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
-              i < MAX_DPLL_WAIT_TRIES) {
+       while (((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask)
+               != state) && i < MAX_DPLL_WAIT_TRIES) {
                i++;
                udelay(1);
        }
@@ -147,7 +147,7 @@ static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
        state <<= __ffs(dd->idlest_mask);
 
        /* Check if already locked */
-       if ((__raw_readl(dd->idlest_reg) & dd->idlest_mask) == state)
+       if ((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) == state)
                goto done;
 
        ai = omap3_dpll_autoidle_read(clk);
@@ -311,14 +311,14 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
         * only since freqsel field is no longer present on other devices.
         */
        if (cpu_is_omap343x()) {
-               v = __raw_readl(dd->control_reg);
+               v = omap2_clk_readl(clk, dd->control_reg);
                v &= ~dd->freqsel_mask;
                v |= freqsel << __ffs(dd->freqsel_mask);
-               __raw_writel(v, dd->control_reg);
+               omap2_clk_writel(v, clk, dd->control_reg);
        }
 
        /* Set DPLL multiplier, divider */
-       v = __raw_readl(dd->mult_div1_reg);
+       v = omap2_clk_readl(clk, dd->mult_div1_reg);
        v &= ~(dd->mult_mask | dd->div1_mask);
        v |= dd->last_rounded_m << __ffs(dd->mult_mask);
        v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
@@ -336,11 +336,11 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
                v |= sd_div << __ffs(dd->sddiv_mask);
        }
 
-       __raw_writel(v, dd->mult_div1_reg);
+       omap2_clk_writel(v, clk, dd->mult_div1_reg);
 
        /* Set 4X multiplier and low-power mode */
        if (dd->m4xen_mask || dd->lpmode_mask) {
-               v = __raw_readl(dd->control_reg);
+               v = omap2_clk_readl(clk, dd->control_reg);
 
                if (dd->m4xen_mask) {
                        if (dd->last_rounded_m4xen)
@@ -356,7 +356,7 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
                                v &= ~dd->lpmode_mask;
                }
 
-               __raw_writel(v, dd->control_reg);
+               omap2_clk_writel(v, clk, dd->control_reg);
        }
 
        /* We let the clock framework set the other output dividers later */
@@ -554,7 +554,7 @@ u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
        if (!dd->autoidle_reg)
                return -EINVAL;
 
-       v = __raw_readl(dd->autoidle_reg);
+       v = omap2_clk_readl(clk, dd->autoidle_reg);
        v &= dd->autoidle_mask;
        v >>= __ffs(dd->autoidle_mask);
 
@@ -588,10 +588,10 @@ void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
         * by writing 0x5 instead of 0x1.  Add some mechanism to
         * optionally enter this mode.
         */
-       v = __raw_readl(dd->autoidle_reg);
+       v = omap2_clk_readl(clk, dd->autoidle_reg);
        v &= ~dd->autoidle_mask;
        v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
-       __raw_writel(v, dd->autoidle_reg);
+       omap2_clk_writel(v, clk, dd->autoidle_reg);
 
 }
 
@@ -614,10 +614,10 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
        if (!dd->autoidle_reg)
                return;
 
-       v = __raw_readl(dd->autoidle_reg);
+       v = omap2_clk_readl(clk, dd->autoidle_reg);
        v &= ~dd->autoidle_mask;
        v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
-       __raw_writel(v, dd->autoidle_reg);
+       omap2_clk_writel(v, clk, dd->autoidle_reg);
 
 }
 
@@ -639,6 +639,9 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
        struct clk_hw_omap *pclk = NULL;
        struct clk *parent;
 
+       if (!parent_rate)
+               return 0;
+
        /* Walk up the parents of clk, looking for a DPLL */
        do {
                do {
@@ -660,7 +663,7 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 
        WARN_ON(!dd->enable_mask);
 
-       v = __raw_readl(dd->control_reg) & dd->enable_mask;
+       v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
        v >>= __ffs(dd->enable_mask);
        if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
                rate = parent_rate;
index d28b0f7267151dfdcf2909b1647c9a5b80ba661b..52f9438b92f2a5d165b548b323bacb8680588bae 100644 (file)
@@ -42,7 +42,7 @@ int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk)
                        OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
                        OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
 
-       v = __raw_readl(clk->clksel_reg);
+       v = omap2_clk_readl(clk, clk->clksel_reg);
        v &= mask;
        v >>= __ffs(mask);
 
@@ -61,10 +61,10 @@ void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
                        OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
                        OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
 
-       v = __raw_readl(clk->clksel_reg);
+       v = omap2_clk_readl(clk, clk->clksel_reg);
        /* Clear the bit to allow gatectrl */
        v &= ~mask;
-       __raw_writel(v, clk->clksel_reg);
+       omap2_clk_writel(v, clk, clk->clksel_reg);
 }
 
 void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
@@ -79,10 +79,10 @@ void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
                        OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
                        OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
 
-       v = __raw_readl(clk->clksel_reg);
+       v = omap2_clk_readl(clk, clk->clksel_reg);
        /* Set the bit to deny gatectrl */
        v |= mask;
-       __raw_writel(v, clk->clksel_reg);
+       omap2_clk_writel(v, clk, clk->clksel_reg);
 }
 
 const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
@@ -140,7 +140,7 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
        rate = omap2_get_dpll_rate(clk);
 
        /* regm4xen adds a multiplier of 4 to DPLL calculations */
-       v = __raw_readl(dd->control_reg);
+       v = omap2_clk_readl(clk, dd->control_reg);
        if (v & OMAP4430_DPLL_REGM4XEN_MASK)
                rate *= OMAP4430_REGM4XEN_MULT;
 
index 07b68d5a7940e402705568114b7e0523cdda44f8..47381fd8746f6f673a6c2aeaa25ca11436c3809e 100644 (file)
 #include "prm44xx.h"
 
 /*
- * omap_clk_init: points to a function that does the SoC-specific
+ * omap_clk_soc_init: points to a function that does the SoC-specific
  * clock initializations
  */
-int (*omap_clk_init)(void);
+static int (*omap_clk_soc_init)(void);
 
 /*
  * The machine specific code may provide the extra mapping besides the
@@ -419,7 +419,7 @@ void __init omap2420_init_early(void)
        omap242x_clockdomains_init();
        omap2420_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_init = omap2420_clk_init;
+       omap_clk_soc_init = omap2420_clk_init;
 }
 
 void __init omap2420_init_late(void)
@@ -448,7 +448,7 @@ void __init omap2430_init_early(void)
        omap243x_clockdomains_init();
        omap2430_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_init = omap2430_clk_init;
+       omap_clk_soc_init = omap2430_clk_init;
 }
 
 void __init omap2430_init_late(void)
@@ -482,27 +482,35 @@ void __init omap3_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_init = omap3xxx_clk_init;
+       omap_clk_soc_init = omap3xxx_clk_init;
 }
 
 void __init omap3430_init_early(void)
 {
        omap3_init_early();
+       if (of_have_populated_dt())
+               omap_clk_soc_init = omap3430_dt_clk_init;
 }
 
 void __init omap35xx_init_early(void)
 {
        omap3_init_early();
+       if (of_have_populated_dt())
+               omap_clk_soc_init = omap3430_dt_clk_init;
 }
 
 void __init omap3630_init_early(void)
 {
        omap3_init_early();
+       if (of_have_populated_dt())
+               omap_clk_soc_init = omap3630_dt_clk_init;
 }
 
 void __init am35xx_init_early(void)
 {
        omap3_init_early();
+       if (of_have_populated_dt())
+               omap_clk_soc_init = am35xx_dt_clk_init;
 }
 
 void __init ti81xx_init_early(void)
@@ -520,7 +528,10 @@ void __init ti81xx_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_init = omap3xxx_clk_init;
+       if (of_have_populated_dt())
+               omap_clk_soc_init = ti81xx_dt_clk_init;
+       else
+               omap_clk_soc_init = omap3xxx_clk_init;
 }
 
 void __init omap3_init_late(void)
@@ -581,7 +592,7 @@ void __init am33xx_init_early(void)
        am33xx_clockdomains_init();
        am33xx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_init = am33xx_clk_init;
+       omap_clk_soc_init = am33xx_dt_clk_init;
 }
 
 void __init am33xx_init_late(void)
@@ -606,6 +617,7 @@ void __init am43xx_init_early(void)
        am43xx_clockdomains_init();
        am43xx_hwmod_init();
        omap_hwmod_init_postsetup();
+       omap_clk_soc_init = am43xx_dt_clk_init;
 }
 
 void __init am43xx_init_late(void)
@@ -635,7 +647,7 @@ void __init omap4430_init_early(void)
        omap44xx_clockdomains_init();
        omap44xx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_init = omap4xxx_clk_init;
+       omap_clk_soc_init = omap4xxx_dt_clk_init;
 }
 
 void __init omap4430_init_late(void)
@@ -666,6 +678,7 @@ void __init omap5_init_early(void)
        omap54xx_clockdomains_init();
        omap54xx_hwmod_init();
        omap_hwmod_init_postsetup();
+       omap_clk_soc_init = omap5xxx_dt_clk_init;
 }
 
 void __init omap5_init_late(void)
@@ -691,6 +704,7 @@ void __init dra7xx_init_early(void)
        dra7xx_clockdomains_init();
        dra7xx_hwmod_init();
        omap_hwmod_init_postsetup();
+       omap_clk_soc_init = dra7xx_dt_clk_init;
 }
 
 void __init dra7xx_init_late(void)
@@ -710,3 +724,17 @@ void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
                _omap2_init_reprogram_sdrc();
        }
 }
+
+int __init omap_clk_init(void)
+{
+       int ret = 0;
+
+       if (!omap_clk_soc_init)
+               return 0;
+
+       ret = of_prcm_init();
+       if (!ret)
+               ret = omap_clk_soc_init();
+
+       return ret;
+}
index f7a6fd35b1e43f6dc9d36256508928b9ba135ea9..42d81885c700c498babd2dd2e5d322e3af2c3f19 100644 (file)
@@ -686,6 +686,8 @@ static struct clockdomain *_get_clkdm(struct omap_hwmod *oh)
        if (oh->clkdm) {
                return oh->clkdm;
        } else if (oh->_clk) {
+               if (__clk_get_flags(oh->_clk) & CLK_IS_BASIC)
+                       return NULL;
                clk = to_clk_hw_omap(__clk_get_hw(oh->_clk));
                return  clk->clkdm;
        }
@@ -1576,7 +1578,7 @@ static int _init_clkdm(struct omap_hwmod *oh)
        if (!oh->clkdm) {
                pr_warning("omap_hwmod: %s: could not associate to clkdm %s\n",
                        oh->name, oh->clkdm_name);
-               return -EINVAL;
+               return 0;
        }
 
        pr_debug("omap_hwmod: %s: associated to clkdm %s\n",
@@ -4231,6 +4233,7 @@ void __init omap_hwmod_init(void)
                soc_ops.assert_hardreset = _omap2_assert_hardreset;
                soc_ops.deassert_hardreset = _omap2_deassert_hardreset;
                soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted;
+               soc_ops.init_clkdm = _init_clkdm;
        } else if (cpu_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx()) {
                soc_ops.enable_module = _omap4_enable_module;
                soc_ops.disable_module = _omap4_disable_module;
index ac25ae6667cf92c448d37a9fc25058d69f26be19..623db40fdbbda48c50888d620b6a8a28b8a27c17 100644 (file)
@@ -18,6 +18,7 @@
 # ifndef __ASSEMBLER__
 extern void __iomem *prm_base;
 extern void omap2_set_globals_prm(void __iomem *prm);
+int of_prcm_init(void);
 # endif
 
 
index a2e1174ad1b6a6632ad904e4a0b8c5f37dea1e79..b4c4ab9c8044476d0777ed04cb8c9595d2f29f83 100644 (file)
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
 
 #include "soc.h"
 #include "prm2xxx_3xxx.h"
@@ -30,6 +34,7 @@
 #include "prm3xxx.h"
 #include "prm44xx.h"
 #include "common.h"
+#include "clock.h"
 
 /*
  * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
@@ -464,3 +469,64 @@ int prm_unregister(struct prm_ll_data *pld)
 
        return 0;
 }
+
+static struct of_device_id omap_prcm_dt_match_table[] = {
+       { .compatible = "ti,am3-prcm" },
+       { .compatible = "ti,am3-scrm" },
+       { .compatible = "ti,am4-prcm" },
+       { .compatible = "ti,am4-scrm" },
+       { .compatible = "ti,omap3-prm" },
+       { .compatible = "ti,omap3-cm" },
+       { .compatible = "ti,omap3-scrm" },
+       { .compatible = "ti,omap4-cm1" },
+       { .compatible = "ti,omap4-prm" },
+       { .compatible = "ti,omap4-cm2" },
+       { .compatible = "ti,omap4-scrm" },
+       { .compatible = "ti,omap5-prm" },
+       { .compatible = "ti,omap5-cm-core-aon" },
+       { .compatible = "ti,omap5-scrm" },
+       { .compatible = "ti,omap5-cm-core" },
+       { .compatible = "ti,dra7-prm" },
+       { .compatible = "ti,dra7-cm-core-aon" },
+       { .compatible = "ti,dra7-cm-core" },
+       { }
+};
+
+static struct clk_hw_omap memmap_dummy_ck = {
+       .flags = MEMMAP_ADDRESSING,
+};
+
+static u32 prm_clk_readl(void __iomem *reg)
+{
+       return omap2_clk_readl(&memmap_dummy_ck, reg);
+}
+
+static void prm_clk_writel(u32 val, void __iomem *reg)
+{
+       omap2_clk_writel(val, &memmap_dummy_ck, reg);
+}
+
+static struct ti_clk_ll_ops omap_clk_ll_ops = {
+       .clk_readl = prm_clk_readl,
+       .clk_writel = prm_clk_writel,
+};
+
+int __init of_prcm_init(void)
+{
+       struct device_node *np;
+       void __iomem *mem;
+       int memmap_index = 0;
+
+       ti_clk_ll_ops = &omap_clk_ll_ops;
+
+       for_each_matching_node(np, omap_prcm_dt_match_table) {
+               mem = of_iomap(np, 0);
+               clk_memmaps[memmap_index] = mem;
+               ti_dt_clk_init_provider(np, memmap_index);
+               memmap_index++;
+       }
+
+       ti_dt_clockdomains_setup();
+
+       return 0;
+}
index ec084d158f642b3cf919adbbb0c6fd97e64f6317..74044aaf438b6f2c5fa9c093d40e15077b06f153 100644 (file)
@@ -570,8 +570,7 @@ static inline void __init realtime_counter_init(void)
                               clksrc_nr, clksrc_src, clksrc_prop)      \
 void __init omap##name##_gptimer_timer_init(void)                      \
 {                                                                      \
-       if (omap_clk_init)                                              \
-               omap_clk_init();                                        \
+       omap_clk_init();                                        \
        omap_dmtimer_init();                                            \
        omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);    \
        omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src,         \
@@ -582,8 +581,7 @@ void __init omap##name##_gptimer_timer_init(void)                   \
                                clksrc_nr, clksrc_src, clksrc_prop)     \
 void __init omap##name##_sync32k_timer_init(void)              \
 {                                                                      \
-       if (omap_clk_init)                                              \
-               omap_clk_init();                                        \
+       omap_clk_init();                                        \
        omap_dmtimer_init();                                            \
        omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);    \
        /* Enable the use of clocksource="gp_timer" kernel parameter */ \
index f57fb338cc8aaa2ee533ca77be3e06994a74b25f..804d61566a53dd235750f44ed6ad7875df246b70 100644 (file)
@@ -290,10 +290,11 @@ void __init arm_memblock_init(struct meminfo *mi,
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
        /* FDT scan will populate initrd_start */
-       if (initrd_start) {
+       if (initrd_start && !phys_initrd_size) {
                phys_initrd_start = __virt_to_phys(initrd_start);
                phys_initrd_size = initrd_end - initrd_start;
        }
+       initrd_start = initrd_end = 0;
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
diff --git a/arch/arm/plat-samsung/include/plat/regs-nand.h b/arch/arm/plat-samsung/include/plat/regs-nand.h
deleted file mode 100644 (file)
index 238efea..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/regs-nand.h
- *
- * Copyright (c) 2004-2005 Simtec Electronics <linux@simtec.co.uk>
- *     http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2410 NAND register definitions
-*/
-
-#ifndef __ASM_ARM_REGS_NAND
-#define __ASM_ARM_REGS_NAND
-
-
-#define S3C2410_NFREG(x) (x)
-
-#define S3C2410_NFCONF  S3C2410_NFREG(0x00)
-#define S3C2410_NFCMD   S3C2410_NFREG(0x04)
-#define S3C2410_NFADDR  S3C2410_NFREG(0x08)
-#define S3C2410_NFDATA  S3C2410_NFREG(0x0C)
-#define S3C2410_NFSTAT  S3C2410_NFREG(0x10)
-#define S3C2410_NFECC   S3C2410_NFREG(0x14)
-
-#define S3C2440_NFCONT   S3C2410_NFREG(0x04)
-#define S3C2440_NFCMD    S3C2410_NFREG(0x08)
-#define S3C2440_NFADDR   S3C2410_NFREG(0x0C)
-#define S3C2440_NFDATA   S3C2410_NFREG(0x10)
-#define S3C2440_NFECCD0  S3C2410_NFREG(0x14)
-#define S3C2440_NFECCD1  S3C2410_NFREG(0x18)
-#define S3C2440_NFECCD   S3C2410_NFREG(0x1C)
-#define S3C2440_NFSTAT   S3C2410_NFREG(0x20)
-#define S3C2440_NFESTAT0 S3C2410_NFREG(0x24)
-#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
-#define S3C2440_NFMECC0  S3C2410_NFREG(0x2C)
-#define S3C2440_NFMECC1  S3C2410_NFREG(0x30)
-#define S3C2440_NFSECC   S3C2410_NFREG(0x34)
-#define S3C2440_NFSBLK   S3C2410_NFREG(0x38)
-#define S3C2440_NFEBLK   S3C2410_NFREG(0x3C)
-
-#define S3C2412_NFSBLK         S3C2410_NFREG(0x20)
-#define S3C2412_NFEBLK         S3C2410_NFREG(0x24)
-#define S3C2412_NFSTAT         S3C2410_NFREG(0x28)
-#define S3C2412_NFMECC_ERR0    S3C2410_NFREG(0x2C)
-#define S3C2412_NFMECC_ERR1    S3C2410_NFREG(0x30)
-#define S3C2412_NFMECC0                S3C2410_NFREG(0x34)
-#define S3C2412_NFMECC1                S3C2410_NFREG(0x38)
-#define S3C2412_NFSECC         S3C2410_NFREG(0x3C)
-
-#define S3C2410_NFCONF_EN          (1<<15)
-#define S3C2410_NFCONF_512BYTE     (1<<14)
-#define S3C2410_NFCONF_4STEP       (1<<13)
-#define S3C2410_NFCONF_INITECC     (1<<12)
-#define S3C2410_NFCONF_nFCE        (1<<11)
-#define S3C2410_NFCONF_TACLS(x)    ((x)<<8)
-#define S3C2410_NFCONF_TWRPH0(x)   ((x)<<4)
-#define S3C2410_NFCONF_TWRPH1(x)   ((x)<<0)
-
-#define S3C2410_NFSTAT_BUSY        (1<<0)
-
-#define S3C2440_NFCONF_BUSWIDTH_8      (0<<0)
-#define S3C2440_NFCONF_BUSWIDTH_16     (1<<0)
-#define S3C2440_NFCONF_ADVFLASH                (1<<3)
-#define S3C2440_NFCONF_TACLS(x)                ((x)<<12)
-#define S3C2440_NFCONF_TWRPH0(x)       ((x)<<8)
-#define S3C2440_NFCONF_TWRPH1(x)       ((x)<<4)
-
-#define S3C2440_NFCONT_LOCKTIGHT       (1<<13)
-#define S3C2440_NFCONT_SOFTLOCK                (1<<12)
-#define S3C2440_NFCONT_ILLEGALACC_EN   (1<<10)
-#define S3C2440_NFCONT_RNBINT_EN       (1<<9)
-#define S3C2440_NFCONT_RN_FALLING      (1<<8)
-#define S3C2440_NFCONT_SPARE_ECCLOCK   (1<<6)
-#define S3C2440_NFCONT_MAIN_ECCLOCK    (1<<5)
-#define S3C2440_NFCONT_INITECC         (1<<4)
-#define S3C2440_NFCONT_nFCE            (1<<1)
-#define S3C2440_NFCONT_ENABLE          (1<<0)
-
-#define S3C2440_NFSTAT_READY           (1<<0)
-#define S3C2440_NFSTAT_nCE             (1<<1)
-#define S3C2440_NFSTAT_RnB_CHANGE      (1<<2)
-#define S3C2440_NFSTAT_ILLEGAL_ACCESS  (1<<3)
-
-#define S3C2412_NFCONF_NANDBOOT                (1<<31)
-#define S3C2412_NFCONF_ECCCLKCON       (1<<30)
-#define S3C2412_NFCONF_ECC_MLC         (1<<24)
-#define S3C2412_NFCONF_TACLS_MASK      (7<<12) /* 1 extra bit of Tacls */
-
-#define S3C2412_NFCONT_ECC4_DIRWR      (1<<18)
-#define S3C2412_NFCONT_LOCKTIGHT       (1<<17)
-#define S3C2412_NFCONT_SOFTLOCK                (1<<16)
-#define S3C2412_NFCONT_ECC4_ENCINT     (1<<13)
-#define S3C2412_NFCONT_ECC4_DECINT     (1<<12)
-#define S3C2412_NFCONT_MAIN_ECC_LOCK   (1<<7)
-#define S3C2412_NFCONT_INIT_MAIN_ECC   (1<<5)
-#define S3C2412_NFCONT_nFCE1           (1<<2)
-#define S3C2412_NFCONT_nFCE0           (1<<1)
-
-#define S3C2412_NFSTAT_ECC_ENCDONE     (1<<7)
-#define S3C2412_NFSTAT_ECC_DECDONE     (1<<6)
-#define S3C2412_NFSTAT_ILLEGAL_ACCESS  (1<<5)
-#define S3C2412_NFSTAT_RnB_CHANGE      (1<<4)
-#define S3C2412_NFSTAT_nFCE1           (1<<3)
-#define S3C2412_NFSTAT_nFCE0           (1<<2)
-#define S3C2412_NFSTAT_Res1            (1<<1)
-#define S3C2412_NFSTAT_READY           (1<<0)
-
-#define S3C2412_NFECCERR_SERRDATA(x)   (((x) >> 21) & 0xf)
-#define S3C2412_NFECCERR_SERRBIT(x)    (((x) >> 18) & 0x7)
-#define S3C2412_NFECCERR_MERRDATA(x)   (((x) >> 7) & 0x3ff)
-#define S3C2412_NFECCERR_MERRBIT(x)    (((x) >> 4) & 0x7)
-#define S3C2412_NFECCERR_SPARE_ERR(x)  (((x) >> 2) & 0x3)
-#define S3C2412_NFECCERR_MAIN_ERR(x)   (((x) >> 2) & 0x3)
-#define S3C2412_NFECCERR_NONE          (0)
-#define S3C2412_NFECCERR_1BIT          (1)
-#define S3C2412_NFECCERR_MULTIBIT      (2)
-#define S3C2412_NFECCERR_ECCAREA       (3)
-
-
-
-#endif /* __ASM_ARM_REGS_NAND */
-
index 90b1753236446fcef1ce1225abc0decd92dd092d..af2738c7441b4cd4ffe9f78c5e03425c67b413bb 100644 (file)
@@ -146,6 +146,7 @@ CONFIG_USB_DEVICEFS=y
 CONFIG_USB_OTG_BLACKLIST_HUB=y
 CONFIG_USB_MON=y
 CONFIG_USB_MUSB_HDRC=y
+CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_MUSB_BLACKFIN=y
 CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_STORAGE=y
index 972aa6263ad0a9e7163c7b3b191afceef4282d70..be03be6ba5436eef23950abef36f4dbf294ce7cc 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_BFIN_SIR=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
index 91988370b75e03b9603abdd7939faed63f496220..802f9c421621764869672bd8002b8bb893d950eb 100644 (file)
@@ -49,7 +49,6 @@ CONFIG_SYN_COOKIES=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
index 7b982d0502adddfb1f8142f9550944985b5725c8..3853c473b443193dac482e2994c1d131328a80fe 100644 (file)
@@ -44,7 +44,6 @@ CONFIG_IP_PNP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
index c940a1e3ab3685fa85f6db0fd132a5d928d484f8..5e0db82b679ea669e72d28f1bdffb10fb5bfec19 100644 (file)
@@ -36,7 +36,6 @@ CONFIG_UNIX=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
index e961483f1879bf2c3c13fdefdeb2b8db999d78b1..b9af4fa69984d20e0868cc293a46a1fe5f9e8264 100644 (file)
@@ -53,7 +53,6 @@ CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
index 24936b91a6ee225ab491c168b42200ee4e2004c8..d6dd98e671463b4d86e83e3febcc376e46581faa 100644 (file)
@@ -51,7 +51,6 @@ CONFIG_INET=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
index 89162d0fff9ea6325be99d8a45a21f0d8c45c224..2b58cb2212837f2dcafd49eda71efaef9fa8e2aa 100644 (file)
@@ -36,7 +36,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
 CONFIG_MTD_DEBUG=y
 CONFIG_MTD_DEBUG_VERBOSE=1
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_NFTL=y
index a26436bf50fff23db967e47b2e2ba3c5c765d5df..f754e490bbfd00a570995da744290cfa0db1d894 100644 (file)
@@ -36,7 +36,6 @@ CONFIG_IRTTY_SIR=m
 # CONFIG_WIRELESS is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
index 647991514ac9b8fc98065b1a6878b1b7c7e72908..629516578760cff5507fbe3482c603397ef65b53 100644 (file)
@@ -43,7 +43,6 @@ CONFIG_IP_NF_TARGET_REJECT=y
 CONFIG_IP_NF_MANGLE=y
 # CONFIG_WIRELESS is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
index 8fd9b446d6583f595e2b44ec57a7c6f771581c9f..a6a7298962edcda479759327b2b2c1e8ac25656a 100644 (file)
@@ -46,7 +46,6 @@ CONFIG_IP_PNP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
index 0520c160230de103d8b85b907c48f64abab4e2b3..bc216646fe1864579ce444d6da28170147ea6523 100644 (file)
@@ -38,7 +38,6 @@ CONFIG_IRTTY_SIR=m
 # CONFIG_WIRELESS is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_JEDECPROBE=m
index e4ed865b885e3a0279f53250afe78f17dcfefd35..ea88158ab432c7a7f2897c8d061e41cf2c40df84 100644 (file)
@@ -54,7 +54,6 @@ CONFIG_IP_PNP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
index ca67145c6a45987600407d44f80919b0e718b25e..c5c8d8a3a5fadaf597145af8b75060f02f4843de 100644 (file)
@@ -544,6 +544,7 @@ do { \
 #define DCBS_P                 0x04    /* L1 Data Cache Bank Select */
 #define PORT_PREF0_P           0x12    /* DAG0 Port Preference */
 #define PORT_PREF1_P           0x13    /* DAG1 Port Preference */
+#define RDCHK                  0x9     /* Enable L1 Parity Check */
 
 /* Masks */
 #define ENDM               0x00000001  /* (doesn't really exist) Enable
index 9558416d578b58f939e1969c268fe58da6c19331..3b125da5dcb233d8541558676b4c577289a8fe40 100644 (file)
@@ -1 +1,6 @@
+#ifndef _UAPI__BFIN_ASM_BYTEORDER_H
+#define _UAPI__BFIN_ASM_BYTEORDER_H
+
 #include <linux/byteorder/little_endian.h>
+
+#endif /* _UAPI__BFIN_ASM_BYTEORDER_H */
index 03255df6c1ea0acaaa8c232f9608f878ca9c5fbf..4fdab75dee15972853c5a417f4e8c0421e1bb573 100644 (file)
@@ -7,8 +7,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef        _ASM_CACHECTL
-#define        _ASM_CACHECTL
+#ifndef _UAPI_ASM_CACHECTL
+#define _UAPI_ASM_CACHECTL
 
 /*
  * Options for cacheflush system call
@@ -17,4 +17,4 @@
 #define        DCACHE  (1<<1)          /* writeback and flush data cache */
 #define        BCACHE  (ICACHE|DCACHE) /* flush both caches              */
 
-#endif /* _ASM_CACHECTL */
+#endif /* _UAPI_ASM_CACHECTL */
index 251c911d59c1181efbbafbeaa94b37b2c7f08c08..f51ad9a4f617ad221a4cba6bdb656d912252408d 100644 (file)
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _BFIN_FCNTL_H
-#define _BFIN_FCNTL_H
+#ifndef _UAPI_BFIN_FCNTL_H
+#define _UAPI_BFIN_FCNTL_H
 
 #define O_DIRECTORY     040000 /* must be a directory */
 #define O_NOFOLLOW     0100000 /* don't follow links */
@@ -14,4 +14,4 @@
 
 #include <asm-generic/fcntl.h>
 
-#endif
+#endif /* _UAPI_BFIN_FCNTL_H */
index eca8d75b0a8a4c5f36964a61c779d3aa04f208d9..9a41c20fc83da49a829b84e866d369eb8f937265 100644 (file)
@@ -1,7 +1,7 @@
-#ifndef __ARCH_BFIN_IOCTLS_H__
-#define __ARCH_BFIN_IOCTLS_H__
+#ifndef _UAPI__ARCH_BFIN_IOCTLS_H__
+#define _UAPI__ARCH_BFIN_IOCTLS_H__
 
 #define FIOQSIZE       0x545E
 #include <asm-generic/ioctls.h>
 
-#endif
+#endif /* _UAPI__ARCH_BFIN_IOCTLS_H__ */
index 072d8966c5c3cc4fe94082df0e00ee41cff7ffd6..99c7d6816da0bd3af15380153fb7f7bb57056f99 100644 (file)
@@ -5,12 +5,12 @@
  *
  */
 
-#ifndef __BFIN_POLL_H
-#define __BFIN_POLL_H
+#ifndef _UAPI__BFIN_POLL_H
+#define _UAPI__BFIN_POLL_H
 
 #define POLLWRNORM     4 /* POLLOUT */
 #define POLLWRBAND     256
 
 #include <asm-generic/poll.h>
 
-#endif
+#endif /* _UAPI__BFIN_POLL_H */
index 1bd3436db6a7b7d080bdf4bcb1a09db621fde1b0..9608ef64dc473d7c4dbefbe0d415283b390a0d5f 100644 (file)
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef __ARCH_BFIN_POSIX_TYPES_H
-#define __ARCH_BFIN_POSIX_TYPES_H
+#ifndef _UAPI__ARCH_BFIN_POSIX_TYPES_H
+#define _UAPI__ARCH_BFIN_POSIX_TYPES_H
 
 typedef unsigned short __kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
@@ -27,4 +27,4 @@ typedef unsigned short __kernel_old_dev_t;
 
 #include <asm-generic/posix_types.h>
 
-#endif
+#endif /* _UAPI__ARCH_BFIN_POSIX_TYPES_H */
index 906bdc1f5fda7c8b54e36775bab5bf92515a228d..b58f12dc27bd232f2143afbed6ac3b6073a1200e 100644 (file)
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _ASM_BLACKFIN_SIGCONTEXT_H
-#define _ASM_BLACKFIN_SIGCONTEXT_H
+#ifndef _UAPI_ASM_BLACKFIN_SIGCONTEXT_H
+#define _UAPI_ASM_BLACKFIN_SIGCONTEXT_H
 
 /* Add new entries at the end of the structure only.  */
 struct sigcontext {
@@ -58,4 +58,4 @@ struct sigcontext {
        unsigned long sc_seqstat;
 };
 
-#endif
+#endif /* _UAPI_ASM_BLACKFIN_SIGCONTEXT_H */
index 3e81306394e20fdb4630d08a9f25b575a5f64b9b..c72f4e6e386fa34b2b120e14baa3ee932caadee9 100644 (file)
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _BFIN_SIGINFO_H
-#define _BFIN_SIGINFO_H
+#ifndef _UAPI_BFIN_SIGINFO_H
+#define _UAPI_BFIN_SIGINFO_H
 
 #include <linux/types.h>
 #include <asm-generic/siginfo.h>
@@ -38,4 +38,4 @@
  */
 #define SEGV_STACKFLOW (__SI_FAULT|3)  /* stack overflow */
 
-#endif
+#endif /* _UAPI_BFIN_SIGINFO_H */
index 77a3bf37b69d6b3759ae678f8a90e61483a4cfe8..f0a0d8b6663a6905bea90c968a6dc43f5c686ed2 100644 (file)
@@ -1,7 +1,7 @@
-#ifndef _BLACKFIN_SIGNAL_H
-#define _BLACKFIN_SIGNAL_H
+#ifndef _UAPI_BLACKFIN_SIGNAL_H
+#define _UAPI_BLACKFIN_SIGNAL_H
 
 #define SA_RESTORER 0x04000000
 #include <asm-generic/signal.h>
 
-#endif
+#endif /* _UAPI_BLACKFIN_SIGNAL_H */
index 2e27665c4e91956c1bb7cc7525c0815c4c1ab485..d3068a750b94fd333256b1c6aee0a511067150e6 100644 (file)
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2.
  */
 
-#ifndef _BFIN_STAT_H
-#define _BFIN_STAT_H
+#ifndef _UAPI_BFIN_STAT_H
+#define _UAPI_BFIN_STAT_H
 
 struct stat {
        unsigned short st_dev;
@@ -66,4 +66,4 @@ struct stat64 {
        unsigned long long st_ino;
 };
 
-#endif                         /* _BFIN_STAT_H */
+#endif /* _UAPI_BFIN_STAT_H */
index 89de6507ca2bbd38adcae0503fafff08b8fae61f..f5626b77684a71bae2385af1c5ba8ca8aab848e8 100644 (file)
@@ -4,8 +4,8 @@
  * Licensed under the GPL-2 or later.
  */
 
-#ifndef _BLACKFIN_SWAB_H
-#define _BLACKFIN_SWAB_H
+#ifndef _UAPI_BLACKFIN_SWAB_H
+#define _UAPI_BLACKFIN_SWAB_H
 
 #include <linux/types.h>
 #include <asm-generic/swab.h>
@@ -47,4 +47,4 @@ static __inline__ __attribute_const__ __u16 __arch_swab16(__u16 xx)
 
 #endif /* __GNUC__ */
 
-#endif                         /* _BLACKFIN_SWAB_H */
+#endif /* _UAPI_BLACKFIN_SWAB_H */
index 4a8c2e3fd7e5649b469738ae163721854f279d99..4da70c47cc055223087c18baa6287e3976a14a08 100644 (file)
@@ -370,7 +370,8 @@ static struct platform_device bfin_sir0_device = {
 #endif
 #endif
 
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || \
+       defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
        {
@@ -441,6 +442,50 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 #endif
 
+#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+static struct resource bfin_sport0_resources[] = {
+       {
+               .start = SPORT0_TCR1,
+               .end = SPORT0_MRCS3+4,
+               .flags = IORESOURCE_MEM,
+       },
+       {
+               .start = IRQ_SPORT0_TX,
+               .end = IRQ_SPORT0_TX+1,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .start = IRQ_SPORT0_RX,
+               .end = IRQ_SPORT0_RX+1,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .start = IRQ_SPORT0_ERROR,
+               .end = IRQ_SPORT0_ERROR,
+               .flags = IORESOURCE_IRQ,
+       },
+       {
+               .start = CH_SPORT0_TX,
+               .end = CH_SPORT0_TX,
+               .flags = IORESOURCE_DMA,
+       },
+       {
+               .start = CH_SPORT0_RX,
+               .end = CH_SPORT0_RX,
+               .flags = IORESOURCE_DMA,
+       },
+};
+static struct platform_device bfin_sport0_device = {
+       .name = "bfin_sport_raw",
+       .id = 0,
+       .num_resources = ARRAY_SIZE(bfin_sport0_resources),
+       .resource = bfin_sport0_resources,
+       .dev = {
+               .platform_data = &bfin_sport0_peripherals,
+       },
+};
+#endif
+
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
index b0fca44110b032dc59dc7b5d9485ba2724bf573c..6584190faeb809c2b73b698bc565b84cdfa98dfa 100644 (file)
@@ -17,6 +17,12 @@ config SEC_IRQ_PRIORITY_LEVELS
          Divide the total number of interrupt priority levels into sub-levels.
          There is 2 ^ (SEC_IRQ_PRIORITY_LEVELS + 1) different levels.
 
+config L1_PARITY_CHECK
+       bool "Enable L1 parity check"
+       default n
+       help
+         Enable the L1 parity check in L1 sram. A fault event is raised
+         when L1 parity error is found.
 
 comment "System Cross Bar Priority Assignment"
 
index dab8849af884a5a2032081eff51005c6daf08151..13644ed25489de4b46285ca3cd8953785310feff 100644 (file)
@@ -120,6 +120,7 @@ void clk_disable(struct clk *clk)
 }
 EXPORT_SYMBOL(clk_disable);
 
+
 unsigned long clk_get_rate(struct clk *clk)
 {
        unsigned long ret = 0;
@@ -131,7 +132,7 @@ EXPORT_SYMBOL(clk_get_rate);
 
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
-       long ret = -EIO;
+       long ret = 0;
        if (clk->ops && clk->ops->round_rate)
                ret = clk->ops->round_rate(clk, rate);
        return ret;
index 7a07374308aca4e0aba7b28276bd1114eb9a6f4a..696786e9a53147c8c51b8489b0375a77aceaf240 100644 (file)
 /* TRU_STAT.ADDRERR and TRU_ERRADDR.ADDR May Not Reflect the Correct Status */
 #define ANOMALY_16000003 (1)
 /* The EPPI Data Enable (DEN) Signal is Not Functional */
-#define ANOMALY_16000004 (1)
+#define ANOMALY_16000004 (__SILICON_REVISION__ < 1)
 /* Using L1 Instruction Cache with Parity Enabled is Unreliable */
-#define ANOMALY_16000005 (1)
+#define ANOMALY_16000005 (__SILICON_REVISION__ < 1)
 /* SEQSTAT.SYSNMI Clears Upon Entering the NMI ISR */
-#define ANOMALY_16000006 (1)
+#define ANOMALY_16000006 (__SILICON_REVISION__ < 1)
 /* DDR2 Memory Reads May Fail Intermittently */
 #define ANOMALY_16000007 (1)
 /* Instruction Memory Stalls Can Cause IFLUSH to Fail */
 /* Speculative Fetches Can Cause Undesired External FIFO Operations */
 #define ANOMALY_16000017 (1)
 /* RSI Boot Cleanup Routine Does Not Clear Registers */
-#define ANOMALY_16000018 (1)
+#define ANOMALY_16000018 (__SILICON_REVISION__ < 1)
 /* SPI Master Boot Device Auto-detection Frequency is Set Incorrectly */
-#define ANOMALY_16000019 (1)
+#define ANOMALY_16000019 (__SILICON_REVISION__ < 1)
 /* rom_SysControl() Fails to Set DDR0_CTL.INIT for Wakeup From Hibernate */
-#define ANOMALY_16000020 (1)
+#define ANOMALY_16000020 (__SILICON_REVISION__ < 1)
 /* rom_SysControl() Fails to Save and Restore DDR0_PHYCTL3 for Hibernate/Wakeup Sequence */
-#define ANOMALY_16000021 (1)
+#define ANOMALY_16000021 (__SILICON_REVISION__ < 1)
 /* Boot Code Fails to Enable Parity Fault Detection */
-#define ANOMALY_16000022 (1)
+#define ANOMALY_16000022 (__SILICON_REVISION__ < 1)
+/* Rom_SysControl Does not Update CGU0_CLKOUTSEL */
+#define ANOMALY_16000023 (__SILICON_REVISION__ < 1)
+/* Spurious Fault Signaled After Clearing an Externally Generated Fault */
+#define ANOMALY_16000024 (1)
+/* SPORT May Drive Data Pins During Inactive Channels in Multichannel Mode */
+#define ANOMALY_16000025 (1)
 /* USB DMA interrupt status do not show the DMA channel interrupt in the DMA ISR */
-#define ANOMALY_16000027 (1)
+#define ANOMALY_16000027 (__SILICON_REVISION__ < 1)
+/* Default SPI Master Boot Mode Setting is Incorrect */
+#define ANOMALY_16000028 (__SILICON_REVISION__ < 1)
+/* PPI tDFSPI Timing Does Not Meet Data Sheet Specification */
+#define ANOMALY_16000027 (__SILICON_REVISION__ < 1)
 /* Interrupted Core Reads of MMRs May Cause Data Loss */
-#define ANOMALY_16000030 (1)
+#define ANOMALY_16000030 (__SILICON_REVISION__ < 1)
+/* Incorrect Default USB_PLL_OSC.PLLM Value */
+#define ANOMALY_16000031 (__SILICON_REVISION__ < 1)
+/* Core Reads of System MMRs May Cause the Core to Hang */
+#define ANOMALY_16000032 (__SILICON_REVISION__ < 1)
+/* PPI Data Underflow on First Word Not Reported in Certain Modes */
+#define ANOMALY_16000033 (1)
+/* CNV1 Red Pixel Substitution feature not functional in the PVP */
+#define ANOMALY_16000034 (__SILICON_REVISION__ < 1)
+/* IPF0 Output Port Color Separation feature not functional */
+#define ANOMALY_16000035 (__SILICON_REVISION__ < 1)
+/* Spurious USB Wake From Hibernate May Occur When USB_VBUS is Low */
+#define ANOMALY_16000036 (__SILICON_REVISION__ < 1)
+/* Core RAISE 2 Instruction Not Latched When Executed at Priority Level 0, 1, or 2 */
+#define ANOMALY_16000037 (__SILICON_REVISION__ < 1)
+/* Spurious Unhandled NMI or L1 Memory Parity Error Interrupt May Occur Upon Entering the NMI ISR */
+#define ANOMALY_16000038 (__SILICON_REVISION__ < 1)
+/* CGU_STAT.PLOCKERR Bit May be Unreliable */
+#define ANOMALY_16000039 (1)
+/* JTAG Emulator Reads of SDU_IDCODE Alter Register Contents */
+#define ANOMALY_16000040 (1)
+/* IFLUSH Instruction Causes Parity Error When Parity Is Enabled */
+#define ANOMALY_16000041 (1)
+/* Instruction Cache Failure When Parity Is Enabled */
+#define ANOMALY_16000042 (__SILICON_REVISION__ == 1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000158 (0)
index 0e1e451fd7d81aed622ef16bbbe0c0dcbf12502d..f4adedc9289563d38eb9872be94c53538b1fee6c 100644 (file)
@@ -6,7 +6,6 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/init.h>
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
 
@@ -42,6 +41,16 @@ bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
                 unsigned long mem_mask)
 {
        int i;
+#ifdef CONFIG_L1_PARITY_CHECK
+       u32 ctrl;
+
+       if (cplb_addr == DCPLB_ADDR0) {
+               ctrl = bfin_read32(mem_control) | (1 << RDCHK);
+               CSYNC();
+               bfin_write32(mem_control, ctrl);
+               SSYNC();
+       }
+#endif
 
        for (i = 0; i < MAX_CPLBS; i++) {
                bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr);
index 2308ce52f849b582ed8046bdfca9476bdad49e0e..d436bd907fc8c6e036a16b9f57241121cc5a0f2e 100644 (file)
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/init.h>
 #include <asm/blackfin.h>
 
 #include <asm/dma.h>
index ca75613231c84474ad276c667eb4cd415027532e..867b7cef204cb91f0ed15e9386ba7ea0b2089d96 100644 (file)
@@ -471,13 +471,8 @@ void handle_sec_ssi_fault(uint32_t gstat)
 
 }
 
-void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
+void handle_sec_fault(uint32_t sec_gstat)
 {
-       uint32_t sec_gstat;
-
-       raw_spin_lock(&desc->lock);
-
-       sec_gstat = bfin_read32(SEC_GSTAT);
        if (sec_gstat & SEC_GSTAT_ERR) {
 
                switch (sec_gstat & SEC_GSTAT_ERRC) {
@@ -494,18 +489,16 @@ void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
 
 
        }
-
-       raw_spin_unlock(&desc->lock);
-
-       handle_fasteoi_irq(irq, desc);
 }
 
-void handle_core_fault(unsigned int irq, struct irq_desc *desc)
+static struct irqaction bfin_fault_irq = {
+       .name = "Blackfin fault",
+};
+
+static irqreturn_t bfin_fault_routine(int irq, void *data)
 {
        struct pt_regs *fp = get_irq_regs();
 
-       raw_spin_lock(&desc->lock);
-
        switch (irq) {
        case IRQ_C0_DBL_FAULT:
                double_fault_c(fp);
@@ -522,11 +515,15 @@ void handle_core_fault(unsigned int irq, struct irq_desc *desc)
        case IRQ_C0_NMI_L1_PARITY_ERR:
                panic("Core 0 NMI L1 parity error");
                break;
+       case IRQ_SEC_ERR:
+               pr_err("SEC error\n");
+               handle_sec_fault(bfin_read32(SEC_GSTAT));
+               break;
        default:
-               panic("Core 1 fault %d occurs unexpectedly", irq);
+               panic("Unknown fault %d", irq);
        }
 
-       raw_spin_unlock(&desc->lock);
+       return IRQ_HANDLED;
 }
 #endif /* SEC_GCTL */
 
@@ -1195,12 +1192,7 @@ int __init init_arch_irq(void)
                                handle_percpu_irq);
                } else {
                        irq_set_chip(irq, &bfin_sec_irqchip);
-                       if (irq == IRQ_SEC_ERR)
-                               irq_set_handler(irq, handle_sec_fault);
-                       else if (irq >= IRQ_C0_DBL_FAULT && irq < CORE_IRQS)
-                               irq_set_handler(irq, handle_core_fault);
-                       else
-                               irq_set_handler(irq, handle_fasteoi_irq);
+                       irq_set_handler(irq, handle_fasteoi_irq);
                        __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
                }
        }
@@ -1239,6 +1231,13 @@ int __init init_arch_irq(void)
        register_syscore_ops(&sec_pm_syscore_ops);
 #endif
 
+       bfin_fault_irq.handler = bfin_fault_routine;
+#ifdef CONFIG_L1_PARITY_CHECK
+       setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
+#endif
+       setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
+       setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);
+
        return 0;
 }
 
index 2cbfb0b5679ee841ba337d7eaa39ec10a5d694db..8923398db66f01eef7777faaa08f2d324674e4ed 100644 (file)
@@ -6,7 +6,6 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <asm/scb.h>
index 2bbae07838198912aa4beda231a0f96eaa647bd5..ba6c30d8534d0dec77cac409788aa37c03772748 100644 (file)
@@ -53,7 +53,6 @@ enum ipi_message_type {
        BFIN_IPI_TIMER,
        BFIN_IPI_RESCHEDULE,
        BFIN_IPI_CALL_FUNC,
-       BFIN_IPI_CALL_FUNC_SINGLE,
        BFIN_IPI_CPU_STOP,
 };
 
@@ -162,9 +161,6 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
                        case BFIN_IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;
-                       case BFIN_IPI_CALL_FUNC_SINGLE:
-                               generic_smp_call_function_single_interrupt();
-                               break;
                        case BFIN_IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;
@@ -210,7 +206,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
+       send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
index 4d1b1e9baef102ed79013098b27918a4cea051a9..2a8fb730d1caa1dee26dc5819742d2e0972b2cce 100644 (file)
@@ -74,13 +74,6 @@ KBUILD_CFLAGS        += -mno-fdpic -mgpr-32 -msoft-float -mno-media
 KBUILD_CFLAGS  += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2
 KBUILD_AFLAGS  += -mno-fdpic
 
-# make sure the .S files get compiled with debug info
-# and disable optimisations that are unhelpful whilst debugging
-ifdef CONFIG_DEBUG_INFO
-#KBUILD_CFLAGS += -O1
-KBUILD_AFLAGS  += -Wa,--gdwarf2
-endif
-
 head-y         := arch/frv/kernel/head.o
 
 core-y         += arch/frv/kernel/ arch/frv/mm/
index 0721858fbd1ef6618b288bcbea33369995e35653..2d75ae246167a37f07d71a9bb743702fff686092 100644 (file)
@@ -62,17 +62,18 @@ struct nfhd_device {
 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct nfhd_device *dev = queue->queuedata;
-       struct bio_vec *bvec;
-       int i, dir, len, shift;
-       sector_t sec = bio->bi_sector;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       int dir, len, shift;
+       sector_t sec = bio->bi_iter.bi_sector;
 
        dir = bio_data_dir(bio);
        shift = dev->bshift;
-       bio_for_each_segment(bvec, bio, i) {
-               len = bvec->bv_len;
+       bio_for_each_segment(bvec, bio, iter) {
+               len = bvec.bv_len;
                len >>= 9;
                nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-                               bvec_to_phys(bvec));
+                               bvec_to_phys(&bvec));
                sec += len;
        }
        bio_endio(bio, 0);
index a3d0fef3b126ba859e5fe0459b1e12658cbebe69..3f1ea5ddc40264d4760220b273f1e0b26067e4cd 100644 (file)
@@ -92,14 +92,6 @@ define archhelp
   echo  '* zImage        - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
 endef
 
-# If you make sure the .S files get compiled with debug info,
-# uncomment the following to disable optimisations
-# that are unhelpful whilst debugging.
-ifdef CONFIG_DEBUG_INFO
-#KBUILD_CFLAGS += -O1
-KBUILD_AFLAGS  += -Wa,--gdwarf2
-endif
-
 #
 # include the appropriate processor- and unit-specific headers
 #
index 1c16141c031c9e2d2512b0d308a8456c15fd1ae1..47b6b9f81d4305537b7d0e6290e178efb4253f4c 100644 (file)
@@ -109,27 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
        struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
        unsigned long phys_mem, phys_end;
        void *user_mem;
-       struct bio_vec *vec;
+       struct bio_vec vec;
        unsigned int transfered;
-       unsigned short idx;
+       struct bvec_iter iter;
 
-       phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+       phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+                                   AXON_RAM_SECTOR_SHIFT);
        phys_end = bank->io_addr + bank->size;
        transfered = 0;
-       bio_for_each_segment(vec, bio, idx) {
-               if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+       bio_for_each_segment(vec, bio, iter) {
+               if (unlikely(phys_mem + vec.bv_len > phys_end)) {
                        bio_io_error(bio);
                        return;
                }
 
-               user_mem = page_address(vec->bv_page) + vec->bv_offset;
+               user_mem = page_address(vec.bv_page) + vec.bv_offset;
                if (bio_data_dir(bio) == READ)
-                       memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+                       memcpy(user_mem, (void *) phys_mem, vec.bv_len);
                else
-                       memcpy((void *) phys_mem, user_mem, vec->bv_len);
+                       memcpy((void *) phys_mem, user_mem, vec.bv_len);
 
-               phys_mem += vec->bv_len;
-               transfered += vec->bv_len;
+               phys_mem += vec.bv_len;
+               transfered += vec.bv_len;
        }
        bio_endio(bio, 0);
 }
index e030d2bdec1b6aa2b9f29288b28c6600710ecfd1..db02052bd137254c1c6d46314005ed4fce1a6204 100644 (file)
@@ -286,8 +286,8 @@ asmlinkage long sys32_getegid16(void)
 }
 
 #ifdef CONFIG_SYSVIPC
-COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
-               unsigned long, third, compat_uptr_t, ptr)
+COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
+               compat_ulong_t, third, compat_uptr_t, ptr)
 {
        if (call >> 16)         /* hack for backward compatibility */
                return -EINVAL;
index 62ced589bcf78f1554bcadd4f420fad19c7ebcc1..b73274fb961a27a9b54eb5fd1719650762fef305 100644 (file)
 #define __NR_kern_features     340
 #define __NR_kcmp              341
 #define __NR_finit_module      342
+#define __NR_sched_setattr     343
+#define __NR_sched_getattr     344
 
-#define NR_syscalls            343
+#define NR_syscalls            345
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index cb5d272d658acce52fc85a77b1e730a40c5d885a..de1c844dfabc02569cfe71c91b3e52a9ea705ef4 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
 #include <asm/cpudata.h>
index e306fb08ee5e1fab217823321460d94deac5cb75..acf8314cec489d1beec128ce5d3d422f53554231 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 
index 4eb1a5a1d5440642aefbef171c527767cc722063..b7ddcdd1dea943a9f5e768c006547a3e7737d4f7 100644 (file)
@@ -3,7 +3,6 @@
  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  */
 
-#include <linux/init.h>
 
 #include <asm/thread_info.h>
 #include <asm/hypervisor.h>
index de199bf0cb051c6e695296d8b8ab392efd00c7c6..3241f56331c2aced51e918aa9dde9ef936e54e7c 100644 (file)
@@ -1,7 +1,6 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
-#include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mod_devicetable.h>
 #include <linux/errno.h>
index 7de8d1f590b7fbb7023c6d455b2a079714051bc7..1555bbcae1eee617fecd7cbecaa78eb5d6ab2ca8 100644 (file)
@@ -1005,6 +1005,5 @@ static int __init of_pci_slot_init(void)
 
        return 0;
 }
-
-module_init(of_pci_slot_init);
+device_initcall(of_pci_slot_init);
 #endif
index a6895987fb70254b7db258c2eff6fd65262aba46..944a06536ecc129ab81c9e758a6f89b92d3091fe 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/device.h>
 #include <linux/of_device.h>
index fdd819dfdacf2057474d6e09f590d7617eaf9cb2..510baec1b69b2ca79d3f9e8687520e1b0b541b91 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 
 #include <asm/auxio.h>
index e521c54560f9dffa74e1057dc9480ead73aea2df..bf4ccb10a78c600ac48cba552cb1794836b094bf 100644 (file)
@@ -6,7 +6,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
index 9f5e24ddcc70b0b10240f2a476b34610e011023b..a92d5d2c46a3a6553bf13a57f95f65c6d61c334c 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/export.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/bitops.h>
 
 #include <asm/cpudata.h>
index 7b87171ecf1e91803ce0a3908c629118d205c020..151ace8766cc2d99d5be3552a4b2014eb74fdaa6 100644 (file)
@@ -85,4 +85,4 @@ sys_call_table:
 /*325*/        .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module
+/*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
index 6d81597064b6b5c7efa0861fc6c5c230e3c4984f..4bd4e2bb26cf4d35a8980f1a9040cdd88a645a4a 100644 (file)
@@ -86,7 +86,7 @@ sys_call_table32:
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
 /*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
+/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 
 #endif /* CONFIG_COMPAT */
 
@@ -164,4 +164,4 @@ sys_call_table:
        .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
+/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
index 76dcbd3c988aed9445de17442e8cb1076ef5cc49..3eed99fc69892f40c34e2865895928c35b78067f 100644 (file)
@@ -5,7 +5,6 @@
  * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
-#include <linux/init.h>
 #include <asm/head.h>
 #include <asm/psr.h>
 #include <asm/page.h>
index ad4bde3bb61e6763df6baf6c44b9b0f7be921f7b..737f8cbc7d56cda4e4afbdb1c7166c4fcaad3ece 100644 (file)
@@ -4,7 +4,6 @@
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  */
 
-#include <linux/init.h>
 
 #include <asm/head.h>
 #include <asm/asi.h>
index 30963178d7e940f4afe02ed86bc936020dd1ec8a..9bd9ce80bf77eb46605942278dd4c1fe72fa4da0 100644 (file)
@@ -4,7 +4,6 @@
  * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
-#include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
index ad3bf4b4324d94f10504947bd88b2dea93a44b2e..b12cb5e72812140688d771ed0788b6a2cf2cfc16 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
index 04a4540509dd87d5c533938810eaebeb55a21edf..e58b817263199f9d45d28aed5f735b38eb94904b 100644 (file)
@@ -5,7 +5,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/string.h>
index ee50c801f7b7eda1d4ea2aeea1f99cfaf1b03ccd..cc2d6a3aeae7ed24e9fd1d7e8e69204f1a0bc4e4 100644 (file)
 struct semid64_ds {
        struct ipc64_perm sem_perm;     /* permissions .. see ipc.h */
        __kernel_time_t sem_otime;      /* last semop time */
-       unsigned long   __unused1;
+       __kernel_ulong_t __unused1;
        __kernel_time_t sem_ctime;      /* last change time */
-       unsigned long   __unused2;
-       unsigned long   sem_nsems;      /* no. of semaphores in array */
-       unsigned long   __unused3;
-       unsigned long   __unused4;
+       __kernel_ulong_t __unused2;
+       __kernel_ulong_t sem_nsems;     /* no. of semaphores in array */
+       __kernel_ulong_t __unused3;
+       __kernel_ulong_t __unused4;
 };
 
 #endif /* _ASM_X86_SEMBUF_H */
index 59d353d2c599ec26d21a9ab65ea2641e121b25a5..a5449089cd9fef6e58a03174c5fe5a34cd48983a 100644 (file)
@@ -330,11 +330,6 @@ asmlinkage void FPU_exception(int n)
 
        RE_ENTRANT_CHECK_OFF;
        if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
-#ifdef PRINT_MESSAGES
-               /* My message from the sponsor */
-               printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
-#endif /* PRINT_MESSAGES */
-
                /* Get a name string for error reporting */
                for (i = 0; exception_names[i].type; i++)
                        if ((exception_names[i].type & n) ==
index 8f568dd79605058f0d9537e6aeb5c6010d7d1e46..79bb09d4f718f0a99f4ed4f3be8a2b38fd6fd0ae 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef _PLATFORM_IPC_H_
 #define _PLATFORM_IPC_H_
 
-extern void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
-                       struct devs_id *dev) __attribute__((weak));
+void __init
+ipc_device_handler(struct sfi_device_table_entry *pentry, struct devs_id *dev);
+
 #endif
index 917eb56d77dab1aa9ae6b520262b7121c378d181..b7be1d041da233e400203522db151f7d7f703423 100644 (file)
@@ -14,6 +14,6 @@
 
 extern struct intel_msic_platform_data msic_pdata;
 
-extern void *msic_generic_platform_data(void *info,
-                       enum intel_msic_block block) __attribute__((weak));
+void *msic_generic_platform_data(void *info, enum intel_msic_block block);
+
 #endif
index a537ffc16299bf95d567c1fcf8ba7ba13cd00309..46aa25c8ce06c3366b15357d110111de2bd004fc 100644 (file)
@@ -14,6 +14,6 @@
 /* For every CPU addition a new get_<cpuname>_ops interface needs
  * to be added.
  */
-extern void * __cpuinit get_penwell_ops(void) __attribute__((weak));
-extern void * __cpuinit get_cloverview_ops(void) __attribute__((weak));
-extern void * __init get_tangier_ops(void) __attribute__((weak));
+extern void *get_penwell_ops(void) __attribute__((weak));
+extern void *get_cloverview_ops(void) __attribute__((weak));
+extern void *get_tangier_ops(void) __attribute__((weak));
index 4f7884eebc149a5b0c57645df7b3e471e4edc7f9..23381d2174ae1d4d2795ebf9a568ff7e43cf8876 100644 (file)
@@ -58,18 +58,18 @@ static unsigned long __init mfld_calibrate_tsc(void)
        return 0;
 }
 
-static void __init penwell_arch_setup()
+static void __init penwell_arch_setup(void)
 {
        x86_platform.calibrate_tsc = mfld_calibrate_tsc;
        pm_power_off = mfld_power_off;
 }
 
-void * __cpuinit get_penwell_ops()
+void *get_penwell_ops(void)
 {
        return &penwell_ops;
 }
 
-void * __cpuinit get_cloverview_ops()
+void *get_cloverview_ops(void)
 {
        return &penwell_ops;
 }
index 09d10159e7b7f982dea436f28e0d7628cade8ead..aaca91753d3267b94a98af40f7b66f8b5cbbb21c 100644 (file)
@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
        .arch_setup = tangier_arch_setup,
 };
 
-void * __cpuinit get_tangier_ops()
+void *get_tangier_ops(void)
 {
        return &tangier_ops;
 }
index 8c6e819cd8edcc26049348a9ca2e4a6d59c677b3..48eebacdf5fe089c0ff8591ddbaf07dc576a2729 100644 (file)
@@ -103,18 +103,18 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 
 static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
 {
-       int i;
-       struct bio_vec *bvec;
-       sector_t sector = bio->bi_sector;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       sector_t sector = bio->bi_iter.bi_sector;
 
-       bio_for_each_segment(bvec, bio, i) {
-               char *buffer = __bio_kmap_atomic(bio, i);
-               unsigned len = bvec->bv_len >> SECTOR_SHIFT;
+       bio_for_each_segment(bvec, bio, iter) {
+               char *buffer = __bio_kmap_atomic(bio, iter);
+               unsigned len = bvec.bv_len >> SECTOR_SHIFT;
 
                simdisk_transfer(dev, sector, len, buffer,
                                bio_data_dir(bio) == WRITE);
                sector += len;
-               __bio_kunmap_atomic(bio);
+               __bio_kunmap_atomic(buffer);
        }
        return 0;
 }
index 8bdd0121212a51a1dba3c568c5a6a3e070447318..c00e0bdeab4ab4724c42b379717200d405d9c584 100644 (file)
@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -130,7 +131,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+       if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
                bio_endio(bio, error);
 }
 
@@ -245,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
-       cancel_delayed_work_sync(&q->delay_work);
+
+       if (q->mq_ops) {
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->delayed_work);
+       } else {
+               cancel_delayed_work_sync(&q->delay_work);
+       }
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -497,8 +507,13 @@ void blk_cleanup_queue(struct request_queue *q)
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       spin_lock_irq(lock);
-       __blk_drain_queue(q, true);
+       if (q->mq_ops) {
+               blk_mq_drain_queue(q);
+               spin_lock_irq(lock);
+       } else {
+               spin_lock_irq(lock);
+               __blk_drain_queue(q, true);
+       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
@@ -1326,7 +1341,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = len;
 
-       bio->bi_size = len;
+       bio->bi_iter.bi_size = len;
        bio->bi_vcnt = 1;
        bio->bi_phys_segments = 1;
 
@@ -1351,7 +1366,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 
        req->biotail->bi_next = bio;
        req->biotail = bio;
-       req->__data_len += bio->bi_size;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
        blk_account_io_start(req, false);
@@ -1380,8 +1395,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
         * not touch req->buffer either...
         */
        req->buffer = bio_data(bio);
-       req->__sector = bio->bi_sector;
-       req->__data_len += bio->bi_size;
+       req->__sector = bio->bi_iter.bi_sector;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
        blk_account_io_start(req, false);
@@ -1459,7 +1474,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_FAILFAST_MASK;
 
        req->errors = 0;
-       req->__sector = bio->bi_sector;
+       req->__sector = bio->bi_iter.bi_sector;
        req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1598,12 @@ static inline void blk_partition_remap(struct bio *bio)
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
 
-               bio->bi_sector += p->start_sect;
+               bio->bi_iter.bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                                      bdev->bd_dev,
-                                     bio->bi_sector - p->start_sect);
+                                     bio->bi_iter.bi_sector - p->start_sect);
        }
 }
 
@@ -1654,7 +1669,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
        /* Test device or partition size, when known. */
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
 
                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
@@ -1690,7 +1705,7 @@ generic_make_request_checks(struct bio *bio)
                       "generic_make_request: Trying to access "
                        "nonexistent block-device %s (%Lu)\n",
                        bdevname(bio->bi_bdev, b),
-                       (long long) bio->bi_sector);
+                       (long long) bio->bi_iter.bi_sector);
                goto end_io;
        }
 
@@ -1704,9 +1719,9 @@ generic_make_request_checks(struct bio *bio)
        }
 
        part = bio->bi_bdev->bd_part;
-       if (should_fail_request(part, bio->bi_size) ||
+       if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
-                               bio->bi_size))
+                               bio->bi_iter.bi_size))
                goto end_io;
 
        /*
@@ -1865,7 +1880,7 @@ void submit_bio(int rw, struct bio *bio)
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
-                       task_io_account_read(bio->bi_size);
+                       task_io_account_read(bio->bi_iter.bi_size);
                        count_vm_events(PGPGIN, count);
                }
 
@@ -1874,7 +1889,7 @@ void submit_bio(int rw, struct bio *bio)
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
-                               (unsigned long long)bio->bi_sector,
+                               (unsigned long long)bio->bi_iter.bi_sector,
                                bdevname(bio->bi_bdev, b),
                                count);
                }
@@ -2007,7 +2022,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_rw & ff) != ff)
                        break;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
        }
 
        /* this could lead to infinite loop */
@@ -2378,9 +2393,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
-               unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+               unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_size)
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
 
                req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2743,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
-       rq->__data_len = bio->bi_size;
+       rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
 
        if (bio->bi_bdev)
@@ -2746,10 +2761,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 void rq_flush_dcache_pages(struct request *rq)
 {
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
 
        rq_for_each_segment(bvec, rq, iter)
-               flush_dcache_page(bvec->bv_page);
+               flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 #endif
index c3edf9dff566f47883f3fac946b72e1e208c5286..bbfc072a79c2b5d0921ee84d322e4391d85c529b 100644 (file)
@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        rq->rq_disk = bd_disk;
        rq->end_io = done;
 
+       /*
+        * don't check dying flag for MQ because the request won't
+        * be reused after dying flag is set
+        */
        if (q->mq_ops) {
                blk_mq_insert_request(q, rq, true);
                return;
index fb6f3c0ffa494f4f2adcce6fc35c95ecf383c9a8..9288aaf35c21fc8c0f579fa001f316e697a4dfbb 100644 (file)
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
-               *error_sector = bio->bi_sector;
+               *error_sector = bio->bi_iter.bi_sector;
 
        bio_put(bio);
        return ret;
index 03cf7179e8ef1aac2f1698eae57377e65a94f275..7fbab84399e6c9c602c52c8157596535a2ce2e05 100644 (file)
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
  */
 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 {
-       struct bio_vec *iv, *ivprv = NULL;
+       struct bio_vec iv, ivprv = { NULL };
        unsigned int segments = 0;
        unsigned int seg_size = 0;
-       unsigned int i = 0;
+       struct bvec_iter iter;
+       int prev = 0;
 
-       bio_for_each_integrity_vec(iv, bio, i) {
+       bio_for_each_integrity_vec(iv, bio, iter) {
 
-               if (ivprv) {
-                       if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+               if (prev) {
+                       if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
                                goto new_segment;
 
-                       if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+                       if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
                                goto new_segment;
 
-                       if (seg_size + iv->bv_len > queue_max_segment_size(q))
+                       if (seg_size + iv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
 
-                       seg_size += iv->bv_len;
+                       seg_size += iv.bv_len;
                } else {
 new_segment:
                        segments++;
-                       seg_size = iv->bv_len;
+                       seg_size = iv.bv_len;
                }
 
+               prev = 1;
                ivprv = iv;
        }
 
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
                            struct scatterlist *sglist)
 {
-       struct bio_vec *iv, *ivprv = NULL;
+       struct bio_vec iv, ivprv = { NULL };
        struct scatterlist *sg = NULL;
        unsigned int segments = 0;
-       unsigned int i = 0;
+       struct bvec_iter iter;
+       int prev = 0;
 
-       bio_for_each_integrity_vec(iv, bio, i) {
+       bio_for_each_integrity_vec(iv, bio, iter) {
 
-               if (ivprv) {
-                       if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+               if (prev) {
+                       if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
                                goto new_segment;
 
-                       if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+                       if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
                                goto new_segment;
 
-                       if (sg->length + iv->bv_len > queue_max_segment_size(q))
+                       if (sg->length + iv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
 
-                       sg->length += iv->bv_len;
+                       sg->length += iv.bv_len;
                } else {
 new_segment:
                        if (!sg)
@@ -114,10 +117,11 @@ new_segment:
                                sg = sg_next(sg);
                        }
 
-                       sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+                       sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
                        segments++;
                }
 
+               prev = 1;
                ivprv = iv;
        }
 
index 9b5b561cb92812fba2562b3bdb6c5bd8be012905..2da76c999ef3f37bd965f9d91b48dac43196a208 100644 (file)
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        req_sects = end_sect - sector;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
 
-               bio->bi_size = req_sects << 9;
+               bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;
 
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
 
                if (nr_sects > max_write_same_sectors) {
-                       bio->bi_size = max_write_same_sectors << 9;
+                       bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
-                       bio->bi_size = nr_sects << 9;
+                       bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
 
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_bdev   = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;
index 623e1cd4cffe997e71fbb54577bd42f220af64b3..ae4ae1047fd99575473a8b251dff562a151f0719 100644 (file)
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
 
-               rq->__data_len += bio->bi_size;
+               rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
 }
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 
        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 
        /* if it was boucned we must call the end io function */
        bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       if (bio->bi_size != len) {
+       if (bio->bi_iter.bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
index 1ffc58977835ff2e581c97faa35ee85ebc9a5095..8f8adaa95466ccc8335cde7e313b551b375ed9b9 100644 (file)
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
-       struct bio_vec *bv, *bvprv = NULL;
-       int cluster, i, high, highprv = 1;
+       struct bio_vec bv, bvprv = { NULL };
+       int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
+       struct bvec_iter iter;
 
        if (!bio)
                return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
-               bio_for_each_segment(bv, bio, i) {
+               bio_for_each_segment(bv, bio, iter) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
-                       high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-                       if (high || highprv)
-                               goto new_segment;
-                       if (cluster) {
-                               if (seg_size + bv->bv_len
+                       high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+                       if (!high && !highprv && cluster) {
+                               if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
-                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                               if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
-                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                               if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;
 
-                               seg_size += bv->bv_len;
+                               seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
@@ -54,7 +53,7 @@ new_segment:
 
                        nr_phys_segs++;
                        bvprv = bv;
-                       seg_size = bv->bv_len;
+                       seg_size = bv.bv_len;
                        highprv = high;
                }
                bbio = bio;
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
+       struct bio_vec end_bv = { NULL }, nxt_bv;
+       struct bvec_iter iter;
+
        if (!blk_queue_cluster(q))
                return 0;
 
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!bio_has_data(bio))
                return 1;
 
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+       bio_for_each_segment(end_bv, bio, iter)
+               if (end_bv.bv_len == iter.bi_size)
+                       break;
+
+       nxt_bv = bio_iovec(nxt);
+
+       if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;
 
        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
-       if (BIO_SEG_BOUNDARY(q, bio, nxt))
+       if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;
 
        return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-                    struct scatterlist *sglist, struct bio_vec **bvprv,
+                    struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
        int nbytes = bvec->bv_len;
 
-       if (*bvprv && *cluster) {
+       if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;
 
-               if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
-               if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;
 
                (*sg)->length += nbytes;
@@ -150,7 +158,7 @@ new_segment:
                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
-       *bvprv = bvec;
+       *bvprv = *bvec;
 }
 
 /*
@@ -160,7 +168,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
 {
-       struct bio_vec *bvec, *bvprv;
+       struct bio_vec bvec, bvprv = { NULL };
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        /*
         * for each bio in rq
         */
-       bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
-               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+               __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in rq */
 
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
 {
-       struct bio_vec *bvec, *bvprv;
+       struct bio_vec bvec, bvprv = { NULL };
        struct scatterlist *sg;
        int nsegs, cluster;
-       unsigned long i;
+       struct bvec_iter iter;
 
        nsegs = 0;
        cluster = blk_queue_cluster(q);
 
-       bvprv = NULL;
        sg = NULL;
-       bio_for_each_segment(bvec, bio, i) {
-               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+       bio_for_each_segment(bvec, bio, iter) {
+               __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in bio */
 
@@ -543,9 +549,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
-       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
 }
index 0045ace9bdf0301f724463c2e37ae8b62e9902cb..3146befb56aaac7b925428d0afee89c9e083094a 100644 (file)
@@ -28,36 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static void blk_mq_cpu_notify(void *data, unsigned long action,
-                             unsigned int cpu)
-{
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               /*
-                * If the CPU goes away, ensure that we run any pending
-                * completions.
-                */
-               struct llist_node *node;
-               struct request *rq;
-
-               local_irq_disable();
-
-               node = llist_del_all(&per_cpu(ipi_lists, cpu));
-               while (node) {
-                       struct llist_node *next = node->next;
-
-                       rq = llist_entry(node, struct request, ll_list);
-                       __blk_mq_end_io(rq, rq->errors);
-                       node = next;
-               }
-
-               local_irq_enable();
-       }
-}
-
-static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
-       .notifier_call  = blk_mq_main_cpu_notify,
-};
-
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
        BUG_ON(!notifier->notify);
@@ -82,12 +52,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
        notifier->data = data;
 }
 
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
-       .notify = blk_mq_cpu_notify,
-};
-
 void __init blk_mq_cpu_init(void)
 {
-       register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
-       blk_mq_register_cpu_notifier(&cpu_notifier);
+       hotcpu_notifier(blk_mq_main_cpu_notify, 0);
 }
index c79126e110308e8b1ea4b322506a425ceeb3085c..57039fcd9c93e7c3e014842fbbcaf2fc6550edd1 100644 (file)
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
 {
@@ -106,10 +104,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
        ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-               !blk_queue_bypass(q), *q->queue_lock);
+               !blk_queue_bypass(q) || blk_queue_dying(q),
+               *q->queue_lock);
        /* inc usage with lock hold to avoid freeze_queue runs here */
-       if (!ret)
+       if (!ret && !blk_queue_dying(q))
                __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+       else if (blk_queue_dying(q))
+               ret = -ENODEV;
        spin_unlock_irq(q->queue_lock);
 
        return ret;
@@ -120,6 +121,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+       while (true) {
+               s64 count;
+
+               spin_lock_irq(q->queue_lock);
+               count = percpu_counter_sum(&q->mq_usage_counter);
+               spin_unlock_irq(q->queue_lock);
+
+               if (count == 0)
+                       break;
+               blk_mq_run_queues(q, false);
+               msleep(10);
+       }
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +150,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);
 
-       if (!drain)
-               return;
-
-       while (true) {
-               s64 count;
-
-               spin_lock_irq(q->queue_lock);
-               count = percpu_counter_sum(&q->mq_usage_counter);
-               spin_unlock_irq(q->queue_lock);
+       if (drain)
+               __blk_mq_drain_queue(q);
+}
 
-               if (count == 0)
-                       break;
-               blk_mq_run_queues(q, false);
-               msleep(10);
-       }
+void blk_mq_drain_queue(struct request_queue *q)
+{
+       __blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -179,6 +188,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 
        rq->mq_ctx = ctx;
        rq->cmd_flags = rw_flags;
+       rq->start_time = jiffies;
+       set_start_time_ns(rq);
        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
@@ -305,7 +316,7 @@ void blk_mq_complete_request(struct request *rq, int error)
                struct bio *next = bio->bi_next;
 
                bio->bi_next = NULL;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
                blk_mq_bio_endio(rq, bio, error);
                bio = next;
        }
@@ -326,55 +337,12 @@ void __blk_mq_end_io(struct request *rq, int error)
                blk_mq_complete_request(rq, error);
 }
 
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
-{
-       struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-       struct llist_node *entry, *next;
-       struct request *rq;
-
-       entry = llist_del_all(list);
-
-       while (entry) {
-               next = entry->next;
-               rq = llist_entry(entry, struct request, ll_list);
-               __blk_mq_end_io(rq, rq->errors);
-               entry = next;
-       }
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-                         struct request *rq, const int error)
+static void blk_mq_end_io_remote(void *data)
 {
-       struct call_single_data *data = &rq->csd;
-
-       rq->errors = error;
-       rq->ll_list.next = NULL;
+       struct request *rq = data;
 
-       /*
-        * If the list is non-empty, an existing IPI must already
-        * be "in flight". If that is the case, we need not schedule
-        * a new one.
-        */
-       if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-               data->func = ipi_end_io;
-               data->flags = 0;
-               __smp_call_function_single(ctx->cpu, data, 0);
-       }
-
-       return true;
-}
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-                         struct request *rq, const int error)
-{
-       return false;
+       __blk_mq_end_io(rq, rq->errors);
 }
-#endif
 
 /*
  * End IO on this request on a multiqueue enabled driver. We'll either do
@@ -390,11 +358,15 @@ void blk_mq_end_io(struct request *rq, int error)
                return __blk_mq_end_io(rq, error);
 
        cpu = get_cpu();
-
-       if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-           !ipi_remote_cpu(ctx, cpu, rq, error))
+       if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+               rq->errors = error;
+               rq->csd.func = blk_mq_end_io_remote;
+               rq->csd.info = rq;
+               rq->csd.flags = 0;
+               __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+       } else {
                __blk_mq_end_io(rq, error);
-
+       }
        put_cpu();
 }
 EXPORT_SYMBOL(blk_mq_end_io);
@@ -1091,8 +1063,8 @@ static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
        struct page *page;
 
        while (!list_empty(&hctx->page_list)) {
-               page = list_first_entry(&hctx->page_list, struct page, list);
-               list_del_init(&page->list);
+               page = list_first_entry(&hctx->page_list, struct page, lru);
+               list_del_init(&page->lru);
                __free_pages(page, page->private);
        }
 
@@ -1156,7 +1128,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
                        break;
 
                page->private = this_order;
-               list_add_tail(&page->list, &hctx->page_list);
+               list_add_tail(&page->lru, &hctx->page_list);
 
                p = page_address(page);
                entries_per_page = order_to_size(this_order) / rq_size;
@@ -1429,7 +1401,6 @@ void blk_mq_free_queue(struct request_queue *q)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               cancel_delayed_work_sync(&hctx->delayed_work);
                kfree(hctx->ctx_map);
                kfree(hctx->ctxs);
                blk_mq_free_rq_map(hctx);
@@ -1451,7 +1422,6 @@ void blk_mq_free_queue(struct request_queue *q)
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);
 }
-EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
@@ -1495,11 +1465,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 
 static int __init blk_mq_init(void)
 {
-       unsigned int i;
-
-       for_each_possible_cpu(i)
-               init_llist_head(&per_cpu(ipi_lists, i));
-
        blk_mq_cpu_init();
 
        /* Must be called after percpu_counter_hotcpu_callback() */
index 52bf1f96a2c239195e564fb5bdb19164709770f4..5c3917984b005f13ea35254074744ec91f2e5bd3 100644 (file)
@@ -27,6 +27,8 @@ void blk_mq_complete_request(struct request *rq, int error);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
+void blk_mq_free_queue(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
@@ -38,7 +40,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
 
 /*
  * CPU -> queue mappings
index 05e826793e4e36b2e6c8de29802674767e3bd4d8..5d21239bc8599feb3fa12fa4906ba57aaf1c553f 100644 (file)
@@ -592,6 +592,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                ret = -1;
        }
 
+       t->raid_partial_stripes_expensive =
+               max(t->raid_partial_stripes_expensive,
+                   b->raid_partial_stripes_expensive);
+
        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);
index 97779522472f8356d5b09e91a33b1b310293d230..8095c4a21fc0f53e6e46ff191de283500dcc97de 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
index a760857e6b62609dde239ad74aebe2b5ac2ebaac..1474c3ab7e72cb85698ffe8bb3687df66729281b 100644 (file)
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        do_div(tmp, HZ);
        bytes_allowed = tmp;
 
-       if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+       if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }
 
        /* Calc approx time to dispatch */
-       extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+       extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 
        if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
        bool rw = bio_data_dir(bio);
 
        /* Charge the bio to the group */
-       tg->bytes_disp[rw] += bio->bi_size;
+       tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;
 
        /*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         */
        if (!(bio->bi_rw & REQ_THROTTLED)) {
                bio->bi_rw |= REQ_THROTTLED;
-               throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-                                            bio->bi_rw);
+               throtl_update_dispatch_stats(tg_to_blkg(tg),
+                                            bio->bi_iter.bi_size, bio->bi_rw);
        }
 }
 
@@ -1503,7 +1503,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        if (tg) {
                if (!tg->has_rules[rw]) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
-                                                    bio->bi_size, bio->bi_rw);
+                                       bio->bi_iter.bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }
@@ -1559,7 +1559,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        /* out-of-limit, queue to @tg */
        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
                   rw == READ ? 'R' : 'W',
-                  tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+                  tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
                   tg->io_disp[rw], tg->iops[rw],
                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
index cc2637f8674ed61df149fb3bf51da4cc3a04f7cf..9dbc67e42a993193fb56d169cab00ecf4b825ef0 100644 (file)
@@ -4,8 +4,7 @@
  * Written by Cai Zhiyong <caizhiyong@huawei.com>
  *
  */
-#include <linux/buffer_head.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/cmdline-parser.h>
 
 static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
@@ -159,6 +158,7 @@ void cmdline_parts_free(struct cmdline_parts **parts)
                *parts = next_parts;
        }
 }
+EXPORT_SYMBOL(cmdline_parts_free);
 
 int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
 {
@@ -206,6 +206,7 @@ fail:
        cmdline_parts_free(parts);
        goto done;
 }
+EXPORT_SYMBOL(cmdline_parts_parse);
 
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                         const char *bdev)
@@ -214,17 +215,17 @@ struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                parts = parts->next_parts;
        return parts;
 }
+EXPORT_SYMBOL(cmdline_parts_find);
 
 /*
  *  add_part()
  *    0 success.
  *    1 can not add so many partitions.
  */
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param)
-
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param)
 {
        sector_t from = 0;
        struct cmdline_subpart *subpart;
@@ -247,4 +248,7 @@ void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
                if (add_part(slot, subpart, param))
                        break;
        }
+
+       return slot;
 }
+EXPORT_SYMBOL(cmdline_parts_set);
index b7ff2861b6bdc0bd8e57528ac776fc9b923b5c70..42c45a7d67144a5598f5d7b2242a63eb9d58e292 100644 (file)
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
        /*
         * See if our hash lookup can find a potential backmerge.
         */
-       __rq = elv_rqhash_find(q, bio->bi_sector);
+       __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
index 625e3e471d65f55495bd639b0418e8ad85d53d62..26487972ac549ba899a723201125e5b3c59934ff 100644 (file)
@@ -323,12 +323,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
        if (hdr->iovec_count) {
                size_t iov_data_len;
-               struct iovec *iov;
+               struct iovec *iov = NULL;
 
                ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
                                            0, NULL, &iov);
-               if (ret < 0)
+               if (ret < 0) {
+                       kfree(iov);
                        goto out;
+               }
 
                iov_data_len = ret;
                ret = 0;
index 14a9d1912318b99fe764108420b143159c2bca32..9220f8e833d08228297373d116c308176c8d44a8 100644 (file)
@@ -100,11 +100,8 @@ enum {
 
 struct buf {
        ulong nframesout;
-       ulong resid;
-       ulong bv_resid;
-       sector_t sector;
        struct bio *bio;
-       struct bio_vec *bv;
+       struct bvec_iter iter;
        struct request *rq;
 };
 
@@ -120,13 +117,10 @@ struct frame {
        ulong waited;
        ulong waited_total;
        struct aoetgt *t;               /* parent target I belong to */
-       sector_t lba;
        struct sk_buff *skb;            /* command skb freed on module exit */
        struct sk_buff *r_skb;          /* response skb for async processing */
        struct buf *buf;
-       struct bio_vec *bv;
-       ulong bcnt;
-       ulong bv_off;
+       struct bvec_iter iter;
        char flags;
 };
 
index d2515435e23f2f87215558ec703f5a625ab8ac80..8184451b57c04999bd23c286080e14ca7f069031 100644 (file)
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
 
        t = f->t;
        f->buf = NULL;
-       f->lba = 0;
-       f->bv = NULL;
+       memset(&f->iter, 0, sizeof(f->iter));
        f->r_skb = NULL;
        f->flags = 0;
        list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
 }
 
 static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
 {
        int frag = 0;
-       ulong fcnt;
-loop:
-       fcnt = bv->bv_len - (off - bv->bv_offset);
-       if (fcnt > cnt)
-               fcnt = cnt;
-       skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
-       cnt -= fcnt;
-       if (cnt <= 0)
-               return;
-       bv++;
-       off = bv->bv_offset;
-       goto loop;
+       struct bio_vec bv;
+
+       __bio_for_each_segment(bv, bio, iter, iter)
+               skb_fill_page_desc(skb, frag++, bv.bv_page,
+                                  bv.bv_offset, bv.bv_len);
 }
 
 static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
        t->nout++;
        f->waited = 0;
        f->waited_total = 0;
-       if (f->buf)
-               f->lba = f->buf->sector;
 
        /* set up ata header */
-       ah->scnt = f->bcnt >> 9;
-       put_lba(ah, f->lba);
+       ah->scnt = f->iter.bi_size >> 9;
+       put_lba(ah, f->iter.bi_sector);
        if (t->d->flags & DEVFL_EXT) {
                ah->aflags |= AOEAFL_EXT;
        } else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
                ah->lba3 |= 0xe0;       /* LBA bit + obsolete 0xa0 */
        }
        if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
-               skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+               skb_fillup(skb, f->buf->bio, f->iter);
                ah->aflags |= AOEAFL_WRITE;
-               skb->len += f->bcnt;
-               skb->data_len = f->bcnt;
-               skb->truesize += f->bcnt;
+               skb->len += f->iter.bi_size;
+               skb->data_len = f->iter.bi_size;
+               skb->truesize += f->iter.bi_size;
                t->wpkts++;
        } else {
                t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
        struct buf *buf;
        struct sk_buff *skb;
        struct sk_buff_head queue;
-       ulong bcnt, fbcnt;
 
        buf = nextbuf(d);
        if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
        f = newframe(d);
        if (f == NULL)
                return 0;
-       bcnt = d->maxbcnt;
-       if (bcnt == 0)
-               bcnt = DEFAULTBCNT;
-       if (bcnt > buf->resid)
-               bcnt = buf->resid;
-       fbcnt = bcnt;
-       f->bv = buf->bv;
-       f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
-       do {
-               if (fbcnt < buf->bv_resid) {
-                       buf->bv_resid -= fbcnt;
-                       buf->resid -= fbcnt;
-                       break;
-               }
-               fbcnt -= buf->bv_resid;
-               buf->resid -= buf->bv_resid;
-               if (buf->resid == 0) {
-                       d->ip.buf = NULL;
-                       break;
-               }
-               buf->bv++;
-               buf->bv_resid = buf->bv->bv_len;
-               WARN_ON(buf->bv_resid == 0);
-       } while (fbcnt);
 
        /* initialize the headers & frame */
        f->buf = buf;
-       f->bcnt = bcnt;
-       ata_rw_frameinit(f);
+       f->iter = buf->iter;
+       f->iter.bi_size = min_t(unsigned long,
+                               d->maxbcnt ?: DEFAULTBCNT,
+                               f->iter.bi_size);
+       bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+       if (!buf->iter.bi_size)
+               d->ip.buf = NULL;
 
        /* mark all tracking fields and load out */
        buf->nframesout += 1;
-       buf->sector += bcnt >> 9;
+
+       ata_rw_frameinit(f);
 
        skb = skb_clone(f->skb, GFP_ATOMIC);
        if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
        skb = nf->skb;
        nf->skb = f->skb;
        nf->buf = f->buf;
-       nf->bcnt = f->bcnt;
-       nf->lba = f->lba;
-       nf->bv = f->bv;
-       nf->bv_off = f->bv_off;
+       nf->iter = f->iter;
        nf->waited = 0;
        nf->waited_total = f->waited_total;
        nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
        }
        f->flags |= FFL_PROBE;
        ifrotate(t);
-       f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+       f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
        ata_rw_frameinit(f);
        skb = f->skb;
-       for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+       for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
                if (n < PAGE_SIZE)
                        m = n;
                else
                        m = PAGE_SIZE;
                skb_fill_page_desc(skb, frag, empty_page, 0, m);
        }
-       skb->len += f->bcnt;
-       skb->data_len = f->bcnt;
-       skb->truesize += f->bcnt;
+       skb->len += f->iter.bi_size;
+       skb->data_len = f->iter.bi_size;
+       skb->truesize += f->iter.bi_size;
 
        skb = skb_clone(f->skb, GFP_ATOMIC);
        if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
 static void
 bio_pageinc(struct bio *bio)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
        struct page *page;
-       int i;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
+       bio_for_each_segment(bv, bio, iter) {
                /* Non-zero page count for non-head members of
                 * compound pages is no longer allowed by the kernel.
                 */
-               page = compound_trans_head(bv->bv_page);
+               page = compound_trans_head(bv.bv_page);
                atomic_inc(&page->_count);
        }
 }
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
 static void
 bio_pagedec(struct bio *bio)
 {
-       struct bio_vec *bv;
        struct page *page;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
-               page = compound_trans_head(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               page = compound_trans_head(bv.bv_page);
                atomic_dec(&page->_count);
        }
 }
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
        memset(buf, 0, sizeof(*buf));
        buf->rq = rq;
        buf->bio = bio;
-       buf->resid = bio->bi_size;
-       buf->sector = bio->bi_sector;
+       buf->iter = bio->bi_iter;
        bio_pageinc(bio);
-       buf->bv = bio_iovec(bio);
-       buf->bv_resid = buf->bv->bv_len;
-       WARN_ON(buf->bv_resid == 0);
 }
 
 static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
 }
 
 static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
 {
-       ulong fcnt;
-       char *p;
        int soff = 0;
-loop:
-       fcnt = bv->bv_len - (off - bv->bv_offset);
-       if (fcnt > cnt)
-               fcnt = cnt;
-       p = page_address(bv->bv_page) + off;
-       skb_copy_bits(skb, soff, p, fcnt);
-       soff += fcnt;
-       cnt -= fcnt;
-       if (cnt <= 0)
-               return;
-       bv++;
-       off = bv->bv_offset;
-       goto loop;
+       struct bio_vec bv;
+
+       iter.bi_size = cnt;
+
+       __bio_for_each_segment(bv, bio, iter, iter) {
+               char *p = page_address(bv.bv_page) + bv.bv_offset;
+               skb_copy_bits(skb, soff, p, bv.bv_len);
+               soff += bv.bv_len;
+       }
 }
 
 void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
        do {
                bio = rq->bio;
                bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb:           if (buf)
                        clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
                        break;
                }
-               bvcpy(f->bv, f->bv_off, skb, n);
+               if (n > f->iter.bi_size) {
+                       pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
+                               "aoe: too-large data size in read from",
+                               (long) d->aoemajor, d->aoeminor,
+                               n, f->iter.bi_size);
+                       clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+                       break;
+               }
+               bvcpy(skb, f->buf->bio, f->iter, n);
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
 
        aoe_freetframe(f);
 
-       if (buf && --buf->nframesout == 0 && buf->resid == 0)
+       if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
                aoe_end_buf(d, buf);
 
        spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 {
        if (buf == NULL)
                return;
-       buf->resid = 0;
+       buf->iter.bi_size = 0;
        clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
        if (buf->nframesout == 0)
                aoe_end_buf(d, buf);
index d91f1a56e8617f56c019bfb6389bb79f71fa8ad2..e73b85cf0756876adbf4ad9b8220fc466bcdb412 100644 (file)
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        struct block_device *bdev = bio->bi_bdev;
        struct brd_device *brd = bdev->bd_disk->private_data;
        int rw;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
        sector_t sector;
-       int i;
+       struct bvec_iter iter;
        int err = -EIO;
 
-       sector = bio->bi_sector;
+       sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
                goto out;
 
        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                err = 0;
-               discard_from_brd(brd, sector, bio->bi_size);
+               discard_from_brd(brd, sector, bio->bi_iter.bi_size);
                goto out;
        }
 
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        if (rw == READA)
                rw = READ;
 
-       bio_for_each_segment(bvec, bio, i) {
-               unsigned int len = bvec->bv_len;
-               err = brd_do_bvec(brd, bvec->bv_page, len,
-                                       bvec->bv_offset, rw, sector);
+       bio_for_each_segment(bvec, bio, iter) {
+               unsigned int len = bvec.bv_len;
+               err = brd_do_bvec(brd, bvec.bv_page, len,
+                                       bvec.bv_offset, rw, sector);
                if (err)
                        break;
                sector += len >> SECTOR_SHIFT;
index b35fc4f5237c3b44c7c51aaa0517e58a42a2ab81..036e8ab86c718057ae01c9f6bf3ba6c403a6b942 100644 (file)
@@ -5004,7 +5004,7 @@ reinit_after_soft_reset:
 
        i = alloc_cciss_hba(pdev);
        if (i < 0)
-               return -1;
+               return -ENOMEM;
 
        h = hba[i];
        h->pdev = pdev;
@@ -5205,7 +5205,7 @@ clean_no_release_regions:
         */
        pci_set_drvdata(pdev, NULL);
        free_hba(h);
-       return -1;
+       return -ENODEV;
 }
 
 static void cciss_shutdown(struct pci_dev *pdev)
index 28c73ca320a8f7f9b1741492785fa2f18e042d8c..a9b13f2cc420b055b8089fde687d601a23b8e635 100644 (file)
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
        bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        err = -EIO;
        if (bio_add_page(bio, page, size, 0) != size)
                goto out;
index b12c11ec4bd21e405fe75276d656e3519c8ad873..597f111df67b3597987eb816f8ff2ca2ec17c285 100644 (file)
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
        } else
                page = b->bm_pages[page_nr];
        bio->bi_bdev = mdev->ldev->md_bdev;
-       bio->bi_sector = on_disk_sector;
+       bio->bi_iter.bi_sector = on_disk_sector;
        /* bio_add_page of a single page to an empty bio will always succeed,
         * according to api.  Do we want to assert that? */
        bio_add_page(bio, page, len, 0);
index 9e3818b1bc8321e5883a1ef1b3dfe9542e7ea619..929468e1512a687d44bb310b8e3cc94a6b15161d 100644 (file)
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+
        /* hint all but last page with MSG_MORE */
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int err;
 
-               err = _drbd_no_send_page(mdev, bvec->bv_page,
-                                        bvec->bv_offset, bvec->bv_len,
-                                        i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+               err = _drbd_no_send_page(mdev, bvec.bv_page,
+                                        bvec.bv_offset, bvec.bv_len,
+                                        bio_iter_last(bvec, iter)
+                                        ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+
        /* hint all but last page with MSG_MORE */
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int err;
 
-               err = _drbd_send_page(mdev, bvec->bv_page,
-                                     bvec->bv_offset, bvec->bv_len,
-                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+               err = _drbd_send_page(mdev, bvec.bv_page,
+                                     bvec.bv_offset, bvec.bv_len,
+                                     bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
index 6fa6673b36b396765b58142e8e8abcdc4beaae05..d073305ffd5e76e17a7d0804ad92be1ad82bba67 100644 (file)
@@ -1333,7 +1333,7 @@ next_bio:
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
-                                       len, (unsigned long long)bio->bi_sector);
+                                       len, (uint64_t)bio->bi_iter.bi_sector);
                                err = -ENOSPC;
                                goto fail;
                        }
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                           sector_t sector, int data_size)
 {
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct bio *bio;
-       int dgs, err, i, expect;
+       int dgs, err, expect;
        void *dig_in = mdev->tconn->int_dig_in;
        void *dig_vv = mdev->tconn->int_dig_vv;
 
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        mdev->recv_cnt += data_size>>9;
 
        bio = req->master_bio;
-       D_ASSERT(sector == bio->bi_sector);
+       D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-       bio_for_each_segment(bvec, bio, i) {
-               void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-               expect = min_t(int, data_size, bvec->bv_len);
+       bio_for_each_segment(bvec, bio, iter) {
+               void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+               expect = min_t(int, data_size, bvec.bv_len);
                err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-               kunmap(bvec->bv_page);
+               kunmap(bvec.bv_page);
                if (err)
                        return err;
                data_size -= expect;
index fec7bef44994cf8b76e595f69b5e34b42cdaf230..104a040f24de74141274b364ee625aa49eefbb94 100644 (file)
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
        req->epoch       = 0;
 
        drbd_clear_interval(&req->i);
-       req->i.sector     = bio_src->bi_sector;
-       req->i.size      = bio_src->bi_size;
+       req->i.sector     = bio_src->bi_iter.bi_sector;
+       req->i.size      = bio_src->bi_iter.bi_size;
        req->i.local = true;
        req->i.waiting = false;
 
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        /*
         * what we "blindly" assume:
         */
-       D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+       D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
        inc_ap_bio(mdev);
        __drbd_make_request(mdev, bio, start_time);
index 978cb1addc98845fb8ca49838cfb5ec2478170f7..28e15d91197af1b234e43136740709adc090bcde 100644 (file)
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
 
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
        struct bio *bio;
        int error;
index 891c0ecaa292c84998f7357b82e3a80cdc0f6484..84d3175d493aaef91edabd97297238a717f6973c 100644 (file)
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 {
        struct hash_desc desc;
        struct scatterlist sg;
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        desc.tfm = tfm;
        desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
        sg_init_table(&sg, 1);
        crypto_hash_init(&desc);
 
-       bio_for_each_segment(bvec, bio, i) {
-               sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+       bio_for_each_segment(bvec, bio, iter) {
+               sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                crypto_hash_update(&desc, &sg, sg.length);
        }
        crypto_hash_final(&desc, digest);
index 000abe2f105c60f06d5fac31a2a4889cddd22b15..2023043ce7c0e94b0e3c3618eb0787a9ad5ddb73 100644 (file)
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
        int size;
        struct req_iterator iter;
        char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
        size = 0;
 
        rq_for_each_segment(bv, current_req, iter) {
-               if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+               if (page_address(bv.bv_page) + bv.bv_offset != base + size)
                        break;
 
-               size += bv->bv_len;
+               size += bv.bv_len;
        }
 
        return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
        int remaining;          /* number of transferred 512-byte sectors */
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *buffer;
        char *dma_buffer;
        int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
                if (!remaining)
                        break;
 
-               size = bv->bv_len;
+               size = bv.bv_len;
                SUPBOUND(size, remaining);
 
-               buffer = page_address(bv->bv_page) + bv->bv_offset;
+               buffer = page_address(bv.bv_page) + bv.bv_offset;
                if (dma_buffer + size >
                    floppy_track_buffer + (max_buffer_sectors << 10) ||
                    dma_buffer < floppy_track_buffer) {
@@ -3691,9 +3691,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        if (!(mode & FMODE_NDELAY)) {
                if (mode & (FMODE_READ|FMODE_WRITE)) {
                        UDRS->last_checked = 0;
+                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
                        check_disk_change(bdev);
                        if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
                                goto out;
+                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+                               goto out;
                }
                res = -EROFS;
                if ((mode & FMODE_WRITE) &&
@@ -3746,17 +3749,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
  * a disk in the drive, and whether that disk is writable.
  */
 
-static void floppy_rb0_complete(struct bio *bio, int err)
+struct rb0_cbdata {
+       int drive;
+       struct completion complete;
+};
+
+static void floppy_rb0_cb(struct bio *bio, int err)
 {
-       complete((struct completion *)bio->bi_private);
+       struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
+       int drive = cbdata->drive;
+
+       if (err) {
+               pr_info("floppy: error %d while reading block 0", err);
+               set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+       }
+       complete(&cbdata->complete);
 }
 
-static int __floppy_read_block_0(struct block_device *bdev)
+static int __floppy_read_block_0(struct block_device *bdev, int drive)
 {
        struct bio bio;
        struct bio_vec bio_vec;
-       struct completion complete;
        struct page *page;
+       struct rb0_cbdata cbdata;
        size_t size;
 
        page = alloc_page(GFP_NOIO);
@@ -3769,23 +3784,26 @@ static int __floppy_read_block_0(struct block_device *bdev)
        if (!size)
                size = 1024;
 
+       cbdata.drive = drive;
+
        bio_init(&bio);
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = size;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = size;
+       bio.bi_iter.bi_size = size;
        bio.bi_bdev = bdev;
-       bio.bi_sector = 0;
+       bio.bi_iter.bi_sector = 0;
        bio.bi_flags = (1 << BIO_QUIET);
-       init_completion(&complete);
-       bio.bi_private = &complete;
-       bio.bi_end_io = floppy_rb0_complete;
+       bio.bi_private = &cbdata;
+       bio.bi_end_io = floppy_rb0_cb;
 
        submit_bio(READ, &bio);
        process_fd_request();
-       wait_for_completion(&complete);
+
+       init_completion(&cbdata.complete);
+       wait_for_completion(&cbdata.complete);
 
        __free_page(page);
 
@@ -3827,7 +3845,7 @@ static int floppy_revalidate(struct gendisk *disk)
                        UDRS->generation++;
                if (drive_no_geom(drive)) {
                        /* auto-sensing */
-                       res = __floppy_read_block_0(opened_bdev[drive]);
+                       res = __floppy_read_block_0(opened_bdev[drive], drive);
                } else {
                        if (cf)
                                poll_drive(false, FD_RAW_NEED_DISK);
index c8dac730524408f63e78cb9acdae8294aaf8dafa..66e8c3b94ef35443f46bf67ea3065023da8b808d 100644 (file)
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 {
        int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
                        struct page *page);
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct page *page = NULL;
-       int i, ret = 0;
+       int ret = 0;
 
        if (lo->transfer != transfer_none) {
                page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
                do_lo_send = do_lo_send_direct_write;
        }
 
-       bio_for_each_segment(bvec, bio, i) {
-               ret = do_lo_send(lo, bvec, pos, page);
+       bio_for_each_segment(bvec, bio, iter) {
+               ret = do_lo_send(lo, &bvec, pos, page);
                if (ret < 0)
                        break;
-               pos += bvec->bv_len;
+               pos += bvec.bv_len;
        }
        if (page) {
                kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        ssize_t s;
-       int i;
 
-       bio_for_each_segment(bvec, bio, i) {
-               s = do_lo_receive(lo, bvec, bsize, pos);
+       bio_for_each_segment(bvec, bio, iter) {
+               s = do_lo_receive(lo, &bvec, bsize, pos);
                if (s < 0)
                        return s;
 
-               if (s != bvec->bv_len) {
+               if (s != bvec.bv_len) {
                        zero_fill_bio(bio);
                        break;
                }
-               pos += bvec->bv_len;
+               pos += bvec.bv_len;
        }
        return 0;
 }
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        loff_t pos;
        int ret;
 
-       pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
                struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
                                goto out;
                        }
                        ret = file->f_op->fallocate(file, mode, pos,
-                                                   bio->bi_size);
+                                                   bio->bi_iter.bi_size);
                        if (unlikely(ret && ret != -EINVAL &&
                                     ret != -EOPNOTSUPP))
                                ret = -EIO;
@@ -798,7 +799,7 @@ static void loop_config_discard(struct loop_device *lo)
 
        /*
         * We use punch hole to reclaim the free space used by the
-        * image a.k.a. discard. However we do support discard if
+        * image a.k.a. discard. However we do not support discard if
         * encryption is enabled, because it may give an attacker
         * useful information.
         */
index 7bc363f1ee82241227452594a78a1e7c5c2d3eca..eb59b124136690e217897dd6003473e09e5bd64a 100644 (file)
@@ -915,7 +915,7 @@ static int mg_probe(struct platform_device *plat_dev)
 
        /* disk reset */
        if (prv_data->dev_attr == MG_STORAGE_DEV) {
-               /* If POR seq. not yet finised, wait */
+               /* If POR seq. not yet finished, wait */
                err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
                if (err)
                        goto probe_err_3b;
index 050c71267f146340281992e341ab86c93d79c33d..516026954be62d325a9e9d7c5541e5fbf51c7569 100644 (file)
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
-#define HW_CMD_TBL_SZ          (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
-#define HW_CMD_TBL_AR_SZ       (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
-#define HW_PORT_PRIV_DMA_SZ \
-               (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
+#define AHCI_RX_FIS_SZ          0x100
+#define AHCI_RX_FIS_OFFSET      0x0
+#define AHCI_IDFY_SZ            ATA_SECT_SIZE
+#define AHCI_IDFY_OFFSET        0x400
+#define AHCI_SECTBUF_SZ         ATA_SECT_SIZE
+#define AHCI_SECTBUF_OFFSET     0x800
+#define AHCI_SMARTBUF_SZ        ATA_SECT_SIZE
+#define AHCI_SMARTBUF_OFFSET    0xC00
+/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
+#define BLOCK_DMA_ALLOC_SZ      4096
+
+/* DMA region containing command table (should be 8192 bytes) */
+#define AHCI_CMD_SLOT_SZ        sizeof(struct mtip_cmd_hdr)
+#define AHCI_CMD_TBL_SZ         (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
+#define AHCI_CMD_TBL_OFFSET     0x0
+
+/* DMA region per command (contains header and SGL) */
+#define AHCI_CMD_TBL_HDR_SZ     0x80
+#define AHCI_CMD_TBL_HDR_OFFSET 0x0
+#define AHCI_CMD_TBL_SGL_SZ     (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
+#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
+#define CMD_DMA_ALLOC_SZ        (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)
+
 
 #define HOST_CAP_NZDMA         (1 << 19)
 #define HOST_HSORG             0xFC
@@ -899,8 +920,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
                        fail_reason = "thermal shutdown";
                }
                if (buf[288] == 0xBF) {
+                       set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
                        dev_info(&dd->pdev->dev,
-                               "Drive indicates rebuild has failed.\n");
+                               "Drive indicates rebuild has failed. Secure erase required.\n");
                        fail_all_ncq_cmds = 1;
                        fail_reason = "rebuild failed";
                }
@@ -1566,6 +1588,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
        }
 #endif
 
+       /* Check security locked state */
+       if (port->identify[128] & 0x4)
+               set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+       else
+               clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+
 #ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
        /* Demux ID.DRAT & ID.RZAT to determine trim support */
        if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
@@ -1887,6 +1915,10 @@ static void mtip_dump_identify(struct mtip_port *port)
        strlcpy(cbuf, (char *)(port->identify+27), 41);
        dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
 
+       dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
+               port->identify[128],
+               port->identify[128] & 0x4 ? "(LOCKED)" : "");
+
        if (mtip_hw_get_capacity(port->dd, &sectors))
                dev_info(&port->dd->pdev->dev,
                        "Capacity: %llu sectors (%llu MB)\n",
@@ -3312,6 +3344,118 @@ st_out:
        return 0;
 }
 
+/*
+ * DMA region teardown
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ *      None
+ */
+static void mtip_dma_free(struct driver_data *dd)
+{
+       int i;
+       struct mtip_port *port = dd->port;
+
+       if (port->block1)
+               dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+                                       port->block1, port->block1_dma);
+
+       if (port->command_list) {
+               dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+                               port->command_list, port->command_list_dma);
+       }
+
+       for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+               if (port->commands[i].command)
+                       dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+                               port->commands[i].command,
+                               port->commands[i].command_dma);
+       }
+}
+
+/*
+ * DMA region setup
+ *
+ * @dd Pointer to driver_data structure
+ *
+ * return value
+ *      -ENOMEM Not enough free DMA region space to initialize driver
+ */
+static int mtip_dma_alloc(struct driver_data *dd)
+{
+       struct mtip_port *port = dd->port;
+       int i, rv = 0;
+       u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+       /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
+       port->block1 =
+               dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+                                       &port->block1_dma, GFP_KERNEL);
+       if (!port->block1)
+               return -ENOMEM;
+       memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
+
+       /* Allocate dma memory for command list */
+       port->command_list =
+               dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
+                                       &port->command_list_dma, GFP_KERNEL);
+       if (!port->command_list) {
+               dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
+                                       port->block1, port->block1_dma);
+               port->block1 = NULL;
+               port->block1_dma = 0;
+               return -ENOMEM;
+       }
+       memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
+
+       /* Setup all pointers into first DMA region */
+       port->rxfis         = port->block1 + AHCI_RX_FIS_OFFSET;
+       port->rxfis_dma     = port->block1_dma + AHCI_RX_FIS_OFFSET;
+       port->identify      = port->block1 + AHCI_IDFY_OFFSET;
+       port->identify_dma  = port->block1_dma + AHCI_IDFY_OFFSET;
+       port->log_buf       = port->block1 + AHCI_SECTBUF_OFFSET;
+       port->log_buf_dma   = port->block1_dma + AHCI_SECTBUF_OFFSET;
+       port->smart_buf     = port->block1 + AHCI_SMARTBUF_OFFSET;
+       port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
+
+       /* Setup per command SGL DMA region */
+
+       /* Point the command headers at the command tables */
+       for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
+               port->commands[i].command =
+                       dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+                               &port->commands[i].command_dma, GFP_KERNEL);
+               if (!port->commands[i].command) {
+                       rv = -ENOMEM;
+                       mtip_dma_free(dd);
+                       return rv;
+               }
+               memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
+
+               port->commands[i].command_header = port->command_list +
+                                       (sizeof(struct mtip_cmd_hdr) * i);
+               port->commands[i].command_header_dma =
+                                       dd->port->command_list_dma +
+                                       (sizeof(struct mtip_cmd_hdr) * i);
+
+               if (host_cap_64)
+                       port->commands[i].command_header->ctbau =
+                               __force_bit2int cpu_to_le32(
+                               (port->commands[i].command_dma >> 16) >> 16);
+
+               port->commands[i].command_header->ctba =
+                               __force_bit2int cpu_to_le32(
+                               port->commands[i].command_dma & 0xFFFFFFFF);
+
+               sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+
+               /* Mark command as currently inactive */
+               atomic_set(&dd->port->commands[i].active, 0);
+       }
+       return 0;
+}
+
 /*
  * Called once for each card.
  *
@@ -3370,83 +3514,10 @@ static int mtip_hw_init(struct driver_data *dd)
        dd->port->mmio  = dd->mmio + PORT_OFFSET;
        dd->port->dd    = dd;
 
-       /* Allocate memory for the command list. */
-       dd->port->command_list =
-               dmam_alloc_coherent(&dd->pdev->dev,
-                       HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-                       &dd->port->command_list_dma,
-                       GFP_KERNEL);
-       if (!dd->port->command_list) {
-               dev_err(&dd->pdev->dev,
-                       "Memory allocation: command list\n");
-               rv = -ENOMEM;
+       /* DMA allocations */
+       rv = mtip_dma_alloc(dd);
+       if (rv < 0)
                goto out1;
-       }
-
-       /* Clear the memory we have allocated. */
-       memset(dd->port->command_list,
-               0,
-               HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
-
-       /* Setup the addresse of the RX FIS. */
-       dd->port->rxfis     = dd->port->command_list + HW_CMD_SLOT_SZ;
-       dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
-
-       /* Setup the address of the command tables. */
-       dd->port->command_table   = dd->port->rxfis + AHCI_RX_FIS_SZ;
-       dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
-
-       /* Setup the address of the identify data. */
-       dd->port->identify     = dd->port->command_table +
-                                       HW_CMD_TBL_AR_SZ;
-       dd->port->identify_dma = dd->port->command_tbl_dma +
-                                       HW_CMD_TBL_AR_SZ;
-
-       /* Setup the address of the sector buffer - for some non-ncq cmds */
-       dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
-       dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
-
-       /* Setup the address of the log buf - for read log command */
-       dd->port->log_buf = (void *)dd->port->sector_buffer  + ATA_SECT_SIZE;
-       dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
-
-       /* Setup the address of the smart buf - for smart read data command */
-       dd->port->smart_buf = (void *)dd->port->log_buf  + ATA_SECT_SIZE;
-       dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
-
-
-       /* Point the command headers at the command tables. */
-       for (i = 0; i < num_command_slots; i++) {
-               dd->port->commands[i].command_header =
-                                       dd->port->command_list +
-                                       (sizeof(struct mtip_cmd_hdr) * i);
-               dd->port->commands[i].command_header_dma =
-                                       dd->port->command_list_dma +
-                                       (sizeof(struct mtip_cmd_hdr) * i);
-
-               dd->port->commands[i].command =
-                       dd->port->command_table + (HW_CMD_TBL_SZ * i);
-               dd->port->commands[i].command_dma =
-                       dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
-
-               if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
-                       dd->port->commands[i].command_header->ctbau =
-                       __force_bit2int cpu_to_le32(
-                       (dd->port->commands[i].command_dma >> 16) >> 16);
-               dd->port->commands[i].command_header->ctba =
-                       __force_bit2int cpu_to_le32(
-                       dd->port->commands[i].command_dma & 0xFFFFFFFF);
-
-               /*
-                * If this is not done, a bug is reported by the stock
-                * FC11 i386. Due to the fact that it has lots of kernel
-                * debugging enabled.
-                */
-               sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
-
-               /* Mark all commands as currently inactive.*/
-               atomic_set(&dd->port->commands[i].active, 0);
-       }
 
        /* Setup the pointers to the extended s_active and CI registers. */
        for (i = 0; i < dd->slot_groups; i++) {
@@ -3594,12 +3665,8 @@ out3:
 
 out2:
        mtip_deinit_port(dd->port);
+       mtip_dma_free(dd);
 
-       /* Free the command/command header memory. */
-       dmam_free_coherent(&dd->pdev->dev,
-                               HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-                               dd->port->command_list,
-                               dd->port->command_list_dma);
 out1:
        /* Free the memory allocated for the for structure. */
        kfree(dd->port);
@@ -3622,7 +3689,8 @@ static int mtip_hw_exit(struct driver_data *dd)
         * saves its state.
         */
        if (!dd->sr) {
-               if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+               if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+                   !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
                        if (mtip_standby_immediate(dd->port))
                                dev_warn(&dd->pdev->dev,
                                        "STANDBY IMMEDIATE failed\n");
@@ -3641,11 +3709,9 @@ static int mtip_hw_exit(struct driver_data *dd)
        irq_set_affinity_hint(dd->pdev->irq, NULL);
        devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
-       /* Free the command/command header memory. */
-       dmam_free_coherent(&dd->pdev->dev,
-                       HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
-                       dd->port->command_list,
-                       dd->port->command_list_dma);
+       /* Free dma regions */
+       mtip_dma_free(dd);
+
        /* Free the memory allocated for the for structure. */
        kfree(dd->port);
        dd->port = NULL;
@@ -3962,8 +4028,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct driver_data *dd = queue->queuedata;
        struct scatterlist *sg;
-       struct bio_vec *bvec;
-       int i, nents = 0;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       int nents = 0;
        int tag = 0, unaligned = 0;
 
        if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +4060,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
        }
 
        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-               bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+               bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
                                                bio_sectors(bio)));
                return;
        }
@@ -4006,7 +4073,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
        if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
                                                        dd->unal_qdepth) {
-               if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+               if (bio->bi_iter.bi_sector % 8 != 0)
+                       /* Unaligned on 4k boundaries */
                        unaligned = 1;
                else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
                        unaligned = 1;
@@ -4025,17 +4093,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
                }
 
                /* Create the scatter list for this bio. */
-               bio_for_each_segment(bvec, bio, i) {
+               bio_for_each_segment(bvec, bio, iter) {
                        sg_set_page(&sg[nents],
-                                       bvec->bv_page,
-                                       bvec->bv_len,
-                                       bvec->bv_offset);
+                                       bvec.bv_page,
+                                       bvec.bv_len,
+                                       bvec.bv_offset);
                        nents++;
                }
 
                /* Issue the read/write. */
                mtip_hw_submit_io(dd,
-                               bio->bi_sector,
+                               bio->bi_iter.bi_sector,
                                bio_sectors(bio),
                                nents,
                                tag,
index 9be7a1582ad3471a5400237b7db57b6a976cb2d9..b52e9a6d6aad6d602bf3a9d6f73c9b4c277c7e17 100644 (file)
@@ -69,7 +69,7 @@
  * Maximum number of scatter gather entries
  * a single command may have.
  */
-#define MTIP_MAX_SG            128
+#define MTIP_MAX_SG            504
 
 /*
  * Maximum number of slot groups (Command Issue & s_active registers)
@@ -92,7 +92,7 @@
 
 /* Driver name and version strings */
 #define MTIP_DRV_NAME          "mtip32xx"
-#define MTIP_DRV_VERSION       "1.2.6os3"
+#define MTIP_DRV_VERSION       "1.3.0"
 
 /* Maximum number of minor device numbers per device. */
 #define MTIP_MAX_MINORS                16
@@ -391,15 +391,13 @@ struct mtip_port {
         */
        dma_addr_t rxfis_dma;
        /*
-        * Pointer to the beginning of the command table memory as used
-        * by the driver.
+        * Pointer to the DMA region for RX Fis, Identify, RLE10, and SMART
         */
-       void *command_table;
+       void *block1;
        /*
-        * Pointer to the beginning of the command table memory as used
-        * by the DMA.
+        * DMA address of region for RX Fis, Identify, RLE10, and SMART
         */
-       dma_addr_t command_tbl_dma;
+       dma_addr_t block1_dma;
        /*
         * Pointer to the beginning of the identify data memory as used
         * by the driver.
index 2dc3b5153f0d82b42cfab720464e17bd963223f8..55298db36b2d61a113f25c22905fffb0f22ddd32 100644 (file)
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
-               struct bio_vec *bvec;
+               struct bio_vec bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
-                       if (!rq_iter_last(req, iter))
+                       if (!rq_iter_last(bvec, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-                                       nbd->disk->disk_name, req, bvec->bv_len);
-                       result = sock_send_bvec(nbd, bvec, flags);
+                                       nbd->disk->disk_name, req, bvec.bv_len);
+                       result = sock_send_bvec(nbd, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                        nbd->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
-               struct bio_vec *bvec;
+               struct bio_vec bvec;
 
                rq_for_each_segment(bvec, req, iter) {
-                       result = sock_recv_bvec(nbd, bvec);
+                       result = sock_recv_bvec(nbd, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-                               nbd->disk->disk_name, req, bvec->bv_len);
+                               nbd->disk->disk_name, req, bvec.bv_len);
                }
        }
        return req;
index 83a598ebb65a4ab7699d1dcebe1b44b42ea8ae5a..3107282a9741f96665a2805b08217d3209c27bf7 100644 (file)
@@ -616,6 +616,11 @@ static int __init null_init(void)
                irqmode = NULL_IRQ_NONE;
        }
 #endif
+       if (bs > PAGE_SIZE) {
+               pr_warn("null_blk: invalid block size\n");
+               pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+               bs = PAGE_SIZE;
+       }
 
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
index 26d03fa0bf26696d9e004b3983a580d409d3d006..1f14ac4039450e84137b4eab0dacf043aa46d16e 100644 (file)
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
        return total_len;
 }
 
-struct nvme_bio_pair {
-       struct bio b1, b2, *parent;
-       struct bio_vec *bv1, *bv2;
-       int err;
-       atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
-       struct nvme_bio_pair *bp = bio->bi_private;
-
-       if (err)
-               bp->err = err;
-
-       if (atomic_dec_and_test(&bp->cnt)) {
-               bio_endio(bp->parent, bp->err);
-               kfree(bp->bv1);
-               kfree(bp->bv2);
-               kfree(bp);
-       }
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
-                                                       int len, int offset)
-{
-       struct nvme_bio_pair *bp;
-
-       BUG_ON(len > bio->bi_size);
-       BUG_ON(idx > bio->bi_vcnt);
-
-       bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
-       if (!bp)
-               return NULL;
-       bp->err = 0;
-
-       bp->b1 = *bio;
-       bp->b2 = *bio;
-
-       bp->b1.bi_size = len;
-       bp->b2.bi_size -= len;
-       bp->b1.bi_vcnt = idx;
-       bp->b2.bi_idx = idx;
-       bp->b2.bi_sector += len >> 9;
-
-       if (offset) {
-               bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-                                                               GFP_ATOMIC);
-               if (!bp->bv1)
-                       goto split_fail_1;
-
-               bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-                                                               GFP_ATOMIC);
-               if (!bp->bv2)
-                       goto split_fail_2;
-
-               memcpy(bp->bv1, bio->bi_io_vec,
-                       bio->bi_max_vecs * sizeof(struct bio_vec));
-               memcpy(bp->bv2, bio->bi_io_vec,
-                       bio->bi_max_vecs * sizeof(struct bio_vec));
-
-               bp->b1.bi_io_vec = bp->bv1;
-               bp->b2.bi_io_vec = bp->bv2;
-               bp->b2.bi_io_vec[idx].bv_offset += offset;
-               bp->b2.bi_io_vec[idx].bv_len -= offset;
-               bp->b1.bi_io_vec[idx].bv_len = offset;
-               bp->b1.bi_vcnt++;
-       } else
-               bp->bv1 = bp->bv2 = NULL;
-
-       bp->b1.bi_private = bp;
-       bp->b2.bi_private = bp;
-
-       bp->b1.bi_end_io = nvme_bio_pair_endio;
-       bp->b2.bi_end_io = nvme_bio_pair_endio;
-
-       bp->parent = bio;
-       atomic_set(&bp->cnt, 2);
-
-       return bp;
-
- split_fail_2:
-       kfree(bp->bv1);
- split_fail_1:
-       kfree(bp);
-       return NULL;
-}
-
 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
-                                               int idx, int len, int offset)
+                                int len)
 {
-       struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
-       if (!bp)
+       struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+       if (!split)
                return -ENOMEM;
 
+       bio_chain(split, bio);
+
        if (bio_list_empty(&nvmeq->sq_cong))
                add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-       bio_list_add(&nvmeq->sq_cong, &bp->b1);
-       bio_list_add(&nvmeq->sq_cong, &bp->b2);
+       bio_list_add(&nvmeq->sq_cong, split);
+       bio_list_add(&nvmeq->sq_cong, bio);
 
        return 0;
 }
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-       struct bio_vec *bvec, *bvprv = NULL;
+       struct bio_vec bvec, bvprv;
+       struct bvec_iter iter;
        struct scatterlist *sg = NULL;
-       int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+       int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+       int first = 1;
 
        if (nvmeq->dev->stripe_size)
                split_len = nvmeq->dev->stripe_size -
-                       ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+                       ((bio->bi_iter.bi_sector << 9) &
+                        (nvmeq->dev->stripe_size - 1));
 
        sg_init_table(iod->sg, psegs);
-       bio_for_each_segment(bvec, bio, i) {
-               if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-                       sg->length += bvec->bv_len;
+       bio_for_each_segment(bvec, bio, iter) {
+               if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+                       sg->length += bvec.bv_len;
                } else {
-                       if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-                               return nvme_split_and_submit(bio, nvmeq, i,
-                                                               length, 0);
+                       if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+                               return nvme_split_and_submit(bio, nvmeq,
+                                                            length);
 
                        sg = sg ? sg + 1 : iod->sg;
-                       sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-                                                       bvec->bv_offset);
+                       sg_set_page(sg, bvec.bv_page,
+                                   bvec.bv_len, bvec.bv_offset);
                        nsegs++;
                }
 
-               if (split_len - length < bvec->bv_len)
-                       return nvme_split_and_submit(bio, nvmeq, i, split_len,
-                                                       split_len - length);
-               length += bvec->bv_len;
+               if (split_len - length < bvec.bv_len)
+                       return nvme_split_and_submit(bio, nvmeq, split_len);
+               length += bvec.bv_len;
                bvprv = bvec;
+               first = 0;
        }
        iod->nents = nsegs;
        sg_mark_end(sg);
        if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
                return -ENOMEM;
 
-       BUG_ON(length != bio->bi_size);
+       BUG_ON(length != bio->bi_iter.bi_size);
        return length;
 }
 
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        iod->npages = 0;
 
        range->cattr = cpu_to_le32(0);
-       range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-       range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+       range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+       range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        }
 
        result = -ENOMEM;
-       iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
        if (!iod)
                goto nomem;
        iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
        length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
                                                                GFP_ATOMIC);
-       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
        cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
index 4a27b1de5fcb9fb0fef805fb51d0d2f1bc418a29..2ce3dfd7e6b9bafd65c670aa45bf3a8660578360 100644 (file)
@@ -581,7 +581,7 @@ static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count,
 
        if (hdr.magic != PG_MAGIC)
                return -EINVAL;
-       if (hdr.dlen > PG_MAX_DATA)
+       if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
                return -EINVAL;
        if ((count - hs) > PG_MAX_DATA)
                return -EINVAL;
index ff8668c5efb10eebc1a02736d306cce1f43ad7ff..a2af73db187b694c3112bf1c6d08e4070596cd10 100644 (file)
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 
        for (;;) {
                tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-               if (s <= tmp->bio->bi_sector)
+               if (s <= tmp->bio->bi_iter.bi_sector)
                        next = n->rb_left;
                else
                        next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
                n = next;
        }
 
-       if (s > tmp->bio->bi_sector) {
+       if (s > tmp->bio->bi_iter.bi_sector) {
                tmp = pkt_rbtree_next(tmp);
                if (!tmp)
                        return NULL;
        }
-       BUG_ON(s > tmp->bio->bi_sector);
+       BUG_ON(s > tmp->bio->bi_iter.bi_sector);
        return tmp;
 }
 
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
        struct rb_node **p = &pd->bio_queue.rb_node;
        struct rb_node *parent = NULL;
-       sector_t s = node->bio->bi_sector;
+       sector_t s = node->bio->bi_iter.bi_sector;
        struct pkt_rb_node *tmp;
 
        while (*p) {
                parent = *p;
                tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-               if (s < tmp->bio->bi_sector)
+               if (s < tmp->bio->bi_iter.bi_sector)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -706,7 +706,9 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
                             WRITE : READ, __GFP_WAIT);
 
        if (cgc->buflen) {
-               if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+               ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+                                     __GFP_WAIT);
+               if (ret)
                        goto out;
        }
 
@@ -857,7 +859,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                        spin_lock(&pd->iosched.lock);
                        bio = bio_list_peek(&pd->iosched.write_queue);
                        spin_unlock(&pd->iosched.lock);
-                       if (bio && (bio->bi_sector == pd->iosched.last_write))
+                       if (bio && (bio->bi_iter.bi_sector ==
+                                   pd->iosched.last_write))
                                need_write_seek = 0;
                        if (need_write_seek && reads_queued) {
                                if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +891,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                        continue;
 
                if (bio_data_dir(bio) == READ)
-                       pd->iosched.successive_reads += bio->bi_size >> 10;
+                       pd->iosched.successive_reads +=
+                               bio->bi_iter.bi_size >> 10;
                else {
                        pd->iosched.successive_reads = 0;
                        pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +982,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
 
        pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
                bio, (unsigned long long)pkt->sector,
-               (unsigned long long)bio->bi_sector, err);
+               (unsigned long long)bio->bi_iter.bi_sector, err);
 
        if (err)
                atomic_inc(&pkt->io_errors);
@@ -1026,8 +1030,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
        memset(written, 0, sizeof(written));
        spin_lock(&pkt->lock);
        bio_list_for_each(bio, &pkt->orig_bios) {
-               int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-               int num_frames = bio->bi_size / CD_FRAMESIZE;
+               int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+                       (CD_FRAMESIZE >> 9);
+               int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
                pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
                BUG_ON(first_frame < 0);
                BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1058,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
                bio = pkt->r_bios[f];
                bio_reset(bio);
-               bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+               bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                bio->bi_bdev = pd->bdev;
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;
@@ -1150,8 +1155,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
        bio_reset(pkt->bio);
        pkt->bio->bi_bdev = pd->bdev;
        pkt->bio->bi_rw = REQ_WRITE;
-       pkt->bio->bi_sector = new_sector;
-       pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+       pkt->bio->bi_iter.bi_sector = new_sector;
+       pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
        pkt->bio->bi_vcnt = pkt->frames;
 
        pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1218,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
        node = first_node;
        while (node) {
                bio = node->bio;
-               zone = get_zone(bio->bi_sector, pd);
+               zone = get_zone(bio->bi_iter.bi_sector, pd);
                list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
                        if (p->sector == zone) {
                                bio = NULL;
@@ -1252,14 +1257,14 @@ try_next_bio:
        pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
        while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
                bio = node->bio;
-               pkt_dbg(2, pd, "found zone=%llx\n",
-                       (unsigned long long)get_zone(bio->bi_sector, pd));
-               if (get_zone(bio->bi_sector, pd) != zone)
+               pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+                       get_zone(bio->bi_iter.bi_sector, pd));
+               if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
                        break;
                pkt_rbtree_erase(pd, node);
                spin_lock(&pkt->lock);
                bio_list_add(&pkt->orig_bios, bio);
-               pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+               pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
                spin_unlock(&pkt->lock);
        }
        /* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1298,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
        bio_reset(pkt->w_bio);
-       pkt->w_bio->bi_sector = pkt->sector;
+       pkt->w_bio->bi_iter.bi_sector = pkt->sector;
        pkt->w_bio->bi_bdev = pd->bdev;
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2340,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
        pkt_bio_finished(pd);
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-       struct pktcdvd_device *pd;
-       char b[BDEVNAME_SIZE];
+       struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+       struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+       psd->pd = pd;
+       psd->bio = bio;
+       cloned_bio->bi_bdev = pd->bdev;
+       cloned_bio->bi_private = psd;
+       cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+       pd->stats.secs_r += bio_sectors(bio);
+       pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd = q->queuedata;
        sector_t zone;
        struct packet_data *pkt;
        int was_empty, blocked_bio;
        struct pkt_rb_node *node;
 
-       pd = q->queuedata;
-       if (!pd) {
-               pr_err("%s incorrect request queue\n",
-                      bdevname(bio->bi_bdev, b));
-               goto end_io;
-       }
-
-       /*
-        * Clone READ bios so we can have our own bi_end_io callback.
-        */
-       if (bio_data_dir(bio) == READ) {
-               struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
-               struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
-               psd->pd = pd;
-               psd->bio = bio;
-               cloned_bio->bi_bdev = pd->bdev;
-               cloned_bio->bi_private = psd;
-               cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-               pd->stats.secs_r += bio_sectors(bio);
-               pkt_queue_bio(pd, cloned_bio);
-               return;
-       }
-
-       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-               pkt_notice(pd, "WRITE for ro device (%llu)\n",
-                          (unsigned long long)bio->bi_sector);
-               goto end_io;
-       }
-
-       if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-               pkt_err(pd, "wrong bio size\n");
-               goto end_io;
-       }
-
-       blk_queue_bounce(q, &bio);
-
-       zone = get_zone(bio->bi_sector, pd);
-       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-               (unsigned long long)bio->bi_sector,
-               (unsigned long long)bio_end_sector(bio));
-
-       /* Check if we have to split the bio */
-       {
-               struct bio_pair *bp;
-               sector_t last_zone;
-               int first_sectors;
-
-               last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-               if (last_zone != zone) {
-                       BUG_ON(last_zone != zone + pd->settings.size);
-                       first_sectors = last_zone - bio->bi_sector;
-                       bp = bio_split(bio, first_sectors);
-                       BUG_ON(!bp);
-                       pkt_make_request(q, &bp->bio1);
-                       pkt_make_request(q, &bp->bio2);
-                       bio_pair_release(bp);
-                       return;
-               }
-       }
+       zone = get_zone(bio->bi_iter.bi_sector, pd);
 
        /*
         * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2376,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                        if ((pkt->state == PACKET_WAITING_STATE) ||
                            (pkt->state == PACKET_READ_WAIT_STATE)) {
                                bio_list_add(&pkt->orig_bios, bio);
-                               pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+                               pkt->write_size +=
+                                       bio->bi_iter.bi_size / CD_FRAMESIZE;
                                if ((pkt->write_size >= pkt->frames) &&
                                    (pkt->state == PACKET_WAITING_STATE)) {
                                        atomic_inc(&pkt->run_sm);
@@ -2476,6 +2436,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                 */
                wake_up(&pd->wqueue);
        }
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd;
+       char b[BDEVNAME_SIZE];
+       struct bio *split;
+
+       pd = q->queuedata;
+       if (!pd) {
+               pr_err("%s incorrect request queue\n",
+                      bdevname(bio->bi_bdev, b));
+               goto end_io;
+       }
+
+       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+               (unsigned long long)bio->bi_iter.bi_sector,
+               (unsigned long long)bio_end_sector(bio));
+
+       /*
+        * Clone READ bios so we can have our own bi_end_io callback.
+        */
+       if (bio_data_dir(bio) == READ) {
+               pkt_make_request_read(pd, bio);
+               return;
+       }
+
+       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+               pkt_notice(pd, "WRITE for ro device (%llu)\n",
+                          (unsigned long long)bio->bi_iter.bi_sector);
+               goto end_io;
+       }
+
+       if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+               pkt_err(pd, "wrong bio size\n");
+               goto end_io;
+       }
+
+       blk_queue_bounce(q, &bio);
+
+       do {
+               sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+               sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+               if (last_zone != zone) {
+                       BUG_ON(last_zone != zone + pd->settings.size);
+
+                       split = bio_split(bio, last_zone -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               pkt_make_request_write(q, split);
+       } while (split != bio);
+
        return;
 end_io:
        bio_io_error(bio);
index d754a88d75858ef46f8553ac54b0163ff7b60d8a..c120d70d3fb3b31fdfb584859a8cdd5bf0849dab 100644 (file)
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 {
        unsigned int offset = 0;
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
        unsigned int i = 0;
        size_t size;
        void *buf;
 
        rq_for_each_segment(bvec, req, iter) {
                unsigned long flags;
-               dev_dbg(&dev->sbd.core,
-                       "%s:%u: bio %u: %u segs %u sectors from %lu\n",
-                       __func__, __LINE__, i, bio_segments(iter.bio),
-                       bio_sectors(iter.bio), iter.bio->bi_sector);
+               dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+                       __func__, __LINE__, i, bio_sectors(iter.bio),
+                       iter.bio->bi_iter.bi_sector);
 
-               size = bvec->bv_len;
-               buf = bvec_kmap_irq(bvec, &flags);
+               size = bvec.bv_len;
+               buf = bvec_kmap_irq(&bvec, &flags);
                if (gather)
                        memcpy(dev->bounce_buf+offset, buf, size);
                else
                        memcpy(buf, dev->bounce_buf+offset, size);
                offset += size;
-               flush_kernel_dcache_page(bvec->bv_page);
+               flush_kernel_dcache_page(bvec.bv_page);
                bvec_kunmap_irq(buf, &flags);
                i++;
        }
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
        unsigned int n = 0;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        struct req_iterator iter;
 
        rq_for_each_segment(bv, req, iter)
index 06a2e53e5f37299191b1d9c7fe5e2caf036461a6..ef45cfb98fd2f12278d9f9caa6d044c1336e7baa 100644 (file)
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
        int write = bio_data_dir(bio) == WRITE;
        const char *op = write ? "write" : "read";
-       loff_t offset = bio->bi_sector << 9;
+       loff_t offset = bio->bi_iter.bi_sector << 9;
        int error = 0;
-       struct bio_vec *bvec;
-       unsigned int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct bio *next;
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                /* PS3 is ppc64, so we don't handle highmem */
-               char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-               size_t len = bvec->bv_len, retlen;
+               char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+               size_t len = bvec.bv_len, retlen;
 
                dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
                        len, offset);
index 16cab6635163797da9414a27cb8634356d5cd999..b365e0dfccb66f7c256a9d07d7fd976fba17ae95 100644 (file)
@@ -1156,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
+       struct bvec_iter iter;
        unsigned long flags;
        void *buf;
-       int i;
        int pos = 0;
 
        while (chain) {
-               bio_for_each_segment(bv, chain, i) {
-                       if (pos + bv->bv_len > start_ofs) {
+               bio_for_each_segment(bv, chain, iter) {
+                       if (pos + bv.bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
-                               buf = bvec_kmap_irq(bv, &flags);
+                               buf = bvec_kmap_irq(&bv, &flags);
                                memset(buf + remainder, 0,
-                                      bv->bv_len - remainder);
-                               flush_dcache_page(bv->bv_page);
+                                      bv.bv_len - remainder);
+                               flush_dcache_page(bv.bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
-                       pos += bv->bv_len;
+                       pos += bv.bv_len;
                }
 
                chain = chain->bi_next;
@@ -1220,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int len,
                                        gfp_t gfpmask)
 {
-       struct bio_vec *bv;
-       unsigned int resid;
-       unsigned short idx;
-       unsigned int voff;
-       unsigned short end_idx;
-       unsigned short vcnt;
        struct bio *bio;
 
-       /* Handle the easy case for the caller */
-
-       if (!offset && len == bio_src->bi_size)
-               return bio_clone(bio_src, gfpmask);
-
-       if (WARN_ON_ONCE(!len))
-               return NULL;
-       if (WARN_ON_ONCE(len > bio_src->bi_size))
-               return NULL;
-       if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
-               return NULL;
-
-       /* Find first affected segment... */
-
-       resid = offset;
-       bio_for_each_segment(bv, bio_src, idx) {
-               if (resid < bv->bv_len)
-                       break;
-               resid -= bv->bv_len;
-       }
-       voff = resid;
-
-       /* ...and the last affected segment */
-
-       resid += len;
-       __bio_for_each_segment(bv, bio_src, end_idx, idx) {
-               if (resid <= bv->bv_len)
-                       break;
-               resid -= bv->bv_len;
-       }
-       vcnt = end_idx - idx + 1;
-
-       /* Build the clone */
-
-       bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+       bio = bio_clone(bio_src, gfpmask);
        if (!bio)
                return NULL;    /* ENOMEM */
 
-       bio->bi_bdev = bio_src->bi_bdev;
-       bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
-       bio->bi_rw = bio_src->bi_rw;
-       bio->bi_flags |= 1 << BIO_CLONED;
-
-       /*
-        * Copy over our part of the bio_vec, then update the first
-        * and last (or only) entries.
-        */
-       memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
-                       vcnt * sizeof (struct bio_vec));
-       bio->bi_io_vec[0].bv_offset += voff;
-       if (vcnt > 1) {
-               bio->bi_io_vec[0].bv_len -= voff;
-               bio->bi_io_vec[vcnt - 1].bv_len = resid;
-       } else {
-               bio->bi_io_vec[0].bv_len = len;
-       }
-
-       bio->bi_vcnt = vcnt;
-       bio->bi_size = len;
-       bio->bi_idx = 0;
+       bio_advance(bio, offset);
+       bio->bi_iter.bi_size = len;
 
        return bio;
 }
@@ -1318,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 
        /* Build up a chain of clone bios up to the limit */
 
-       if (!bi || off >= bi->bi_size || !len)
+       if (!bi || off >= bi->bi_iter.bi_size || !len)
                return NULL;            /* Nothing to clone */
 
        end = &chain;
@@ -1330,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
-               bi_size = min_t(unsigned int, bi->bi_size - off, len);
+               bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */
@@ -1339,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                end = &bio->bi_next;
 
                off += bi_size;
-               if (off == bi->bi_size) {
+               if (off == bi->bi_iter.bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
@@ -2227,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 
        if (type == OBJ_REQUEST_BIO) {
                bio_list = data_desc;
-               rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+               rbd_assert(img_offset ==
+                          bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
        } else {
                rbd_assert(type == OBJ_REQUEST_PAGES);
                pages = data_desc;
index 2284f5d3a54ad00dd05c512b30b7bfed30411482..2839d37e5af77922051cb1d48561602ea3a3c2b7 100644 (file)
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
        if (!card)
                goto req_err;
 
-       if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+       if (bio_end_sector(bio) > get_capacity(card->gendisk))
                goto req_err;
 
        if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
                goto req_err;
        }
 
-       if (bio->bi_size == 0) {
+       if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
                goto req_err;
        }
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
        dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
                 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-                (u64)bio->bi_sector << 9, bio->bi_size);
+                (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
        st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
                                    bio_dma_done_cb, bio_meta);
index fc88ba3e1bd27835ecf170d5ba321cf8313a6cea..cf8cd293abb51d338cd6d0ae7762070c80be99a2 100644 (file)
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           void *cb_data)
 {
        struct list_head dma_list[RSXX_MAX_TARGETS];
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        int st;
        int i;
 
-       addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+       addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);
 
        for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        }
 
        if (bio->bi_rw & REQ_DISCARD) {
-               bv_len = bio->bi_size;
+               bv_len = bio->bi_iter.bi_size;
 
                while (bv_len > 0) {
                        tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
-               bio_for_each_segment(bvec, bio, i) {
-                       bv_len = bvec->bv_len;
-                       bv_off = bvec->bv_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       bv_len = bvec.bv_len;
+                       bv_off = bvec.bv_offset;
 
                        while (bv_len > 0) {
                                tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                        bio_data_dir(bio),
                                                        dma_off, dma_len,
-                                                       laddr, bvec->bv_page,
+                                                       laddr, bvec.bv_page,
                                                        bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;
index 3fb6ab4c8b4e9e96f9ba487cf801fdcd776f0c73..d5e2d12b9d9e329d77fb21560b8f21e5e98a11f4 100644 (file)
@@ -1744,20 +1744,6 @@ static void carm_remove_one (struct pci_dev *pdev)
        kfree(host);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
-static int __init carm_init(void)
-{
-       return pci_register_driver(&carm_driver);
-}
-
-static void __exit carm_exit(void)
-{
-       pci_unregister_driver(&carm_driver);
-}
-
-module_init(carm_init);
-module_exit(carm_exit);
-
-
+module_pci_driver(carm_driver);
index ad70868f8a967b40bc866bc5430387b55f4601ac..4cf81b5bf0f7fba42a243a7e717ead98b2144759 100644 (file)
@@ -108,8 +108,7 @@ struct cardinfo {
                                    * have been written
                                    */
        struct bio      *bio, *currentbio, **biotail;
-       int             current_idx;
-       sector_t        current_sector;
+       struct bvec_iter current_iter;
 
        struct request_queue *queue;
 
@@ -118,7 +117,7 @@ struct cardinfo {
                struct mm_dma_desc      *desc;
                int                     cnt, headcnt;
                struct bio              *bio, **biotail;
-               int                     idx;
+               struct bvec_iter        iter;
        } mm_pages[2];
 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
        dma_addr_t dma_handle;
        int offset;
        struct bio *bio;
-       struct bio_vec *vec;
-       int idx;
+       struct bio_vec vec;
        int rw;
-       int len;
 
        bio = card->currentbio;
        if (!bio && card->bio) {
                card->currentbio = card->bio;
-               card->current_idx = card->bio->bi_idx;
-               card->current_sector = card->bio->bi_sector;
+               card->current_iter = card->bio->bi_iter;
                card->bio = card->bio->bi_next;
                if (card->bio == NULL)
                        card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
        }
        if (!bio)
                return 0;
-       idx = card->current_idx;
 
        rw = bio_rw(bio);
        if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
                return 0;
 
-       vec = bio_iovec_idx(bio, idx);
-       len = vec->bv_len;
+       vec = bio_iter_iovec(bio, card->current_iter);
+
        dma_handle = pci_map_page(card->dev,
-                                 vec->bv_page,
-                                 vec->bv_offset,
-                                 len,
+                                 vec.bv_page,
+                                 vec.bv_offset,
+                                 vec.bv_len,
                                  (rw == READ) ?
                                  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
        desc = &p->desc[p->cnt];
        p->cnt++;
        if (p->bio == NULL)
-               p->idx = idx;
+               p->iter = card->current_iter;
        if ((p->biotail) != &bio->bi_next) {
                *(p->biotail) = bio;
                p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
        desc->data_dma_handle = dma_handle;
 
        desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-       desc->local_addr = cpu_to_le64(card->current_sector << 9);
-       desc->transfer_size = cpu_to_le32(len);
+       desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+       desc->transfer_size = cpu_to_le32(vec.bv_len);
        offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
        desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
        desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
                desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
        desc->sem_control_bits = desc->control_bits;
 
-       card->current_sector += (len >> 9);
-       idx++;
-       card->current_idx = idx;
-       if (idx >= bio->bi_vcnt)
+
+       bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+       if (!card->current_iter.bi_size)
                card->currentbio = NULL;
 
        return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
                struct mm_dma_desc *desc = &page->desc[page->headcnt];
                int control = le32_to_cpu(desc->sem_control_bits);
                int last = 0;
-               int idx;
+               struct bio_vec vec;
 
                if (!(control & DMASCR_DMA_COMPLETE)) {
                        control = dma_status;
                        last = 1;
                }
+
                page->headcnt++;
-               idx = page->idx;
-               page->idx++;
-               if (page->idx >= bio->bi_vcnt) {
+               vec = bio_iter_iovec(bio, page->iter);
+               bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+               if (!page->iter.bi_size) {
                        page->bio = bio->bi_next;
                        if (page->bio)
-                               page->idx = page->bio->bi_idx;
+                               page->iter = page->bio->bi_iter;
                }
 
                pci_unmap_page(card->dev, desc->data_dma_handle,
-                              bio_iovec_idx(bio, idx)->bv_len,
+                              vec.bv_len,
                                 (control & DMASCR_TRANSFER_READ) ?
                                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
        pr_debug("mm_make_request %llu %u\n",
-                (unsigned long long)bio->bi_sector, bio->bi_size);
+                (unsigned long long)bio->bi_iter.bi_sector,
+                bio->bi_iter.bi_size);
 
        spin_lock_irq(&card->lock);
        *card->biotail = bio;
index 6620b73d04906191132d771dade31f9e00043e07..4b97b86da9265b4ca5dcb3a7ab562dbf1eb5bac0 100644 (file)
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
-                       bio->bi_sector  = preq.sector_number;
+                       bio->bi_iter.bi_sector  = preq.sector_number;
                }
 
                preq.sector_number += seg[i].nsec;
index f9c43f91f03e5de68bff030b663f094e56fc1f9f..8dcfb54f160302e0e1d91c232387f758b2f8e0f6 100644 (file)
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
                        for (i = 0; i < pending; i++) {
                                offset = (i * segs * PAGE_SIZE) >> 9;
                                size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
-                                          (unsigned int)(bio->bi_size >> 9) - offset);
+                                          (unsigned int)bio_sectors(bio) - offset);
                                cloned_bio = bio_clone(bio, GFP_NOIO);
                                BUG_ON(cloned_bio == NULL);
                                bio_trim(cloned_bio, offset, size);
index 5980cb9af857891491baee6026efe16cb84694de..51e75ad964223e07268e0781caa5fa9230f43824 100644 (file)
@@ -561,11 +561,11 @@ static int gdrom_set_interrupt_handlers(void)
        int err;
 
        err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
-               IRQF_DISABLED, "gdrom_command", &gd);
+               0, "gdrom_command", &gd);
        if (err)
                return err;
        err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
-               IRQF_DISABLED, "gdrom_dma", &gd);
+               0, "gdrom_dma", &gd);
        if (err)
                free_irq(HW_EVENT_GDROM_CMD, &gd);
        return err;
index 290fe5b7fd327f48f9a9b8f7091a6f43b83c1745..a324f9303e36da8e9c63c423f3405d777f05cdbb 100644 (file)
@@ -49,7 +49,7 @@ obj-$(CONFIG_GPIO_TB0219)     += tb0219.o
 obj-$(CONFIG_TELCLOCK)         += tlclk.o
 
 obj-$(CONFIG_MWAVE)            += mwave/
-obj-$(CONFIG_AGP)              += agp/
+obj-y                          += agp/
 obj-$(CONFIG_PCMCIA)           += pcmcia/
 
 obj-$(CONFIG_HANGCHECK_TIMER)  += hangcheck-timer.o
index d8b1b576556cf8eba8c2fa398c6c9bc8d6960803..c528f96ee204fb3f480e29d96992e1e0c1870272 100644 (file)
@@ -68,6 +68,7 @@ config AGP_AMD64
 config AGP_INTEL
        tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
        depends on AGP && X86
+       select INTEL_GTT
        help
          This option gives you AGP support for the GLX component of X
          on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
@@ -155,3 +156,7 @@ config AGP_SGI_TIOCA
           This option gives you AGP GART support for the SGI TIO chipset
           for IA64 processors.
 
+config INTEL_GTT
+       tristate
+       depends on X86 && PCI
+
index 8eb56e273e75719f51d617fe3a2ed5c4b9cc06f2..604489bcdbf9b6c70b51e89f093accfdb34f4d25 100644 (file)
@@ -13,7 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)      += hp-agp.o
 obj-$(CONFIG_AGP_PARISC)       += parisc-agp.o
 obj-$(CONFIG_AGP_I460)         += i460-agp.o
 obj-$(CONFIG_AGP_INTEL)                += intel-agp.o
-obj-$(CONFIG_AGP_INTEL)                += intel-gtt.o
+obj-$(CONFIG_INTEL_GTT)                += intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)       += nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)    += sgi-agp.o
 obj-$(CONFIG_AGP_SIS)          += sis-agp.o
index a7c276585a9f4f38b8dc510ea0c2fc6742f5b296..f9b9ca5d31b7946c3b1407ef692db1c2c149c628 100644 (file)
@@ -14,9 +14,6 @@
 #include "intel-agp.h"
 #include <drm/intel-gtt.h>
 
-int intel_agp_enabled;
-EXPORT_SYMBOL(intel_agp_enabled);
-
 static int intel_fetch_size(void)
 {
        int i;
@@ -806,8 +803,6 @@ static int agp_intel_probe(struct pci_dev *pdev,
 found_gmch:
        pci_set_drvdata(pdev, bridge);
        err = agp_add_bridge(bridge);
-       if (!err)
-               intel_agp_enabled = 1;
        return err;
 }
 
index ad5da1ffcbe9a87ca82c78c17ce8aab46d1fb811..5c85350f4c3d065e9c9bdf9ba762667604b7c0f3 100644 (file)
@@ -94,6 +94,7 @@ static struct _intel_private {
 #define IS_IRONLAKE    intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN   intel_private.driver->has_pgtbl_enable
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_gtt_map_memory(struct page **pages,
                                unsigned int num_entries,
                                struct sg_table *st)
@@ -168,6 +169,7 @@ static void i8xx_destroy_pages(struct page *page)
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
 }
+#endif
 
 #define I810_GTT_ORDER 4
 static int i810_setup(void)
@@ -208,6 +210,7 @@ static void i810_cleanup(void)
        free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
                                      int type)
 {
@@ -288,6 +291,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
        }
        kfree(curr);
 }
+#endif
 
 static int intel_gtt_setup_scratch_page(void)
 {
@@ -645,7 +649,9 @@ static int intel_gtt_init(void)
                return -ENOMEM;
        }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
        global_cache_flush();   /* FIXME: ? */
+#endif
 
        intel_private.stolen_size = intel_gtt_stolen_size();
 
@@ -666,6 +672,7 @@ static int intel_gtt_init(void)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_fetch_size(void)
 {
        int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
@@ -684,6 +691,7 @@ static int intel_fake_agp_fetch_size(void)
 
        return 0;
 }
+#endif
 
 static void i830_cleanup(void)
 {
@@ -795,6 +803,7 @@ static int i830_setup(void)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
 {
        agp_bridge->gatt_table_real = NULL;
@@ -819,6 +828,7 @@ static int intel_fake_agp_configure(void)
 
        return 0;
 }
+#endif
 
 static bool i830_check_flags(unsigned int flags)
 {
@@ -857,6 +867,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static void intel_gtt_insert_pages(unsigned int first_entry,
                                   unsigned int num_entries,
                                   struct page **pages,
@@ -922,6 +933,7 @@ out_err:
        mem->is_flushed = true;
        return ret;
 }
+#endif
 
 void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 {
@@ -935,6 +947,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 }
 EXPORT_SYMBOL(intel_gtt_clear_range);
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
                                         off_t pg_start, int type)
 {
@@ -976,6 +989,7 @@ static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
        /* always return NULL for other allocation types for now */
        return NULL;
 }
+#endif
 
 static int intel_alloc_chipset_flush_resource(void)
 {
@@ -1129,6 +1143,7 @@ static int i9xx_setup(void)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static const struct agp_bridge_driver intel_fake_agp_driver = {
        .owner                  = THIS_MODULE,
        .size_type              = FIXED_APER_SIZE,
@@ -1150,6 +1165,7 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
 };
+#endif
 
 static const struct intel_gtt_driver i81x_gtt_driver = {
        .gen = 1,
@@ -1367,11 +1383,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
        intel_private.refcount++;
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
        if (bridge) {
                bridge->driver = &intel_fake_agp_driver;
                bridge->dev_private_data = &intel_private;
                bridge->dev = bridge_pdev;
        }
+#endif
 
        intel_private.bridge_dev = pci_dev_get(bridge_pdev);
 
index e210f858d3cbf854130e0c149703c0642710bf7e..d915707d2ba1d3eae5b3ade411dd5226a5f3296d 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001  Massimo Dal Zotto <dz@debian.org>
  *
  * Hwmon integration:
- * Copyright (C) 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011  Jean Delvare <jdelvare@suse.de>
  * Copyright (C) 2013  Guenter Roeck <linux@roeck-us.net>
  *
  * This program is free software; you can redistribute it and/or modify it
index 671c3852d35921852a0570c1636196d38e177599..03f41896d09050ff391ad55ef263faa1430de19a 100644 (file)
@@ -2724,6 +2724,7 @@ static struct platform_driver ipmi_driver = {
 static int ipmi_parisc_probe(struct parisc_device *dev)
 {
        struct smi_info *info;
+       int rv;
 
        info = smi_info_alloc();
 
index 0c16e9cdfb87857c3fd528eebefa72f4af28f629..a367a98317175f59745e5b61e6fdf175f29342bc 100644 (file)
@@ -9,45 +9,44 @@ obj-$(CONFIG_COMMON_CLK)      += clk-gate.o
 obj-$(CONFIG_COMMON_CLK)       += clk-mux.o
 obj-$(CONFIG_COMMON_CLK)       += clk-composite.o
 
-# SoCs specific
-obj-$(CONFIG_ARCH_BCM2835)     += clk-bcm2835.o
-obj-$(CONFIG_ARCH_EFM32)       += clk-efm32gg.o
-obj-$(CONFIG_ARCH_NOMADIK)     += clk-nomadik.o
-obj-$(CONFIG_ARCH_HIGHBANK)    += clk-highbank.o
-obj-$(CONFIG_ARCH_HI3xxx)      += hisilicon/
-obj-$(CONFIG_ARCH_NSPIRE)      += clk-nspire.o
-obj-$(CONFIG_ARCH_MXS)         += mxs/
-obj-$(CONFIG_ARCH_SOCFPGA)     += socfpga/
-obj-$(CONFIG_PLAT_SPEAR)       += spear/
-obj-$(CONFIG_ARCH_U300)                += clk-u300.o
-obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
-obj-$(CONFIG_COMMON_CLK_QCOM)  += qcom/
-obj-$(CONFIG_PLAT_ORION)       += mvebu/
+# hardware specific clock types
+# please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
+obj-$(CONFIG_ARCH_BCM2835)             += clk-bcm2835.o
+obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
+obj-$(CONFIG_ARCH_HIGHBANK)            += clk-highbank.o
+obj-$(CONFIG_MACH_LOONGSON1)           += clk-ls1x.o
+obj-$(CONFIG_COMMON_CLK_MAX77686)      += clk-max77686.o
+obj-$(CONFIG_ARCH_NOMADIK)             += clk-nomadik.o
+obj-$(CONFIG_ARCH_NSPIRE)              += clk-nspire.o
+obj-$(CONFIG_CLK_PPC_CORENET)          += clk-ppc-corenet.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11)       += clk-s2mps11.o
+obj-$(CONFIG_COMMON_CLK_SI5351)                += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_SI570)         += clk-si570.o
+obj-$(CONFIG_CLK_TWL6040)              += clk-twl6040.o
+obj-$(CONFIG_ARCH_U300)                        += clk-u300.o
+obj-$(CONFIG_ARCH_VT8500)              += clk-vt8500.o
+obj-$(CONFIG_COMMON_CLK_WM831X)                += clk-wm831x.o
+obj-$(CONFIG_COMMON_CLK_XGENE)         += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_AT91)          += at91/
+obj-$(CONFIG_ARCH_HI3xxx)              += hisilicon/
+obj-$(CONFIG_COMMON_CLK_KEYSTONE)      += keystone/
 ifeq ($(CONFIG_COMMON_CLK), y)
-obj-$(CONFIG_ARCH_MMP)         += mmp/
+obj-$(CONFIG_ARCH_MMP)                 += mmp/
 endif
-obj-$(CONFIG_MACH_LOONGSON1)   += clk-ls1x.o
-obj-$(CONFIG_ARCH_ROCKCHIP)    += rockchip/
-obj-$(CONFIG_ARCH_SUNXI)       += sunxi/
-obj-$(CONFIG_ARCH_U8500)       += ux500/
-obj-$(CONFIG_ARCH_VT8500)      += clk-vt8500.o
-obj-$(CONFIG_ARCH_SIRF)                += sirf/
-obj-$(CONFIG_ARCH_ZYNQ)                += zynq/
-obj-$(CONFIG_ARCH_TEGRA)       += tegra/
-obj-$(CONFIG_PLAT_SAMSUNG)     += samsung/
-obj-$(CONFIG_COMMON_CLK_XGENE)  += clk-xgene.o
-obj-$(CONFIG_COMMON_CLK_KEYSTONE)      += keystone/
-obj-$(CONFIG_COMMON_CLK_AT91)  += at91/
+obj-$(CONFIG_PLAT_ORION)               += mvebu/
+obj-$(CONFIG_ARCH_MXS)                 += mxs/
+obj-$(CONFIG_COMMON_CLK_QCOM)          += qcom/
+obj-$(CONFIG_ARCH_ROCKCHIP)            += rockchip/
+obj-$(CONFIG_PLAT_SAMSUNG)             += samsung/
 obj-$(CONFIG_ARCH_SHMOBILE_MULTI)      += shmobile/
-
-obj-$(CONFIG_X86)              += x86/
-
-# Chip specific
-obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
-obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
-obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
-obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
-obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
-obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
-obj-$(CONFIG_CLK_TWL6040)      += clk-twl6040.o
-obj-$(CONFIG_CLK_PPC_CORENET)  += clk-ppc-corenet.o
+obj-$(CONFIG_ARCH_SIRF)                        += sirf/
+obj-$(CONFIG_ARCH_SOCFPGA)             += socfpga/
+obj-$(CONFIG_PLAT_SPEAR)               += spear/
+obj-$(CONFIG_ARCH_SUNXI)               += sunxi/
+obj-$(CONFIG_ARCH_TEGRA)               += tegra/
+obj-$(CONFIG_ARCH_OMAP2PLUS)           += ti/
+obj-$(CONFIG_ARCH_U8500)               += ux500/
+obj-$(CONFIG_COMMON_CLK_VERSATILE)     += versatile/
+obj-$(CONFIG_X86)                      += x86/
+obj-$(CONFIG_ARCH_ZYNQ)                        += zynq/
index c50e83744b0aec3a0a8841f068bbbe05adc5ef8d..3b2a66f78755113fe9afde8f9caaa4d4e801260b 100644 (file)
@@ -1111,11 +1111,11 @@ static const struct of_device_id si5351_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, si5351_dt_ids);
 
-static int si5351_dt_parse(struct i2c_client *client)
+static int si5351_dt_parse(struct i2c_client *client,
+                          enum si5351_variant variant)
 {
        struct device_node *child, *np = client->dev.of_node;
        struct si5351_platform_data *pdata;
-       const struct of_device_id *match;
        struct property *prop;
        const __be32 *p;
        int num = 0;
@@ -1124,15 +1124,10 @@ static int si5351_dt_parse(struct i2c_client *client)
        if (np == NULL)
                return 0;
 
-       match = of_match_node(si5351_dt_ids, np);
-       if (match == NULL)
-               return -EINVAL;
-
        pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;
 
-       pdata->variant = (enum si5351_variant)match->data;
        pdata->clk_xtal = of_clk_get(np, 0);
        if (!IS_ERR(pdata->clk_xtal))
                clk_put(pdata->clk_xtal);
@@ -1163,7 +1158,7 @@ static int si5351_dt_parse(struct i2c_client *client)
                        pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
                        break;
                case 1:
-                       if (pdata->variant != SI5351_VARIANT_C) {
+                       if (variant != SI5351_VARIANT_C) {
                                dev_err(&client->dev,
                                        "invalid parent %d for pll %d\n",
                                        val, num);
@@ -1187,7 +1182,7 @@ static int si5351_dt_parse(struct i2c_client *client)
                }
 
                if (num >= 8 ||
-                   (pdata->variant == SI5351_VARIANT_A3 && num >= 3)) {
+                   (variant == SI5351_VARIANT_A3 && num >= 3)) {
                        dev_err(&client->dev, "invalid clkout %d\n", num);
                        return -EINVAL;
                }
@@ -1226,7 +1221,7 @@ static int si5351_dt_parse(struct i2c_client *client)
                                        SI5351_CLKOUT_SRC_XTAL;
                                break;
                        case 3:
-                               if (pdata->variant != SI5351_VARIANT_C) {
+                               if (variant != SI5351_VARIANT_C) {
                                        dev_err(&client->dev,
                                                "invalid parent %d for clkout %d\n",
                                                val, num);
@@ -1298,7 +1293,7 @@ static int si5351_dt_parse(struct i2c_client *client)
        return 0;
 }
 #else
-static int si5351_dt_parse(struct i2c_client *client)
+static int si5351_dt_parse(struct i2c_client *client, enum si5351_variant variant)
 {
        return 0;
 }
@@ -1307,6 +1302,7 @@ static int si5351_dt_parse(struct i2c_client *client)
 static int si5351_i2c_probe(struct i2c_client *client,
                            const struct i2c_device_id *id)
 {
+       enum si5351_variant variant = (enum si5351_variant)id->driver_data;
        struct si5351_platform_data *pdata;
        struct si5351_driver_data *drvdata;
        struct clk_init_data init;
@@ -1315,7 +1311,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
        u8 num_parents, num_clocks;
        int ret, n;
 
-       ret = si5351_dt_parse(client);
+       ret = si5351_dt_parse(client, variant);
        if (ret)
                return ret;
 
@@ -1331,7 +1327,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, drvdata);
        drvdata->client = client;
-       drvdata->variant = pdata->variant;
+       drvdata->variant = variant;
        drvdata->pxtal = pdata->clk_xtal;
        drvdata->pclkin = pdata->clk_clkin;
 
@@ -1568,10 +1564,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
 }
 
 static const struct i2c_device_id si5351_i2c_ids[] = {
-       { "si5351a", 0 },
-       { "si5351a-msop", 0 },
-       { "si5351b", 0 },
-       { "si5351c", 0 },
+       { "si5351a", SI5351_VARIANT_A },
+       { "si5351a-msop", SI5351_VARIANT_A3 },
+       { "si5351b", SI5351_VARIANT_B },
+       { "si5351c", SI5351_VARIANT_C },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
index c0dbf2676872995c1ff698cefe37719f43f1236a..4d0746b50c32e8a6265fb5a410371daebfb5ba85 100644 (file)
 #define  SI5351_XTAL_ENABLE                    (1<<6)
 #define  SI5351_MULTISYNTH_ENABLE              (1<<4)
 
+/**
+ * enum si5351_variant - SiLabs Si5351 chip variant
+ * @SI5351_VARIANT_A: Si5351A (8 output clocks, XTAL input)
+ * @SI5351_VARIANT_A3: Si5351A MSOP10 (3 output clocks, XTAL input)
+ * @SI5351_VARIANT_B: Si5351B (8 output clocks, XTAL/VXCO input)
+ * @SI5351_VARIANT_C: Si5351C (8 output clocks, XTAL/CLKIN input)
+ */
+enum si5351_variant {
+       SI5351_VARIANT_A = 1,
+       SI5351_VARIANT_A3 = 2,
+       SI5351_VARIANT_B = 3,
+       SI5351_VARIANT_C = 4,
+};
+
 #endif
index 2b38dc99063f1b6574df031ccea84af5df9cb9e6..5517944495d893cc3c8dd60569b58e67e9289765 100644 (file)
@@ -575,16 +575,19 @@ struct clk_hw *__clk_get_hw(struct clk *clk)
 {
        return !clk ? NULL : clk->hw;
 }
+EXPORT_SYMBOL_GPL(__clk_get_hw);
 
 u8 __clk_get_num_parents(struct clk *clk)
 {
        return !clk ? 0 : clk->num_parents;
 }
+EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
 struct clk *__clk_get_parent(struct clk *clk)
 {
        return !clk ? NULL : clk->parent;
 }
+EXPORT_SYMBOL_GPL(__clk_get_parent);
 
 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
 {
@@ -598,6 +601,7 @@ struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
        else
                return clk->parents[index];
 }
+EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
@@ -629,6 +633,7 @@ unsigned long __clk_get_rate(struct clk *clk)
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(__clk_get_rate);
 
 unsigned long __clk_get_accuracy(struct clk *clk)
 {
@@ -685,6 +690,7 @@ bool __clk_is_enabled(struct clk *clk)
 out:
        return !!ret;
 }
+EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
 {
@@ -776,6 +782,7 @@ out:
 
        return best;
 }
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
 /***        clk api        ***/
 
@@ -2373,8 +2380,6 @@ struct of_clk_provider {
        void *data;
 };
 
-extern struct of_device_id __clk_of_table[];
-
 static const struct of_device_id __clk_of_table_sentinel
        __used __section(__clk_of_table_end);
 
@@ -2534,7 +2539,7 @@ void __init of_clk_init(const struct of_device_id *matches)
        struct device_node *np;
 
        if (!matches)
-               matches = __clk_of_table;
+               matches = &__clk_of_table;
 
        for_each_matching_node_and_match(np, matches, &match) {
                of_clk_init_cb_t clk_init_cb = match->data;
index 190d38433202dbe43e0714a72471df03a243c7e6..f60db2ef1aee6a8d9150ec6c896827921a8e803f 100644 (file)
@@ -1,11 +1,11 @@
 obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
 
-clk-qcom-$(CONFIG_COMMON_CLK_QCOM) += clk-regmap.o
-clk-qcom-$(CONFIG_COMMON_CLK_QCOM) += clk-pll.o
-clk-qcom-$(CONFIG_COMMON_CLK_QCOM) += clk-rcg.o
-clk-qcom-$(CONFIG_COMMON_CLK_QCOM) += clk-rcg2.o
-clk-qcom-$(CONFIG_COMMON_CLK_QCOM) += clk-branch.o
-clk-qcom-$(CONFIG_COMMON_CLK_QCOM) += reset.o
+clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-pll.o
+clk-qcom-y += clk-rcg.o
+clk-qcom-y += clk-rcg2.o
+clk-qcom-y += clk-branch.o
+clk-qcom-y += reset.o
 
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
index 529e11dc2c6b0e8af21506211a7756f04b324123..81e6d2f49aa001a9587b33e848c0b4692d5d82e0 100644 (file)
@@ -375,7 +375,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
                break;
        default:
                break;
-       };
+       }
 
        /* Set new configuration. */
        __raw_writel(con1, pll->con_reg + 0x4);
index 659e4ea31893a42bee5fb701d54fcbb34776ed67..abb6c5ac8a10297505a8a7f1cb1cb0d5f8eccf16 100644 (file)
@@ -875,7 +875,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
        if (!clk_data)
                return;
 
-       clks = kzalloc(SUNXI_DIVS_MAX_QTY * sizeof(struct clk *), GFP_KERNEL);
+       clks = kzalloc((SUNXI_DIVS_MAX_QTY+1) * sizeof(*clks), GFP_KERNEL);
        if (!clks)
                goto free_clkdata;
 
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
new file mode 100644 (file)
index 0000000..4319d40
--- /dev/null
@@ -0,0 +1,11 @@
+ifneq ($(CONFIG_OF),)
+obj-y                                  += clk.o autoidle.o clockdomain.o
+clk-common                             = dpll.o composite.o divider.o gate.o \
+                                         fixed-factor.o mux.o apll.o
+obj-$(CONFIG_SOC_AM33XX)               += $(clk-common) clk-33xx.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP4)               += $(clk-common) clk-44xx.o
+obj-$(CONFIG_SOC_OMAP5)                        += $(clk-common) clk-54xx.o
+obj-$(CONFIG_SOC_DRA7XX)               += $(clk-common) clk-7xx.o
+obj-$(CONFIG_SOC_AM43XX)               += $(clk-common) clk-43xx.o
+endif
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
new file mode 100644 (file)
index 0000000..b986f61
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * OMAP APLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#define APLL_FORCE_LOCK 0x1
+#define APLL_AUTO_IDLE 0x2
+#define MAX_APLL_WAIT_TRIES            1000000
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int dra7_apll_enable(struct clk_hw *hw)
+{
+       struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+       int r = 0, i = 0;
+       struct dpll_data *ad;
+       const char *clk_name;
+       u8 state = 1;
+       u32 v;
+
+       ad = clk->dpll_data;
+       if (!ad)
+               return -EINVAL;
+
+       clk_name = __clk_get_name(clk->hw.clk);
+
+       state <<= __ffs(ad->idlest_mask);
+
+       /* Check is already locked */
+       v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+
+       if ((v & ad->idlest_mask) == state)
+               return r;
+
+       v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+       v &= ~ad->enable_mask;
+       v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
+       ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+
+       state <<= __ffs(ad->idlest_mask);
+
+       while (1) {
+               v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
+               if ((v & ad->idlest_mask) == state)
+                       break;
+               if (i > MAX_APLL_WAIT_TRIES)
+                       break;
+               i++;
+               udelay(1);
+       }
+
+       if (i == MAX_APLL_WAIT_TRIES) {
+               pr_warn("clock: %s failed transition to '%s'\n",
+                       clk_name, (state) ? "locked" : "bypassed");
+       } else {
+               pr_debug("clock: %s transition to '%s' in %d loops\n",
+                        clk_name, (state) ? "locked" : "bypassed", i);
+
+               r = 0;
+       }
+
+       return r;
+}
+
+static void dra7_apll_disable(struct clk_hw *hw)
+{
+       struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+       struct dpll_data *ad;
+       u8 state = 1;
+       u32 v;
+
+       ad = clk->dpll_data;
+
+       state <<= __ffs(ad->idlest_mask);
+
+       v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+       v &= ~ad->enable_mask;
+       v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
+       ti_clk_ll_ops->clk_writel(v, ad->control_reg);
+}
+
+static int dra7_apll_is_enabled(struct clk_hw *hw)
+{
+       struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+       struct dpll_data *ad;
+       u32 v;
+
+       ad = clk->dpll_data;
+
+       v = ti_clk_ll_ops->clk_readl(ad->control_reg);
+       v &= ad->enable_mask;
+
+       v >>= __ffs(ad->enable_mask);
+
+       return v == APLL_AUTO_IDLE ? 0 : 1;
+}
+
+static u8 dra7_init_apll_parent(struct clk_hw *hw)
+{
+       return 0;
+}
+
+static const struct clk_ops apll_ck_ops = {
+       .enable         = &dra7_apll_enable,
+       .disable        = &dra7_apll_disable,
+       .is_enabled     = &dra7_apll_is_enabled,
+       .get_parent     = &dra7_init_apll_parent,
+};
+
+static void __init omap_clk_register_apll(struct clk_hw *hw,
+                                         struct device_node *node)
+{
+       struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+       struct dpll_data *ad = clk_hw->dpll_data;
+       struct clk *clk;
+
+       ad->clk_ref = of_clk_get(node, 0);
+       ad->clk_bypass = of_clk_get(node, 1);
+
+       if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
+               pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
+                        node->name);
+               if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+                       return;
+
+               goto cleanup;
+       }
+
+       clk = clk_register(NULL, &clk_hw->hw);
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               kfree(clk_hw->hw.init->parent_names);
+               kfree(clk_hw->hw.init);
+               return;
+       }
+
+cleanup:
+       kfree(clk_hw->dpll_data);
+       kfree(clk_hw->hw.init->parent_names);
+       kfree(clk_hw->hw.init);
+       kfree(clk_hw);
+}
+
+static void __init of_dra7_apll_setup(struct device_node *node)
+{
+       struct dpll_data *ad = NULL;
+       struct clk_hw_omap *clk_hw = NULL;
+       struct clk_init_data *init = NULL;
+       const char **parent_names = NULL;
+       int i;
+
+       ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!ad || !clk_hw || !init)
+               goto cleanup;
+
+       clk_hw->dpll_data = ad;
+       clk_hw->hw.init = init;
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       init->name = node->name;
+       init->ops = &apll_ck_ops;
+
+       init->num_parents = of_clk_get_parent_count(node);
+       if (init->num_parents < 1) {
+               pr_err("dra7 apll %s must have parent(s)\n", node->name);
+               goto cleanup;
+       }
+
+       parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+       if (!parent_names)
+               goto cleanup;
+
+       for (i = 0; i < init->num_parents; i++)
+               parent_names[i] = of_clk_get_parent_name(node, i);
+
+       init->parent_names = parent_names;
+
+       ad->control_reg = ti_clk_get_reg_addr(node, 0);
+       ad->idlest_reg = ti_clk_get_reg_addr(node, 1);
+
+       if (!ad->control_reg || !ad->idlest_reg)
+               goto cleanup;
+
+       ad->idlest_mask = 0x1;
+       ad->enable_mask = 0x3;
+
+       omap_clk_register_apll(&clk_hw->hw, node);
+       return;
+
+cleanup:
+       kfree(parent_names);
+       kfree(ad);
+       kfree(clk_hw);
+       kfree(init);
+}
+CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
new file mode 100644 (file)
index 0000000..8912ff8
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * TI clock autoidle support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+struct clk_ti_autoidle {
+       void __iomem            *reg;
+       u8                      shift;
+       u8                      flags;
+       const char              *name;
+       struct list_head        node;
+};
+
+#define AUTOIDLE_LOW           0x1
+
+static LIST_HEAD(autoidle_clks);
+
+static void ti_allow_autoidle(struct clk_ti_autoidle *clk)
+{
+       u32 val;
+
+       val = ti_clk_ll_ops->clk_readl(clk->reg);
+
+       if (clk->flags & AUTOIDLE_LOW)
+               val &= ~(1 << clk->shift);
+       else
+               val |= (1 << clk->shift);
+
+       ti_clk_ll_ops->clk_writel(val, clk->reg);
+}
+
+static void ti_deny_autoidle(struct clk_ti_autoidle *clk)
+{
+       u32 val;
+
+       val = ti_clk_ll_ops->clk_readl(clk->reg);
+
+       if (clk->flags & AUTOIDLE_LOW)
+               val |= (1 << clk->shift);
+       else
+               val &= ~(1 << clk->shift);
+
+       ti_clk_ll_ops->clk_writel(val, clk->reg);
+}
+
+/**
+ * of_ti_clk_allow_autoidle_all - enable autoidle for all clocks
+ *
+ * Enables hardware autoidle for all registered DT clocks, which have
+ * the feature.
+ */
+void of_ti_clk_allow_autoidle_all(void)
+{
+       struct clk_ti_autoidle *c;
+
+       list_for_each_entry(c, &autoidle_clks, node)
+               ti_allow_autoidle(c);
+}
+
+/**
+ * of_ti_clk_deny_autoidle_all - disable autoidle for all clocks
+ *
+ * Disables hardware autoidle for all registered DT clocks, which have
+ * the feature.
+ */
+void of_ti_clk_deny_autoidle_all(void)
+{
+       struct clk_ti_autoidle *c;
+
+       list_for_each_entry(c, &autoidle_clks, node)
+               ti_deny_autoidle(c);
+}
+
+/**
+ * of_ti_clk_autoidle_setup - sets up hardware autoidle for a clock
+ * @node: pointer to the clock device node
+ *
+ * Checks if a clock has hardware autoidle support or not (check
+ * for presence of 'ti,autoidle-shift' property in the device tree
+ * node) and sets up the hardware autoidle feature for the clock
+ * if available. If autoidle is available, the clock is also added
+ * to the autoidle list for later processing. Returns 0 on success,
+ * negative error value on failure.
+ */
+int __init of_ti_clk_autoidle_setup(struct device_node *node)
+{
+       u32 shift;
+       struct clk_ti_autoidle *clk;
+
+       /* Check if this clock has autoidle support or not */
+       if (of_property_read_u32(node, "ti,autoidle-shift", &shift))
+               return 0;
+
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+
+       if (!clk)
+               return -ENOMEM;
+
+       clk->shift = shift;
+       clk->name = node->name;
+       clk->reg = ti_clk_get_reg_addr(node, 0);
+
+       if (!clk->reg) {
+               kfree(clk);
+               return -EINVAL;
+       }
+
+       if (of_property_read_bool(node, "ti,invert-autoidle-bit"))
+               clk->flags |= AUTOIDLE_LOW;
+
+       list_add(&clk->node, &autoidle_clks);
+
+       return 0;
+}
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
new file mode 100644 (file)
index 0000000..776ee45
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * AM33XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk am33xx_clks[] = {
+       DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
+       DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
+       DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+       DT_CLK(NULL, "virt_24000000_ck", "virt_24000000_ck"),
+       DT_CLK(NULL, "virt_25000000_ck", "virt_25000000_ck"),
+       DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+       DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "tclkin_ck", "tclkin_ck"),
+       DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+       DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+       DT_CLK(NULL, "dpll_core_m4_ck", "dpll_core_m4_ck"),
+       DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
+       DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
+       DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+       DT_CLK("cpu0", NULL, "dpll_mpu_ck"),
+       DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+       DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+       DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+       DT_CLK(NULL, "dpll_ddr_m2_div2_ck", "dpll_ddr_m2_div2_ck"),
+       DT_CLK(NULL, "dpll_disp_ck", "dpll_disp_ck"),
+       DT_CLK(NULL, "dpll_disp_m2_ck", "dpll_disp_m2_ck"),
+       DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+       DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+       DT_CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", "dpll_per_m2_div4_wkupdm_ck"),
+       DT_CLK(NULL, "dpll_per_m2_div4_ck", "dpll_per_m2_div4_ck"),
+       DT_CLK(NULL, "adc_tsc_fck", "adc_tsc_fck"),
+       DT_CLK(NULL, "cefuse_fck", "cefuse_fck"),
+       DT_CLK(NULL, "clkdiv32k_ck", "clkdiv32k_ck"),
+       DT_CLK(NULL, "clkdiv32k_ick", "clkdiv32k_ick"),
+       DT_CLK(NULL, "dcan0_fck", "dcan0_fck"),
+       DT_CLK("481cc000.d_can", NULL, "dcan0_fck"),
+       DT_CLK(NULL, "dcan1_fck", "dcan1_fck"),
+       DT_CLK("481d0000.d_can", NULL, "dcan1_fck"),
+       DT_CLK(NULL, "pruss_ocp_gclk", "pruss_ocp_gclk"),
+       DT_CLK(NULL, "mcasp0_fck", "mcasp0_fck"),
+       DT_CLK(NULL, "mcasp1_fck", "mcasp1_fck"),
+       DT_CLK(NULL, "mmu_fck", "mmu_fck"),
+       DT_CLK(NULL, "smartreflex0_fck", "smartreflex0_fck"),
+       DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
+       DT_CLK(NULL, "sha0_fck", "sha0_fck"),
+       DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+       DT_CLK(NULL, "rng_fck", "rng_fck"),
+       DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+       DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+       DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+       DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+       DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+       DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+       DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+       DT_CLK(NULL, "usbotg_fck", "usbotg_fck"),
+       DT_CLK(NULL, "ieee5000_fck", "ieee5000_fck"),
+       DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+       DT_CLK(NULL, "l4_rtc_gclk", "l4_rtc_gclk"),
+       DT_CLK(NULL, "l3_gclk", "l3_gclk"),
+       DT_CLK(NULL, "dpll_core_m4_div2_ck", "dpll_core_m4_div2_ck"),
+       DT_CLK(NULL, "l4hs_gclk", "l4hs_gclk"),
+       DT_CLK(NULL, "l3s_gclk", "l3s_gclk"),
+       DT_CLK(NULL, "l4fw_gclk", "l4fw_gclk"),
+       DT_CLK(NULL, "l4ls_gclk", "l4ls_gclk"),
+       DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
+       DT_CLK(NULL, "sysclk_div_ck", "sysclk_div_ck"),
+       DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+       DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+       DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
+       DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
+       DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+       DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+       DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+       DT_CLK(NULL, "lcd_gclk", "lcd_gclk"),
+       DT_CLK(NULL, "mmc_clk", "mmc_clk"),
+       DT_CLK(NULL, "gfx_fclk_clksel_ck", "gfx_fclk_clksel_ck"),
+       DT_CLK(NULL, "gfx_fck_div_ck", "gfx_fck_div_ck"),
+       DT_CLK(NULL, "sysclkout_pre_ck", "sysclkout_pre_ck"),
+       DT_CLK(NULL, "clkout2_div_ck", "clkout2_div_ck"),
+       DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "dbg_sysclk_ck", "dbg_sysclk_ck"),
+       DT_CLK(NULL, "dbg_clka_ck", "dbg_clka_ck"),
+       DT_CLK(NULL, "stm_pmd_clock_mux_ck", "stm_pmd_clock_mux_ck"),
+       DT_CLK(NULL, "trace_pmd_clk_mux_ck", "trace_pmd_clk_mux_ck"),
+       DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"),
+       DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"),
+       DT_CLK(NULL, "clkout2_ck", "clkout2_ck"),
+       DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"),
+       DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"),
+       DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"),
+       { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+       "dpll_ddr_m2_ck",
+       "dpll_mpu_m2_ck",
+       "l3_gclk",
+       "l4hs_gclk",
+       "l4fw_gclk",
+       "l4ls_gclk",
+       /* Required for external peripherals like, Audio codecs */
+       "clkout2_ck",
+};
+
+int __init am33xx_dt_clk_init(void)
+{
+       struct clk *clk1, *clk2;
+
+       ti_dt_clocks_register(am33xx_clks);
+
+       omap2_clk_disable_autoidle_all();
+
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       /* TRM ERRATA: Timer 3 & 6 default parent (TCLKIN) may not be always
+        *    physically present, in such a case HWMOD enabling of
+        *    clock would be failure with default parent. And timer
+        *    probe thinks clock is already enabled, this leads to
+        *    crash upon accessing timer 3 & 6 registers in probe.
+        *    Fix by setting parent of both these timers to master
+        *    oscillator clock.
+        */
+
+       clk1 = clk_get_sys(NULL, "sys_clkin_ck");
+       clk2 = clk_get_sys(NULL, "timer3_fck");
+       clk_set_parent(clk2, clk1);
+
+       clk2 = clk_get_sys(NULL, "timer6_fck");
+       clk_set_parent(clk2, clk1);
+       /*
+        * The On-Chip 32K RC Osc clock is not an accurate clock-source as per
+        * the design/spec, so as a result, for example, timer which supposed
+        * to get expired @60Sec, but will expire somewhere ~@40Sec, which is
+        * not expected by any use-case, so change WDT1 clock source to PRCM
+        * 32KHz clock.
+        */
+       clk1 = clk_get_sys(NULL, "wdt1_fck");
+       clk2 = clk_get_sys(NULL, "clkdiv32k_ick");
+       clk_set_parent(clk1, clk2);
+
+       return 0;
+}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
new file mode 100644 (file)
index 0000000..d323023
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * OMAP3 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+
+static struct ti_dt_clk omap3xxx_clks[] = {
+       DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"),
+       DT_CLK(NULL, "omap_32k_fck", "omap_32k_fck"),
+       DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
+       DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
+       DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+       DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+       DT_CLK(NULL, "virt_38_4m_ck", "virt_38_4m_ck"),
+       DT_CLK(NULL, "osc_sys_ck", "osc_sys_ck"),
+       DT_CLK("twl", "fck", "osc_sys_ck"),
+       DT_CLK(NULL, "sys_ck", "sys_ck"),
+       DT_CLK(NULL, "omap_96m_alwon_fck", "omap_96m_alwon_fck"),
+       DT_CLK("etb", "emu_core_alwon_ck", "emu_core_alwon_ck"),
+       DT_CLK(NULL, "sys_altclk", "sys_altclk"),
+       DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
+       DT_CLK(NULL, "sys_clkout1", "sys_clkout1"),
+       DT_CLK(NULL, "dpll1_ck", "dpll1_ck"),
+       DT_CLK(NULL, "dpll1_x2_ck", "dpll1_x2_ck"),
+       DT_CLK(NULL, "dpll1_x2m2_ck", "dpll1_x2m2_ck"),
+       DT_CLK(NULL, "dpll3_ck", "dpll3_ck"),
+       DT_CLK(NULL, "core_ck", "core_ck"),
+       DT_CLK(NULL, "dpll3_x2_ck", "dpll3_x2_ck"),
+       DT_CLK(NULL, "dpll3_m2_ck", "dpll3_m2_ck"),
+       DT_CLK(NULL, "dpll3_m2x2_ck", "dpll3_m2x2_ck"),
+       DT_CLK(NULL, "dpll3_m3_ck", "dpll3_m3_ck"),
+       DT_CLK(NULL, "dpll3_m3x2_ck", "dpll3_m3x2_ck"),
+       DT_CLK(NULL, "dpll4_ck", "dpll4_ck"),
+       DT_CLK(NULL, "dpll4_x2_ck", "dpll4_x2_ck"),
+       DT_CLK(NULL, "omap_96m_fck", "omap_96m_fck"),
+       DT_CLK(NULL, "cm_96m_fck", "cm_96m_fck"),
+       DT_CLK(NULL, "omap_54m_fck", "omap_54m_fck"),
+       DT_CLK(NULL, "omap_48m_fck", "omap_48m_fck"),
+       DT_CLK(NULL, "omap_12m_fck", "omap_12m_fck"),
+       DT_CLK(NULL, "dpll4_m2_ck", "dpll4_m2_ck"),
+       DT_CLK(NULL, "dpll4_m2x2_ck", "dpll4_m2x2_ck"),
+       DT_CLK(NULL, "dpll4_m3_ck", "dpll4_m3_ck"),
+       DT_CLK(NULL, "dpll4_m3x2_ck", "dpll4_m3x2_ck"),
+       DT_CLK(NULL, "dpll4_m4_ck", "dpll4_m4_ck"),
+       DT_CLK(NULL, "dpll4_m4x2_ck", "dpll4_m4x2_ck"),
+       DT_CLK(NULL, "dpll4_m5_ck", "dpll4_m5_ck"),
+       DT_CLK(NULL, "dpll4_m5x2_ck", "dpll4_m5x2_ck"),
+       DT_CLK(NULL, "dpll4_m6_ck", "dpll4_m6_ck"),
+       DT_CLK(NULL, "dpll4_m6x2_ck", "dpll4_m6x2_ck"),
+       DT_CLK("etb", "emu_per_alwon_ck", "emu_per_alwon_ck"),
+       DT_CLK(NULL, "clkout2_src_ck", "clkout2_src_ck"),
+       DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
+       DT_CLK(NULL, "corex2_fck", "corex2_fck"),
+       DT_CLK(NULL, "dpll1_fck", "dpll1_fck"),
+       DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+       DT_CLK(NULL, "arm_fck", "arm_fck"),
+       DT_CLK("etb", "emu_mpu_alwon_ck", "emu_mpu_alwon_ck"),
+       DT_CLK(NULL, "l3_ick", "l3_ick"),
+       DT_CLK(NULL, "l4_ick", "l4_ick"),
+       DT_CLK(NULL, "rm_ick", "rm_ick"),
+       DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
+       DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
+       DT_CLK(NULL, "core_96m_fck", "core_96m_fck"),
+       DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
+       DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
+       DT_CLK(NULL, "i2c3_fck", "i2c3_fck"),
+       DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
+       DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
+       DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
+       DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
+       DT_CLK(NULL, "core_48m_fck", "core_48m_fck"),
+       DT_CLK(NULL, "mcspi4_fck", "mcspi4_fck"),
+       DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
+       DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
+       DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
+       DT_CLK(NULL, "uart2_fck", "uart2_fck"),
+       DT_CLK(NULL, "uart1_fck", "uart1_fck"),
+       DT_CLK(NULL, "core_12m_fck", "core_12m_fck"),
+       DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
+       DT_CLK(NULL, "hdq_fck", "hdq_fck"),
+       DT_CLK(NULL, "core_l3_ick", "core_l3_ick"),
+       DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
+       DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
+       DT_CLK(NULL, "core_l4_ick", "core_l4_ick"),
+       DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
+       DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
+       DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
+       DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
+       DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
+       DT_CLK(NULL, "hdq_ick", "hdq_ick"),
+       DT_CLK("omap2_mcspi.4", "ick", "mcspi4_ick"),
+       DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
+       DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
+       DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
+       DT_CLK(NULL, "mcspi4_ick", "mcspi4_ick"),
+       DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
+       DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
+       DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
+       DT_CLK("omap_i2c.3", "ick", "i2c3_ick"),
+       DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
+       DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
+       DT_CLK(NULL, "i2c3_ick", "i2c3_ick"),
+       DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
+       DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
+       DT_CLK(NULL, "uart2_ick", "uart2_ick"),
+       DT_CLK(NULL, "uart1_ick", "uart1_ick"),
+       DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
+       DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
+       DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
+       DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
+       DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
+       DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
+       DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
+       DT_CLK(NULL, "dss_tv_fck", "dss_tv_fck"),
+       DT_CLK(NULL, "dss_96m_fck", "dss_96m_fck"),
+       DT_CLK(NULL, "dss2_alwon_fck", "dss2_alwon_fck"),
+       DT_CLK(NULL, "utmi_p1_gfclk", "dummy_ck"),
+       DT_CLK(NULL, "utmi_p2_gfclk", "dummy_ck"),
+       DT_CLK(NULL, "xclk60mhsp1_ck", "dummy_ck"),
+       DT_CLK(NULL, "xclk60mhsp2_ck", "dummy_ck"),
+       DT_CLK(NULL, "init_60m_fclk", "dummy_ck"),
+       DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
+       DT_CLK(NULL, "aes2_ick", "aes2_ick"),
+       DT_CLK(NULL, "wkup_32k_fck", "wkup_32k_fck"),
+       DT_CLK(NULL, "gpio1_dbck", "gpio1_dbck"),
+       DT_CLK(NULL, "sha12_ick", "sha12_ick"),
+       DT_CLK(NULL, "wdt2_fck", "wdt2_fck"),
+       DT_CLK("omap_wdt", "ick", "wdt2_ick"),
+       DT_CLK(NULL, "wdt2_ick", "wdt2_ick"),
+       DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
+       DT_CLK(NULL, "gpio1_ick", "gpio1_ick"),
+       DT_CLK(NULL, "omap_32ksync_ick", "omap_32ksync_ick"),
+       DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
+       DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
+       DT_CLK(NULL, "per_96m_fck", "per_96m_fck"),
+       DT_CLK(NULL, "per_48m_fck", "per_48m_fck"),
+       DT_CLK(NULL, "uart3_fck", "uart3_fck"),
+       DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
+       DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
+       DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
+       DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
+       DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
+       DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
+       DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
+       DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
+       DT_CLK(NULL, "per_32k_alwon_fck", "per_32k_alwon_fck"),
+       DT_CLK(NULL, "gpio6_dbck", "gpio6_dbck"),
+       DT_CLK(NULL, "gpio5_dbck", "gpio5_dbck"),
+       DT_CLK(NULL, "gpio4_dbck", "gpio4_dbck"),
+       DT_CLK(NULL, "gpio3_dbck", "gpio3_dbck"),
+       DT_CLK(NULL, "gpio2_dbck", "gpio2_dbck"),
+       DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
+       DT_CLK(NULL, "per_l4_ick", "per_l4_ick"),
+       DT_CLK(NULL, "gpio6_ick", "gpio6_ick"),
+       DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
+       DT_CLK(NULL, "gpio4_ick", "gpio4_ick"),
+       DT_CLK(NULL, "gpio3_ick", "gpio3_ick"),
+       DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
+       DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
+       DT_CLK(NULL, "uart3_ick", "uart3_ick"),
+       DT_CLK(NULL, "uart4_ick", "uart4_ick"),
+       DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
+       DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
+       DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
+       DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
+       DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
+       DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
+       DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
+       DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
+       DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
+       DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
+       DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
+       DT_CLK(NULL, "mcbsp4_ick", "mcbsp2_ick"),
+       DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
+       DT_CLK(NULL, "mcbsp2_ick", "mcbsp4_ick"),
+       DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
+       DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
+       DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
+       DT_CLK("etb", "emu_src_ck", "emu_src_ck"),
+       DT_CLK(NULL, "emu_src_ck", "emu_src_ck"),
+       DT_CLK(NULL, "pclk_fck", "pclk_fck"),
+       DT_CLK(NULL, "pclkx2_fck", "pclkx2_fck"),
+       DT_CLK(NULL, "atclk_fck", "atclk_fck"),
+       DT_CLK(NULL, "traceclk_src_fck", "traceclk_src_fck"),
+       DT_CLK(NULL, "traceclk_fck", "traceclk_fck"),
+       DT_CLK(NULL, "secure_32k_fck", "secure_32k_fck"),
+       DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
+       DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+       DT_CLK(NULL, "timer_32k_ck", "omap_32k_fck"),
+       DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
+       DT_CLK(NULL, "cpufreq_ck", "dpll1_ck"),
+       { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap34xx_omap36xx_clks[] = {
+       DT_CLK(NULL, "aes1_ick", "aes1_ick"),
+       DT_CLK("omap_rng", "ick", "rng_ick"),
+       DT_CLK("omap3-rom-rng", "ick", "rng_ick"),
+       DT_CLK(NULL, "sha11_ick", "sha11_ick"),
+       DT_CLK(NULL, "des1_ick", "des1_ick"),
+       DT_CLK(NULL, "cam_mclk", "cam_mclk"),
+       DT_CLK(NULL, "cam_ick", "cam_ick"),
+       DT_CLK(NULL, "csi2_96m_fck", "csi2_96m_fck"),
+       DT_CLK(NULL, "security_l3_ick", "security_l3_ick"),
+       DT_CLK(NULL, "pka_ick", "pka_ick"),
+       DT_CLK(NULL, "icr_ick", "icr_ick"),
+       DT_CLK("omap-aes", "ick", "aes2_ick"),
+       DT_CLK("omap-sham", "ick", "sha12_ick"),
+       DT_CLK(NULL, "des2_ick", "des2_ick"),
+       DT_CLK(NULL, "mspro_ick", "mspro_ick"),
+       DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
+       DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
+       DT_CLK(NULL, "sr1_fck", "sr1_fck"),
+       DT_CLK(NULL, "sr2_fck", "sr2_fck"),
+       DT_CLK(NULL, "sr_l4_ick", "sr_l4_ick"),
+       DT_CLK(NULL, "security_l4_ick2", "security_l4_ick2"),
+       DT_CLK(NULL, "wkup_l4_ick", "wkup_l4_ick"),
+       DT_CLK(NULL, "dpll2_fck", "dpll2_fck"),
+       DT_CLK(NULL, "iva2_ck", "iva2_ck"),
+       DT_CLK(NULL, "modem_fck", "modem_fck"),
+       DT_CLK(NULL, "sad2d_ick", "sad2d_ick"),
+       DT_CLK(NULL, "mad2d_ick", "mad2d_ick"),
+       DT_CLK(NULL, "mspro_fck", "mspro_fck"),
+       DT_CLK(NULL, "dpll2_ck", "dpll2_ck"),
+       DT_CLK(NULL, "dpll2_m2_ck", "dpll2_m2_ck"),
+       { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_omap3430es2plus_clks[] = {
+       DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es2"),
+       DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es2"),
+       DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es2"),
+       DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es2"),
+       DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es2"),
+       DT_CLK(NULL, "usim_fck", "usim_fck"),
+       DT_CLK(NULL, "usim_ick", "usim_ick"),
+       { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap3430es1_clks[] = {
+       DT_CLK(NULL, "gfx_l3_ck", "gfx_l3_ck"),
+       DT_CLK(NULL, "gfx_l3_fck", "gfx_l3_fck"),
+       DT_CLK(NULL, "gfx_l3_ick", "gfx_l3_ick"),
+       DT_CLK(NULL, "gfx_cg1_ck", "gfx_cg1_ck"),
+       DT_CLK(NULL, "gfx_cg2_ck", "gfx_cg2_ck"),
+       DT_CLK(NULL, "d2d_26m_fck", "d2d_26m_fck"),
+       DT_CLK(NULL, "fshostusb_fck", "fshostusb_fck"),
+       DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es1"),
+       DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es1"),
+       DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es1"),
+       DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es1"),
+       DT_CLK(NULL, "fac_ick", "fac_ick"),
+       DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es1"),
+       DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
+       DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es1"),
+       DT_CLK("omapdss_dss", "ick", "dss_ick_3430es1"),
+       DT_CLK(NULL, "dss_ick", "dss_ick_3430es1"),
+       { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
+       DT_CLK(NULL, "virt_16_8m_ck", "virt_16_8m_ck"),
+       DT_CLK(NULL, "dpll5_ck", "dpll5_ck"),
+       DT_CLK(NULL, "dpll5_m2_ck", "dpll5_m2_ck"),
+       DT_CLK(NULL, "sgx_fck", "sgx_fck"),
+       DT_CLK(NULL, "sgx_ick", "sgx_ick"),
+       DT_CLK(NULL, "cpefuse_fck", "cpefuse_fck"),
+       DT_CLK(NULL, "ts_fck", "ts_fck"),
+       DT_CLK(NULL, "usbtll_fck", "usbtll_fck"),
+       DT_CLK(NULL, "usbtll_ick", "usbtll_ick"),
+       DT_CLK("omap_hsmmc.2", "ick", "mmchs3_ick"),
+       DT_CLK(NULL, "mmchs3_ick", "mmchs3_ick"),
+       DT_CLK(NULL, "mmchs3_fck", "mmchs3_fck"),
+       DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es2"),
+       DT_CLK("omapdss_dss", "ick", "dss_ick_3430es2"),
+       DT_CLK(NULL, "dss_ick", "dss_ick_3430es2"),
+       DT_CLK(NULL, "usbhost_120m_fck", "usbhost_120m_fck"),
+       DT_CLK(NULL, "usbhost_48m_fck", "usbhost_48m_fck"),
+       DT_CLK(NULL, "usbhost_ick", "usbhost_ick"),
+       { .node_name = NULL },
+};
+
+static struct ti_dt_clk am35xx_clks[] = {
+       DT_CLK(NULL, "ipss_ick", "ipss_ick"),
+       DT_CLK(NULL, "rmii_ck", "rmii_ck"),
+       DT_CLK(NULL, "pclk_ck", "pclk_ck"),
+       DT_CLK(NULL, "emac_ick", "emac_ick"),
+       DT_CLK(NULL, "emac_fck", "emac_fck"),
+       DT_CLK("davinci_emac.0", NULL, "emac_ick"),
+       DT_CLK("davinci_mdio.0", NULL, "emac_fck"),
+       DT_CLK("vpfe-capture", "master", "vpfe_ick"),
+       DT_CLK("vpfe-capture", "slave", "vpfe_fck"),
+       DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_am35xx"),
+       DT_CLK(NULL, "hsotgusb_fck", "hsotgusb_fck_am35xx"),
+       DT_CLK(NULL, "hecc_ck", "hecc_ck"),
+       DT_CLK(NULL, "uart4_ick", "uart4_ick_am35xx"),
+       DT_CLK(NULL, "uart4_fck", "uart4_fck_am35xx"),
+       { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_clks[] = {
+       DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
+       DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+       { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+       "sdrc_ick",
+       "gpmc_fck",
+       "omapctrl_ick",
+};
+
+enum {
+       OMAP3_SOC_AM35XX,
+       OMAP3_SOC_OMAP3430_ES1,
+       OMAP3_SOC_OMAP3430_ES2_PLUS,
+       OMAP3_SOC_OMAP3630,
+       OMAP3_SOC_TI81XX,
+};
+
+static int __init omap3xxx_dt_clk_init(int soc_type)
+{
+       if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
+           soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+           soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+               ti_dt_clocks_register(omap3xxx_clks);
+
+       if (soc_type == OMAP3_SOC_AM35XX)
+               ti_dt_clocks_register(am35xx_clks);
+
+       if (soc_type == OMAP3_SOC_OMAP3630 || soc_type == OMAP3_SOC_AM35XX ||
+           soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+               ti_dt_clocks_register(omap36xx_am35xx_omap3430es2plus_clks);
+
+       if (soc_type == OMAP3_SOC_OMAP3430_ES1)
+               ti_dt_clocks_register(omap3430es1_clks);
+
+       if (soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+           soc_type == OMAP3_SOC_OMAP3630)
+               ti_dt_clocks_register(omap36xx_omap3430es2plus_clks);
+
+       if (soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+           soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+           soc_type == OMAP3_SOC_OMAP3630)
+               ti_dt_clocks_register(omap34xx_omap36xx_clks);
+
+       if (soc_type == OMAP3_SOC_OMAP3630)
+               ti_dt_clocks_register(omap36xx_clks);
+
+       omap2_clk_disable_autoidle_all();
+
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+               (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 1000000),
+               (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 100000) % 10,
+               (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
+               (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
+
+       if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+               omap3_clk_lock_dpll5();
+
+       return 0;
+}
+
+int __init omap3430_dt_clk_init(void)
+{
+       return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3430_ES2_PLUS);
+}
+
+int __init omap3630_dt_clk_init(void)
+{
+       return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3630);
+}
+
+int __init am35xx_dt_clk_init(void)
+{
+       return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
+}
+
+int __init ti81xx_dt_clk_init(void)
+{
+       return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
+}
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
new file mode 100644 (file)
index 0000000..67c8de5
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * AM43XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk am43xx_clks[] = {
+       DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
+       DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
+       DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+       DT_CLK(NULL, "virt_24000000_ck", "virt_24000000_ck"),
+       DT_CLK(NULL, "virt_25000000_ck", "virt_25000000_ck"),
+       DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+       DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "tclkin_ck", "tclkin_ck"),
+       DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+       DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+       DT_CLK(NULL, "dpll_core_m4_ck", "dpll_core_m4_ck"),
+       DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"),
+       DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"),
+       DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+       DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+       DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+       DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+       DT_CLK(NULL, "dpll_disp_ck", "dpll_disp_ck"),
+       DT_CLK(NULL, "dpll_disp_m2_ck", "dpll_disp_m2_ck"),
+       DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+       DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+       DT_CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", "dpll_per_m2_div4_wkupdm_ck"),
+       DT_CLK(NULL, "dpll_per_m2_div4_ck", "dpll_per_m2_div4_ck"),
+       DT_CLK(NULL, "adc_tsc_fck", "adc_tsc_fck"),
+       DT_CLK(NULL, "clkdiv32k_ck", "clkdiv32k_ck"),
+       DT_CLK(NULL, "clkdiv32k_ick", "clkdiv32k_ick"),
+       DT_CLK(NULL, "dcan0_fck", "dcan0_fck"),
+       DT_CLK(NULL, "dcan1_fck", "dcan1_fck"),
+       DT_CLK(NULL, "pruss_ocp_gclk", "pruss_ocp_gclk"),
+       DT_CLK(NULL, "mcasp0_fck", "mcasp0_fck"),
+       DT_CLK(NULL, "mcasp1_fck", "mcasp1_fck"),
+       DT_CLK(NULL, "smartreflex0_fck", "smartreflex0_fck"),
+       DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
+       DT_CLK(NULL, "sha0_fck", "sha0_fck"),
+       DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+       DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+       DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+       DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+       DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+       DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+       DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+       DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+       DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
+       DT_CLK(NULL, "l3_gclk", "l3_gclk"),
+       DT_CLK(NULL, "dpll_core_m4_div2_ck", "dpll_core_m4_div2_ck"),
+       DT_CLK(NULL, "l4hs_gclk", "l4hs_gclk"),
+       DT_CLK(NULL, "l3s_gclk", "l3s_gclk"),
+       DT_CLK(NULL, "l4ls_gclk", "l4ls_gclk"),
+       DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
+       DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+       DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+       DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
+       DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
+       DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+       DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+       DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+       DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+       DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+       DT_CLK(NULL, "mmc_clk", "mmc_clk"),
+       DT_CLK(NULL, "gfx_fclk_clksel_ck", "gfx_fclk_clksel_ck"),
+       DT_CLK(NULL, "gfx_fck_div_ck", "gfx_fck_div_ck"),
+       DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "sysclk_div", "sysclk_div"),
+       DT_CLK(NULL, "disp_clk", "disp_clk"),
+       DT_CLK(NULL, "clk_32k_mosc_ck", "clk_32k_mosc_ck"),
+       DT_CLK(NULL, "clk_32k_tpm_ck", "clk_32k_tpm_ck"),
+       DT_CLK(NULL, "dpll_extdev_ck", "dpll_extdev_ck"),
+       DT_CLK(NULL, "dpll_extdev_m2_ck", "dpll_extdev_m2_ck"),
+       DT_CLK(NULL, "mux_synctimer32k_ck", "mux_synctimer32k_ck"),
+       DT_CLK(NULL, "synctimer_32kclk", "synctimer_32kclk"),
+       DT_CLK(NULL, "timer8_fck", "timer8_fck"),
+       DT_CLK(NULL, "timer9_fck", "timer9_fck"),
+       DT_CLK(NULL, "timer10_fck", "timer10_fck"),
+       DT_CLK(NULL, "timer11_fck", "timer11_fck"),
+       DT_CLK(NULL, "cpsw_50m_clkdiv", "cpsw_50m_clkdiv"),
+       DT_CLK(NULL, "cpsw_5m_clkdiv", "cpsw_5m_clkdiv"),
+       DT_CLK(NULL, "dpll_ddr_x2_ck", "dpll_ddr_x2_ck"),
+       DT_CLK(NULL, "dpll_ddr_m4_ck", "dpll_ddr_m4_ck"),
+       DT_CLK(NULL, "dpll_per_clkdcoldo", "dpll_per_clkdcoldo"),
+       DT_CLK(NULL, "dll_aging_clk_div", "dll_aging_clk_div"),
+       DT_CLK(NULL, "div_core_25m_ck", "div_core_25m_ck"),
+       DT_CLK(NULL, "func_12m_clk", "func_12m_clk"),
+       DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"),
+       DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"),
+       { .node_name = NULL },
+};
+
+int __init am43xx_dt_clk_init(void)
+{
+       ti_dt_clocks_register(am43xx_clks);
+
+       omap2_clk_disable_autoidle_all();
+
+       return 0;
+}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
new file mode 100644 (file)
index 0000000..ae00218
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * OMAP4 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+/*
+ * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz" and hence, the DPLL locked frequency is
+ * half of this value.
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ                         98304000
+
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ                         960000000
+
+static struct ti_dt_clk omap44xx_clks[] = {
+       DT_CLK(NULL, "extalt_clkin_ck", "extalt_clkin_ck"),
+       DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"),
+       DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"),
+       DT_CLK(NULL, "pad_slimbus_core_clks_ck", "pad_slimbus_core_clks_ck"),
+       DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+       DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"),
+       DT_CLK(NULL, "slimbus_clk", "slimbus_clk"),
+       DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+       DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+       DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+       DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+       DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+       DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+       DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+       DT_CLK(NULL, "sys_clkin_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "tie_low_clock_ck", "tie_low_clock_ck"),
+       DT_CLK(NULL, "utmi_phy_clkout_ck", "utmi_phy_clkout_ck"),
+       DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"),
+       DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"),
+       DT_CLK(NULL, "xclk60motg_ck", "xclk60motg_ck"),
+       DT_CLK(NULL, "abe_dpll_bypass_clk_mux_ck", "abe_dpll_bypass_clk_mux_ck"),
+       DT_CLK(NULL, "abe_dpll_refclk_mux_ck", "abe_dpll_refclk_mux_ck"),
+       DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+       DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+       DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+       DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+       DT_CLK(NULL, "abe_clk", "abe_clk"),
+       DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+       DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+       DT_CLK(NULL, "core_hsd_byp_clk_mux_ck", "core_hsd_byp_clk_mux_ck"),
+       DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+       DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+       DT_CLK(NULL, "dpll_core_m6x2_ck", "dpll_core_m6x2_ck"),
+       DT_CLK(NULL, "dbgclk_mux_ck", "dbgclk_mux_ck"),
+       DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+       DT_CLK(NULL, "ddrphy_ck", "ddrphy_ck"),
+       DT_CLK(NULL, "dpll_core_m5x2_ck", "dpll_core_m5x2_ck"),
+       DT_CLK(NULL, "div_core_ck", "div_core_ck"),
+       DT_CLK(NULL, "div_iva_hs_clk", "div_iva_hs_clk"),
+       DT_CLK(NULL, "div_mpu_hs_clk", "div_mpu_hs_clk"),
+       DT_CLK(NULL, "dpll_core_m4x2_ck", "dpll_core_m4x2_ck"),
+       DT_CLK(NULL, "dll_clk_div_ck", "dll_clk_div_ck"),
+       DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"),
+       DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"),
+       DT_CLK(NULL, "dpll_core_m7x2_ck", "dpll_core_m7x2_ck"),
+       DT_CLK(NULL, "iva_hsd_byp_clk_mux_ck", "iva_hsd_byp_clk_mux_ck"),
+       DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+       DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"),
+       DT_CLK(NULL, "dpll_iva_m4x2_ck", "dpll_iva_m4x2_ck"),
+       DT_CLK(NULL, "dpll_iva_m5x2_ck", "dpll_iva_m5x2_ck"),
+       DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+       DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+       DT_CLK(NULL, "per_hs_clk_div_ck", "per_hs_clk_div_ck"),
+       DT_CLK(NULL, "per_hsd_byp_clk_mux_ck", "per_hsd_byp_clk_mux_ck"),
+       DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+       DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+       DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+       DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+       DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"),
+       DT_CLK(NULL, "dpll_per_m4x2_ck", "dpll_per_m4x2_ck"),
+       DT_CLK(NULL, "dpll_per_m5x2_ck", "dpll_per_m5x2_ck"),
+       DT_CLK(NULL, "dpll_per_m6x2_ck", "dpll_per_m6x2_ck"),
+       DT_CLK(NULL, "dpll_per_m7x2_ck", "dpll_per_m7x2_ck"),
+       DT_CLK(NULL, "usb_hs_clk_div_ck", "usb_hs_clk_div_ck"),
+       DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+       DT_CLK(NULL, "dpll_usb_clkdcoldo_ck", "dpll_usb_clkdcoldo_ck"),
+       DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+       DT_CLK(NULL, "ducati_clk_mux_ck", "ducati_clk_mux_ck"),
+       DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+       DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+       DT_CLK(NULL, "func_24mc_fclk", "func_24mc_fclk"),
+       DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+       DT_CLK(NULL, "func_48mc_fclk", "func_48mc_fclk"),
+       DT_CLK(NULL, "func_64m_fclk", "func_64m_fclk"),
+       DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+       DT_CLK(NULL, "init_60m_fclk", "init_60m_fclk"),
+       DT_CLK(NULL, "l3_div_ck", "l3_div_ck"),
+       DT_CLK(NULL, "l4_div_ck", "l4_div_ck"),
+       DT_CLK(NULL, "lp_clk_div_ck", "lp_clk_div_ck"),
+       DT_CLK(NULL, "l4_wkup_clk_mux_ck", "l4_wkup_clk_mux_ck"),
+       DT_CLK("smp_twd", NULL, "mpu_periphclk"),
+       DT_CLK(NULL, "ocp_abe_iclk", "ocp_abe_iclk"),
+       DT_CLK(NULL, "per_abe_24m_fclk", "per_abe_24m_fclk"),
+       DT_CLK(NULL, "per_abe_nc_fclk", "per_abe_nc_fclk"),
+       DT_CLK(NULL, "syc_clk_div_ck", "syc_clk_div_ck"),
+       DT_CLK(NULL, "aes1_fck", "aes1_fck"),
+       DT_CLK(NULL, "aes2_fck", "aes2_fck"),
+       DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"),
+       DT_CLK(NULL, "func_dmic_abe_gfclk", "func_dmic_abe_gfclk"),
+       DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"),
+       DT_CLK(NULL, "dss_tv_clk", "dss_tv_clk"),
+       DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+       DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+       DT_CLK(NULL, "dss_fck", "dss_fck"),
+       DT_CLK("omapdss_dss", "ick", "dss_fck"),
+       DT_CLK(NULL, "fdif_fck", "fdif_fck"),
+       DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+       DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+       DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+       DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+       DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+       DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+       DT_CLK(NULL, "sgx_clk_mux", "sgx_clk_mux"),
+       DT_CLK(NULL, "hsi_fck", "hsi_fck"),
+       DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"),
+       DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"),
+       DT_CLK(NULL, "func_mcasp_abe_gfclk", "func_mcasp_abe_gfclk"),
+       DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"),
+       DT_CLK(NULL, "func_mcbsp1_gfclk", "func_mcbsp1_gfclk"),
+       DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"),
+       DT_CLK(NULL, "func_mcbsp2_gfclk", "func_mcbsp2_gfclk"),
+       DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"),
+       DT_CLK(NULL, "func_mcbsp3_gfclk", "func_mcbsp3_gfclk"),
+       DT_CLK(NULL, "mcbsp4_sync_mux_ck", "mcbsp4_sync_mux_ck"),
+       DT_CLK(NULL, "per_mcbsp4_gfclk", "per_mcbsp4_gfclk"),
+       DT_CLK(NULL, "hsmmc1_fclk", "hsmmc1_fclk"),
+       DT_CLK(NULL, "hsmmc2_fclk", "hsmmc2_fclk"),
+       DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "ocp2scp_usb_phy_phy_48m"),
+       DT_CLK(NULL, "sha2md5_fck", "sha2md5_fck"),
+       DT_CLK(NULL, "slimbus1_fclk_1", "slimbus1_fclk_1"),
+       DT_CLK(NULL, "slimbus1_fclk_0", "slimbus1_fclk_0"),
+       DT_CLK(NULL, "slimbus1_fclk_2", "slimbus1_fclk_2"),
+       DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"),
+       DT_CLK(NULL, "slimbus2_fclk_1", "slimbus2_fclk_1"),
+       DT_CLK(NULL, "slimbus2_fclk_0", "slimbus2_fclk_0"),
+       DT_CLK(NULL, "slimbus2_slimbus_clk", "slimbus2_slimbus_clk"),
+       DT_CLK(NULL, "smartreflex_core_fck", "smartreflex_core_fck"),
+       DT_CLK(NULL, "smartreflex_iva_fck", "smartreflex_iva_fck"),
+       DT_CLK(NULL, "smartreflex_mpu_fck", "smartreflex_mpu_fck"),
+       DT_CLK(NULL, "dmt1_clk_mux", "dmt1_clk_mux"),
+       DT_CLK(NULL, "cm2_dm10_mux", "cm2_dm10_mux"),
+       DT_CLK(NULL, "cm2_dm11_mux", "cm2_dm11_mux"),
+       DT_CLK(NULL, "cm2_dm2_mux", "cm2_dm2_mux"),
+       DT_CLK(NULL, "cm2_dm3_mux", "cm2_dm3_mux"),
+       DT_CLK(NULL, "cm2_dm4_mux", "cm2_dm4_mux"),
+       DT_CLK(NULL, "timer5_sync_mux", "timer5_sync_mux"),
+       DT_CLK(NULL, "timer6_sync_mux", "timer6_sync_mux"),
+       DT_CLK(NULL, "timer7_sync_mux", "timer7_sync_mux"),
+       DT_CLK(NULL, "timer8_sync_mux", "timer8_sync_mux"),
+       DT_CLK(NULL, "cm2_dm9_mux", "cm2_dm9_mux"),
+       DT_CLK(NULL, "usb_host_fs_fck", "usb_host_fs_fck"),
+       DT_CLK("usbhs_omap", "fs_fck", "usb_host_fs_fck"),
+       DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"),
+       DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"),
+       DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"),
+       DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"),
+       DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"),
+       DT_CLK(NULL, "usb_host_hs_func48mclk", "usb_host_hs_func48mclk"),
+       DT_CLK(NULL, "usb_host_hs_fck", "usb_host_hs_fck"),
+       DT_CLK("usbhs_omap", "hs_fck", "usb_host_hs_fck"),
+       DT_CLK(NULL, "otg_60m_gfclk", "otg_60m_gfclk"),
+       DT_CLK(NULL, "usb_otg_hs_xclk", "usb_otg_hs_xclk"),
+       DT_CLK(NULL, "usb_otg_hs_ick", "usb_otg_hs_ick"),
+       DT_CLK("musb-omap2430", "ick", "usb_otg_hs_ick"),
+       DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"),
+       DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"),
+       DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"),
+       DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"),
+       DT_CLK(NULL, "usb_tll_hs_ick", "usb_tll_hs_ick"),
+       DT_CLK("usbhs_omap", "usbtll_ick", "usb_tll_hs_ick"),
+       DT_CLK("usbhs_tll", "usbtll_ick", "usb_tll_hs_ick"),
+       DT_CLK(NULL, "usim_ck", "usim_ck"),
+       DT_CLK(NULL, "usim_fclk", "usim_fclk"),
+       DT_CLK(NULL, "pmd_stm_clock_mux_ck", "pmd_stm_clock_mux_ck"),
+       DT_CLK(NULL, "pmd_trace_clk_mux_ck", "pmd_trace_clk_mux_ck"),
+       DT_CLK(NULL, "stm_clk_div_ck", "stm_clk_div_ck"),
+       DT_CLK(NULL, "trace_clk_div_ck", "trace_clk_div_ck"),
+       DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"),
+       DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"),
+       DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"),
+       DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"),
+       DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"),
+       DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"),
+       DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"),
+       DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"),
+       DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"),
+       DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
+       DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
+       DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
+       DT_CLK(NULL, "auxclk4_src_ck", "auxclk4_src_ck"),
+       DT_CLK(NULL, "auxclk4_ck", "auxclk4_ck"),
+       DT_CLK(NULL, "auxclkreq4_ck", "auxclkreq4_ck"),
+       DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"),
+       DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"),
+       DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"),
+       DT_CLK("50000000.gpmc", "fck", "dummy_ck"),
+       DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+       DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+       DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+       DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+       DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+       DT_CLK("usbhs_tll", "usbtll_fck", "dummy_ck"),
+       DT_CLK("omap_wdt", "ick", "dummy_ck"),
+       DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+       DT_CLK("omap_timer.1", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.2", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.3", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.4", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.9", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.10", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.11", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("omap_timer.5", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("omap_timer.6", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("omap_timer.7", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("omap_timer.8", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("4a318000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK("40138000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("4013a000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("4013c000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK("4013e000.timer", "timer_sys_ck", "syc_clk_div_ck"),
+       DT_CLK(NULL, "cpufreq_ck", "dpll_mpu_ck"),
+       DT_CLK(NULL, "bandgap_fclk", "bandgap_fclk"),
+       DT_CLK(NULL, "div_ts_ck", "div_ts_ck"),
+       DT_CLK(NULL, "bandgap_ts_fclk", "bandgap_ts_fclk"),
+       { .node_name = NULL },
+};
+
+int __init omap4xxx_dt_clk_init(void)
+{
+       int rc;
+       struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+       ti_dt_clocks_register(omap44xx_clks);
+
+       omap2_clk_disable_autoidle_all();
+
+       /*
+        * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+        * domain can transition to retention state when not in use.
+        */
+       usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+       rc = clk_set_rate(usb_dpll, OMAP4_DPLL_USB_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+       /*
+        * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
+        * state when turning the ABE clock domain. Workaround this by
+        * locking the ABE DPLL on boot.
+        * Lock the ABE DPLL in any case to avoid issues with audio.
+        */
+       abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_refclk_mux_ck");
+       sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+       rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+       abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+       if (!rc)
+               rc = clk_set_rate(abe_dpll, OMAP4_DPLL_ABE_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+       return 0;
+}
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
new file mode 100644 (file)
index 0000000..0ef9f58
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * OMAP5 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#define OMAP5_DPLL_ABE_DEFFREQ                         98304000
+
+/*
+ * OMAP543x TRM, section "3.6.3.9.5 DPLL_USB Preferred Settings"
+ * states it must be at 960MHz
+ */
+#define OMAP5_DPLL_USB_DEFFREQ                         960000000
+
+/*
+ * OMAP5 clock alias table.  Each entry maps a (dev-id, con-id) pair to a
+ * DT clock node name; most entries alias a clock to its own node name
+ * with a NULL dev-id, while the "dummy_ck" entries satisfy legacy device
+ * clock lookups.  Terminated by an entry with a NULL node_name.
+ */
+static struct ti_dt_clk omap54xx_clks[] = {
+       DT_CLK(NULL, "pad_clks_src_ck", "pad_clks_src_ck"),
+       DT_CLK(NULL, "pad_clks_ck", "pad_clks_ck"),
+       DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+       DT_CLK(NULL, "slimbus_src_clk", "slimbus_src_clk"),
+       DT_CLK(NULL, "slimbus_clk", "slimbus_clk"),
+       DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+       DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+       DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+       DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+       DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+       DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+       DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+       DT_CLK(NULL, "sys_clkin", "sys_clkin"),
+       DT_CLK(NULL, "xclk60mhsp1_ck", "xclk60mhsp1_ck"),
+       DT_CLK(NULL, "xclk60mhsp2_ck", "xclk60mhsp2_ck"),
+       DT_CLK(NULL, "abe_dpll_bypass_clk_mux", "abe_dpll_bypass_clk_mux"),
+       DT_CLK(NULL, "abe_dpll_clk_mux", "abe_dpll_clk_mux"),
+       DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+       DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+       DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+       DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+       DT_CLK(NULL, "abe_clk", "abe_clk"),
+       DT_CLK(NULL, "abe_iclk", "abe_iclk"),
+       DT_CLK(NULL, "abe_lp_clk_div", "abe_lp_clk_div"),
+       DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+       DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+       DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+       DT_CLK(NULL, "dpll_core_h21x2_ck", "dpll_core_h21x2_ck"),
+       DT_CLK(NULL, "c2c_fclk", "c2c_fclk"),
+       DT_CLK(NULL, "c2c_iclk", "c2c_iclk"),
+       DT_CLK(NULL, "custefuse_sys_gfclk_div", "custefuse_sys_gfclk_div"),
+       DT_CLK(NULL, "dpll_core_h11x2_ck", "dpll_core_h11x2_ck"),
+       DT_CLK(NULL, "dpll_core_h12x2_ck", "dpll_core_h12x2_ck"),
+       DT_CLK(NULL, "dpll_core_h13x2_ck", "dpll_core_h13x2_ck"),
+       DT_CLK(NULL, "dpll_core_h14x2_ck", "dpll_core_h14x2_ck"),
+       DT_CLK(NULL, "dpll_core_h22x2_ck", "dpll_core_h22x2_ck"),
+       DT_CLK(NULL, "dpll_core_h23x2_ck", "dpll_core_h23x2_ck"),
+       DT_CLK(NULL, "dpll_core_h24x2_ck", "dpll_core_h24x2_ck"),
+       DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+       DT_CLK(NULL, "dpll_core_m3x2_ck", "dpll_core_m3x2_ck"),
+       DT_CLK(NULL, "iva_dpll_hs_clk_div", "iva_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+       DT_CLK(NULL, "dpll_iva_x2_ck", "dpll_iva_x2_ck"),
+       DT_CLK(NULL, "dpll_iva_h11x2_ck", "dpll_iva_h11x2_ck"),
+       DT_CLK(NULL, "dpll_iva_h12x2_ck", "dpll_iva_h12x2_ck"),
+       DT_CLK(NULL, "mpu_dpll_hs_clk_div", "mpu_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+       DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+       DT_CLK(NULL, "per_dpll_hs_clk_div", "per_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+       DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+       DT_CLK(NULL, "dpll_per_h11x2_ck", "dpll_per_h11x2_ck"),
+       DT_CLK(NULL, "dpll_per_h12x2_ck", "dpll_per_h12x2_ck"),
+       DT_CLK(NULL, "dpll_per_h14x2_ck", "dpll_per_h14x2_ck"),
+       DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+       DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+       DT_CLK(NULL, "dpll_per_m3x2_ck", "dpll_per_m3x2_ck"),
+       DT_CLK(NULL, "dpll_unipro1_ck", "dpll_unipro1_ck"),
+       DT_CLK(NULL, "dpll_unipro1_clkdcoldo", "dpll_unipro1_clkdcoldo"),
+       DT_CLK(NULL, "dpll_unipro1_m2_ck", "dpll_unipro1_m2_ck"),
+       DT_CLK(NULL, "dpll_unipro2_ck", "dpll_unipro2_ck"),
+       DT_CLK(NULL, "dpll_unipro2_clkdcoldo", "dpll_unipro2_clkdcoldo"),
+       DT_CLK(NULL, "dpll_unipro2_m2_ck", "dpll_unipro2_m2_ck"),
+       DT_CLK(NULL, "usb_dpll_hs_clk_div", "usb_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+       DT_CLK(NULL, "dpll_usb_clkdcoldo", "dpll_usb_clkdcoldo"),
+       DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+       DT_CLK(NULL, "dss_syc_gfclk_div", "dss_syc_gfclk_div"),
+       DT_CLK(NULL, "func_128m_clk", "func_128m_clk"),
+       DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+       DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+       DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+       DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+       DT_CLK(NULL, "l3_iclk_div", "l3_iclk_div"),
+       DT_CLK(NULL, "gpu_l3_iclk", "gpu_l3_iclk"),
+       DT_CLK(NULL, "l3init_60m_fclk", "l3init_60m_fclk"),
+       DT_CLK(NULL, "wkupaon_iclk_mux", "wkupaon_iclk_mux"),
+       DT_CLK(NULL, "l3instr_ts_gclk_div", "l3instr_ts_gclk_div"),
+       DT_CLK(NULL, "l4_root_clk_div", "l4_root_clk_div"),
+       DT_CLK(NULL, "dss_32khz_clk", "dss_32khz_clk"),
+       DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+       DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+       DT_CLK(NULL, "dss_sys_clk", "dss_sys_clk"),
+       DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+       DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+       DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+       DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+       DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+       DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+       DT_CLK(NULL, "gpio7_dbclk", "gpio7_dbclk"),
+       DT_CLK(NULL, "gpio8_dbclk", "gpio8_dbclk"),
+       DT_CLK(NULL, "iss_ctrlclk", "iss_ctrlclk"),
+       DT_CLK(NULL, "lli_txphy_clk", "lli_txphy_clk"),
+       DT_CLK(NULL, "lli_txphy_ls_clk", "lli_txphy_ls_clk"),
+       DT_CLK(NULL, "mmc1_32khz_clk", "mmc1_32khz_clk"),
+       DT_CLK(NULL, "sata_ref_clk", "sata_ref_clk"),
+       DT_CLK(NULL, "slimbus1_slimbus_clk", "slimbus1_slimbus_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "usb_host_hs_hsic480m_p1_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "usb_host_hs_hsic480m_p2_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "usb_host_hs_hsic480m_p3_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "usb_host_hs_hsic60m_p1_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "usb_host_hs_hsic60m_p2_clk"),
+       DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "usb_host_hs_hsic60m_p3_clk"),
+       DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "usb_host_hs_utmi_p1_clk"),
+       DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "usb_host_hs_utmi_p2_clk"),
+       DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "usb_host_hs_utmi_p3_clk"),
+       DT_CLK(NULL, "usb_otg_ss_refclk960m", "usb_otg_ss_refclk960m"),
+       DT_CLK(NULL, "usb_phy_cm_clk32k", "usb_phy_cm_clk32k"),
+       DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "usb_tll_hs_usb_ch0_clk"),
+       DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "usb_tll_hs_usb_ch1_clk"),
+       DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "usb_tll_hs_usb_ch2_clk"),
+       DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+       DT_CLK(NULL, "dmic_sync_mux_ck", "dmic_sync_mux_ck"),
+       DT_CLK(NULL, "dmic_gfclk", "dmic_gfclk"),
+       DT_CLK(NULL, "fdif_fclk", "fdif_fclk"),
+       DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
+       DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
+       DT_CLK(NULL, "hsi_fclk", "hsi_fclk"),
+       DT_CLK(NULL, "mcasp_sync_mux_ck", "mcasp_sync_mux_ck"),
+       DT_CLK(NULL, "mcasp_gfclk", "mcasp_gfclk"),
+       DT_CLK(NULL, "mcbsp1_sync_mux_ck", "mcbsp1_sync_mux_ck"),
+       DT_CLK(NULL, "mcbsp1_gfclk", "mcbsp1_gfclk"),
+       DT_CLK(NULL, "mcbsp2_sync_mux_ck", "mcbsp2_sync_mux_ck"),
+       DT_CLK(NULL, "mcbsp2_gfclk", "mcbsp2_gfclk"),
+       DT_CLK(NULL, "mcbsp3_sync_mux_ck", "mcbsp3_sync_mux_ck"),
+       DT_CLK(NULL, "mcbsp3_gfclk", "mcbsp3_gfclk"),
+       DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
+       DT_CLK(NULL, "mmc1_fclk", "mmc1_fclk"),
+       DT_CLK(NULL, "mmc2_fclk_mux", "mmc2_fclk_mux"),
+       DT_CLK(NULL, "mmc2_fclk", "mmc2_fclk"),
+       DT_CLK(NULL, "timer10_gfclk_mux", "timer10_gfclk_mux"),
+       DT_CLK(NULL, "timer11_gfclk_mux", "timer11_gfclk_mux"),
+       DT_CLK(NULL, "timer1_gfclk_mux", "timer1_gfclk_mux"),
+       DT_CLK(NULL, "timer2_gfclk_mux", "timer2_gfclk_mux"),
+       DT_CLK(NULL, "timer3_gfclk_mux", "timer3_gfclk_mux"),
+       DT_CLK(NULL, "timer4_gfclk_mux", "timer4_gfclk_mux"),
+       DT_CLK(NULL, "timer5_gfclk_mux", "timer5_gfclk_mux"),
+       DT_CLK(NULL, "timer6_gfclk_mux", "timer6_gfclk_mux"),
+       DT_CLK(NULL, "timer7_gfclk_mux", "timer7_gfclk_mux"),
+       DT_CLK(NULL, "timer8_gfclk_mux", "timer8_gfclk_mux"),
+       DT_CLK(NULL, "timer9_gfclk_mux", "timer9_gfclk_mux"),
+       DT_CLK(NULL, "utmi_p1_gfclk", "utmi_p1_gfclk"),
+       DT_CLK(NULL, "utmi_p2_gfclk", "utmi_p2_gfclk"),
+       DT_CLK(NULL, "auxclk0_src_ck", "auxclk0_src_ck"),
+       DT_CLK(NULL, "auxclk0_ck", "auxclk0_ck"),
+       DT_CLK(NULL, "auxclkreq0_ck", "auxclkreq0_ck"),
+       DT_CLK(NULL, "auxclk1_src_ck", "auxclk1_src_ck"),
+       DT_CLK(NULL, "auxclk1_ck", "auxclk1_ck"),
+       DT_CLK(NULL, "auxclkreq1_ck", "auxclkreq1_ck"),
+       DT_CLK(NULL, "auxclk2_src_ck", "auxclk2_src_ck"),
+       DT_CLK(NULL, "auxclk2_ck", "auxclk2_ck"),
+       DT_CLK(NULL, "auxclkreq2_ck", "auxclkreq2_ck"),
+       DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
+       DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
+       DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
+       DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
+       DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+       DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+       DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+       DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+       DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+       DT_CLK("omap_wdt", "ick", "dummy_ck"),
+       DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+       DT_CLK("omap_timer.1", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.2", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.3", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.4", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.9", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.10", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.11", "sys_ck", "sys_clkin"),
+       DT_CLK("omap_timer.5", "sys_ck", "dss_syc_gfclk_div"),
+       DT_CLK("omap_timer.6", "sys_ck", "dss_syc_gfclk_div"),
+       DT_CLK("omap_timer.7", "sys_ck", "dss_syc_gfclk_div"),
+       DT_CLK("omap_timer.8", "sys_ck", "dss_syc_gfclk_div"),
+       { .node_name = NULL },  /* sentinel */
+};
+
<br>
+/*
+ * omap5xxx_dt_clk_init - OMAP5 boot-time clock initialization
+ *
+ * Registers the OMAP5 DT clock alias table, disables clock autoidle
+ * everywhere, locks the ABE DPLL (after reparenting its reference to
+ * the 32 kHz clock), then locks the USB DPLL at 960 MHz and its M2
+ * output at half that rate.  Always returns 0; failures are only
+ * logged.
+ */
+int __init omap5xxx_dt_clk_init(void)
+{
+       int rc;
+       struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+       ti_dt_clocks_register(omap54xx_clks);
+
+       omap2_clk_disable_autoidle_all();
+
+       /* Reparent the ABE DPLL reference mux to the 32 kHz clock, then lock it */
+       abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_clk_mux");
+       sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+       /* NOTE(review): clk_get_sys() results not IS_ERR()-checked — verify */
+       rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+       abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+       if (!rc)
+               rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+       /* Lock the USB DPLL at its TRM-mandated 960 MHz rate */
+       usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+       rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+       /* The M2 post-divider output runs at half the DPLL rate (480 MHz) */
+       usb_dpll = clk_get_sys(NULL, "dpll_usb_m2_ck");
+       rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ/2);
+       if (rc)
+               pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
+       return 0;
+}
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
new file mode 100644 (file)
index 0000000..9977653
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * DRA7 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+#define DRA7_DPLL_ABE_DEFFREQ                          361267200
+#define DRA7_DPLL_GMAC_DEFFREQ                         1000000000
+
+
+/*
+ * DRA7 clock alias table.  Each entry maps a (dev-id, con-id) pair to a
+ * DT clock node name; most entries alias a clock to its own node name
+ * with a NULL dev-id, the "dummy_ck" entries satisfy legacy device
+ * clock lookups, and the "*.timer" entries bind timers to their source
+ * clocks by unit address.  Terminated by an entry with a NULL node_name.
+ */
+static struct ti_dt_clk dra7xx_clks[] = {
+       DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
+       DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
+       DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"),
+       DT_CLK(NULL, "atlclkin3_ck", "atlclkin3_ck"),
+       DT_CLK(NULL, "hdmi_clkin_ck", "hdmi_clkin_ck"),
+       DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"),
+       DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"),
+       DT_CLK(NULL, "pciesref_acs_clk_ck", "pciesref_acs_clk_ck"),
+       DT_CLK(NULL, "ref_clkin0_ck", "ref_clkin0_ck"),
+       DT_CLK(NULL, "ref_clkin1_ck", "ref_clkin1_ck"),
+       DT_CLK(NULL, "ref_clkin2_ck", "ref_clkin2_ck"),
+       DT_CLK(NULL, "ref_clkin3_ck", "ref_clkin3_ck"),
+       DT_CLK(NULL, "rmii_clk_ck", "rmii_clk_ck"),
+       DT_CLK(NULL, "sdvenc_clkin_ck", "sdvenc_clkin_ck"),
+       DT_CLK(NULL, "secure_32k_clk_src_ck", "secure_32k_clk_src_ck"),
+       DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "virt_12000000_ck", "virt_12000000_ck"),
+       DT_CLK(NULL, "virt_13000000_ck", "virt_13000000_ck"),
+       DT_CLK(NULL, "virt_16800000_ck", "virt_16800000_ck"),
+       DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+       DT_CLK(NULL, "virt_20000000_ck", "virt_20000000_ck"),
+       DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
+       DT_CLK(NULL, "virt_27000000_ck", "virt_27000000_ck"),
+       DT_CLK(NULL, "virt_38400000_ck", "virt_38400000_ck"),
+       DT_CLK(NULL, "sys_clkin1", "sys_clkin1"),
+       DT_CLK(NULL, "sys_clkin2", "sys_clkin2"),
+       DT_CLK(NULL, "usb_otg_clkin_ck", "usb_otg_clkin_ck"),
+       DT_CLK(NULL, "video1_clkin_ck", "video1_clkin_ck"),
+       DT_CLK(NULL, "video1_m2_clkin_ck", "video1_m2_clkin_ck"),
+       DT_CLK(NULL, "video2_clkin_ck", "video2_clkin_ck"),
+       DT_CLK(NULL, "video2_m2_clkin_ck", "video2_m2_clkin_ck"),
+       DT_CLK(NULL, "abe_dpll_sys_clk_mux", "abe_dpll_sys_clk_mux"),
+       DT_CLK(NULL, "abe_dpll_bypass_clk_mux", "abe_dpll_bypass_clk_mux"),
+       DT_CLK(NULL, "abe_dpll_clk_mux", "abe_dpll_clk_mux"),
+       DT_CLK(NULL, "dpll_abe_ck", "dpll_abe_ck"),
+       DT_CLK(NULL, "dpll_abe_x2_ck", "dpll_abe_x2_ck"),
+       DT_CLK(NULL, "dpll_abe_m2x2_ck", "dpll_abe_m2x2_ck"),
+       DT_CLK(NULL, "abe_24m_fclk", "abe_24m_fclk"),
+       DT_CLK(NULL, "abe_clk", "abe_clk"),
+       DT_CLK(NULL, "aess_fclk", "aess_fclk"),
+       DT_CLK(NULL, "abe_giclk_div", "abe_giclk_div"),
+       DT_CLK(NULL, "abe_lp_clk_div", "abe_lp_clk_div"),
+       DT_CLK(NULL, "abe_sys_clk_div", "abe_sys_clk_div"),
+       DT_CLK(NULL, "adc_gfclk_mux", "adc_gfclk_mux"),
+       DT_CLK(NULL, "dpll_pcie_ref_ck", "dpll_pcie_ref_ck"),
+       DT_CLK(NULL, "dpll_pcie_ref_m2ldo_ck", "dpll_pcie_ref_m2ldo_ck"),
+       DT_CLK(NULL, "apll_pcie_ck", "apll_pcie_ck"),
+       DT_CLK(NULL, "apll_pcie_clkvcoldo", "apll_pcie_clkvcoldo"),
+       DT_CLK(NULL, "apll_pcie_clkvcoldo_div", "apll_pcie_clkvcoldo_div"),
+       DT_CLK(NULL, "apll_pcie_m2_ck", "apll_pcie_m2_ck"),
+       DT_CLK(NULL, "sys_clk1_dclk_div", "sys_clk1_dclk_div"),
+       DT_CLK(NULL, "sys_clk2_dclk_div", "sys_clk2_dclk_div"),
+       DT_CLK(NULL, "dpll_abe_m2_ck", "dpll_abe_m2_ck"),
+       DT_CLK(NULL, "per_abe_x1_dclk_div", "per_abe_x1_dclk_div"),
+       DT_CLK(NULL, "dpll_abe_m3x2_ck", "dpll_abe_m3x2_ck"),
+       DT_CLK(NULL, "dpll_core_ck", "dpll_core_ck"),
+       DT_CLK(NULL, "dpll_core_x2_ck", "dpll_core_x2_ck"),
+       DT_CLK(NULL, "dpll_core_h12x2_ck", "dpll_core_h12x2_ck"),
+       DT_CLK(NULL, "mpu_dpll_hs_clk_div", "mpu_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"),
+       DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"),
+       DT_CLK(NULL, "mpu_dclk_div", "mpu_dclk_div"),
+       DT_CLK(NULL, "dsp_dpll_hs_clk_div", "dsp_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_dsp_ck", "dpll_dsp_ck"),
+       DT_CLK(NULL, "dpll_dsp_m2_ck", "dpll_dsp_m2_ck"),
+       DT_CLK(NULL, "dsp_gclk_div", "dsp_gclk_div"),
+       DT_CLK(NULL, "iva_dpll_hs_clk_div", "iva_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_iva_ck", "dpll_iva_ck"),
+       DT_CLK(NULL, "dpll_iva_m2_ck", "dpll_iva_m2_ck"),
+       DT_CLK(NULL, "iva_dclk", "iva_dclk"),
+       DT_CLK(NULL, "dpll_gpu_ck", "dpll_gpu_ck"),
+       DT_CLK(NULL, "dpll_gpu_m2_ck", "dpll_gpu_m2_ck"),
+       DT_CLK(NULL, "gpu_dclk", "gpu_dclk"),
+       DT_CLK(NULL, "dpll_core_m2_ck", "dpll_core_m2_ck"),
+       DT_CLK(NULL, "core_dpll_out_dclk_div", "core_dpll_out_dclk_div"),
+       DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"),
+       DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"),
+       DT_CLK(NULL, "emif_phy_dclk_div", "emif_phy_dclk_div"),
+       DT_CLK(NULL, "dpll_gmac_ck", "dpll_gmac_ck"),
+       DT_CLK(NULL, "dpll_gmac_m2_ck", "dpll_gmac_m2_ck"),
+       DT_CLK(NULL, "gmac_250m_dclk_div", "gmac_250m_dclk_div"),
+       DT_CLK(NULL, "video2_dclk_div", "video2_dclk_div"),
+       DT_CLK(NULL, "video1_dclk_div", "video1_dclk_div"),
+       DT_CLK(NULL, "hdmi_dclk_div", "hdmi_dclk_div"),
+       DT_CLK(NULL, "per_dpll_hs_clk_div", "per_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_per_ck", "dpll_per_ck"),
+       DT_CLK(NULL, "dpll_per_m2_ck", "dpll_per_m2_ck"),
+       DT_CLK(NULL, "func_96m_aon_dclk_div", "func_96m_aon_dclk_div"),
+       DT_CLK(NULL, "usb_dpll_hs_clk_div", "usb_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_usb_ck", "dpll_usb_ck"),
+       DT_CLK(NULL, "dpll_usb_m2_ck", "dpll_usb_m2_ck"),
+       DT_CLK(NULL, "l3init_480m_dclk_div", "l3init_480m_dclk_div"),
+       DT_CLK(NULL, "usb_otg_dclk_div", "usb_otg_dclk_div"),
+       DT_CLK(NULL, "sata_dclk_div", "sata_dclk_div"),
+       DT_CLK(NULL, "dpll_pcie_ref_m2_ck", "dpll_pcie_ref_m2_ck"),
+       DT_CLK(NULL, "pcie2_dclk_div", "pcie2_dclk_div"),
+       DT_CLK(NULL, "pcie_dclk_div", "pcie_dclk_div"),
+       DT_CLK(NULL, "emu_dclk_div", "emu_dclk_div"),
+       DT_CLK(NULL, "secure_32k_dclk_div", "secure_32k_dclk_div"),
+       DT_CLK(NULL, "eve_dpll_hs_clk_div", "eve_dpll_hs_clk_div"),
+       DT_CLK(NULL, "dpll_eve_ck", "dpll_eve_ck"),
+       DT_CLK(NULL, "dpll_eve_m2_ck", "dpll_eve_m2_ck"),
+       DT_CLK(NULL, "eve_dclk_div", "eve_dclk_div"),
+       DT_CLK(NULL, "clkoutmux0_clk_mux", "clkoutmux0_clk_mux"),
+       DT_CLK(NULL, "clkoutmux1_clk_mux", "clkoutmux1_clk_mux"),
+       DT_CLK(NULL, "clkoutmux2_clk_mux", "clkoutmux2_clk_mux"),
+       DT_CLK(NULL, "custefuse_sys_gfclk_div", "custefuse_sys_gfclk_div"),
+       DT_CLK(NULL, "dpll_core_h13x2_ck", "dpll_core_h13x2_ck"),
+       DT_CLK(NULL, "dpll_core_h14x2_ck", "dpll_core_h14x2_ck"),
+       DT_CLK(NULL, "dpll_core_h22x2_ck", "dpll_core_h22x2_ck"),
+       DT_CLK(NULL, "dpll_core_h23x2_ck", "dpll_core_h23x2_ck"),
+       DT_CLK(NULL, "dpll_core_h24x2_ck", "dpll_core_h24x2_ck"),
+       DT_CLK(NULL, "dpll_ddr_x2_ck", "dpll_ddr_x2_ck"),
+       DT_CLK(NULL, "dpll_ddr_h11x2_ck", "dpll_ddr_h11x2_ck"),
+       DT_CLK(NULL, "dpll_dsp_x2_ck", "dpll_dsp_x2_ck"),
+       DT_CLK(NULL, "dpll_dsp_m3x2_ck", "dpll_dsp_m3x2_ck"),
+       DT_CLK(NULL, "dpll_gmac_x2_ck", "dpll_gmac_x2_ck"),
+       DT_CLK(NULL, "dpll_gmac_h11x2_ck", "dpll_gmac_h11x2_ck"),
+       DT_CLK(NULL, "dpll_gmac_h12x2_ck", "dpll_gmac_h12x2_ck"),
+       DT_CLK(NULL, "dpll_gmac_h13x2_ck", "dpll_gmac_h13x2_ck"),
+       DT_CLK(NULL, "dpll_gmac_m3x2_ck", "dpll_gmac_m3x2_ck"),
+       DT_CLK(NULL, "dpll_per_x2_ck", "dpll_per_x2_ck"),
+       DT_CLK(NULL, "dpll_per_h11x2_ck", "dpll_per_h11x2_ck"),
+       DT_CLK(NULL, "dpll_per_h12x2_ck", "dpll_per_h12x2_ck"),
+       DT_CLK(NULL, "dpll_per_h13x2_ck", "dpll_per_h13x2_ck"),
+       DT_CLK(NULL, "dpll_per_h14x2_ck", "dpll_per_h14x2_ck"),
+       DT_CLK(NULL, "dpll_per_m2x2_ck", "dpll_per_m2x2_ck"),
+       DT_CLK(NULL, "dpll_usb_clkdcoldo", "dpll_usb_clkdcoldo"),
+       DT_CLK(NULL, "eve_clk", "eve_clk"),
+       DT_CLK(NULL, "func_128m_clk", "func_128m_clk"),
+       DT_CLK(NULL, "func_12m_fclk", "func_12m_fclk"),
+       DT_CLK(NULL, "func_24m_clk", "func_24m_clk"),
+       DT_CLK(NULL, "func_48m_fclk", "func_48m_fclk"),
+       DT_CLK(NULL, "func_96m_fclk", "func_96m_fclk"),
+       DT_CLK(NULL, "gmii_m_clk_div", "gmii_m_clk_div"),
+       DT_CLK(NULL, "hdmi_clk2_div", "hdmi_clk2_div"),
+       DT_CLK(NULL, "hdmi_div_clk", "hdmi_div_clk"),
+       DT_CLK(NULL, "hdmi_dpll_clk_mux", "hdmi_dpll_clk_mux"),
+       DT_CLK(NULL, "l3_iclk_div", "l3_iclk_div"),
+       DT_CLK(NULL, "l3init_60m_fclk", "l3init_60m_fclk"),
+       DT_CLK(NULL, "l4_root_clk_div", "l4_root_clk_div"),
+       DT_CLK(NULL, "mlb_clk", "mlb_clk"),
+       DT_CLK(NULL, "mlbp_clk", "mlbp_clk"),
+       DT_CLK(NULL, "per_abe_x1_gfclk2_div", "per_abe_x1_gfclk2_div"),
+       DT_CLK(NULL, "timer_sys_clk_div", "timer_sys_clk_div"),
+       DT_CLK(NULL, "video1_clk2_div", "video1_clk2_div"),
+       DT_CLK(NULL, "video1_div_clk", "video1_div_clk"),
+       DT_CLK(NULL, "video1_dpll_clk_mux", "video1_dpll_clk_mux"),
+       DT_CLK(NULL, "video2_clk2_div", "video2_clk2_div"),
+       DT_CLK(NULL, "video2_div_clk", "video2_div_clk"),
+       DT_CLK(NULL, "video2_dpll_clk_mux", "video2_dpll_clk_mux"),
+       DT_CLK(NULL, "wkupaon_iclk_mux", "wkupaon_iclk_mux"),
+       DT_CLK(NULL, "dss_32khz_clk", "dss_32khz_clk"),
+       DT_CLK(NULL, "dss_48mhz_clk", "dss_48mhz_clk"),
+       DT_CLK(NULL, "dss_dss_clk", "dss_dss_clk"),
+       DT_CLK(NULL, "dss_hdmi_clk", "dss_hdmi_clk"),
+       DT_CLK(NULL, "dss_video1_clk", "dss_video1_clk"),
+       DT_CLK(NULL, "dss_video2_clk", "dss_video2_clk"),
+       DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
+       DT_CLK(NULL, "gpio2_dbclk", "gpio2_dbclk"),
+       DT_CLK(NULL, "gpio3_dbclk", "gpio3_dbclk"),
+       DT_CLK(NULL, "gpio4_dbclk", "gpio4_dbclk"),
+       DT_CLK(NULL, "gpio5_dbclk", "gpio5_dbclk"),
+       DT_CLK(NULL, "gpio6_dbclk", "gpio6_dbclk"),
+       DT_CLK(NULL, "gpio7_dbclk", "gpio7_dbclk"),
+       DT_CLK(NULL, "gpio8_dbclk", "gpio8_dbclk"),
+       DT_CLK(NULL, "mmc1_clk32k", "mmc1_clk32k"),
+       DT_CLK(NULL, "mmc2_clk32k", "mmc2_clk32k"),
+       DT_CLK(NULL, "mmc3_clk32k", "mmc3_clk32k"),
+       DT_CLK(NULL, "mmc4_clk32k", "mmc4_clk32k"),
+       DT_CLK(NULL, "sata_ref_clk", "sata_ref_clk"),
+       DT_CLK(NULL, "usb_otg_ss1_refclk960m", "usb_otg_ss1_refclk960m"),
+       DT_CLK(NULL, "usb_otg_ss2_refclk960m", "usb_otg_ss2_refclk960m"),
+       DT_CLK(NULL, "usb_phy1_always_on_clk32k", "usb_phy1_always_on_clk32k"),
+       DT_CLK(NULL, "usb_phy2_always_on_clk32k", "usb_phy2_always_on_clk32k"),
+       DT_CLK(NULL, "usb_phy3_always_on_clk32k", "usb_phy3_always_on_clk32k"),
+       DT_CLK(NULL, "atl_dpll_clk_mux", "atl_dpll_clk_mux"),
+       DT_CLK(NULL, "atl_gfclk_mux", "atl_gfclk_mux"),
+       DT_CLK(NULL, "dcan1_sys_clk_mux", "dcan1_sys_clk_mux"),
+       DT_CLK(NULL, "gmac_gmii_ref_clk_div", "gmac_gmii_ref_clk_div"),
+       DT_CLK(NULL, "gmac_rft_clk_mux", "gmac_rft_clk_mux"),
+       DT_CLK(NULL, "gpu_core_gclk_mux", "gpu_core_gclk_mux"),
+       DT_CLK(NULL, "gpu_hyd_gclk_mux", "gpu_hyd_gclk_mux"),
+       DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1_gfclk_mux"),
+       DT_CLK(NULL, "l3instr_ts_gclk_div", "l3instr_ts_gclk_div"),
+       DT_CLK(NULL, "mcasp1_ahclkr_mux", "mcasp1_ahclkr_mux"),
+       DT_CLK(NULL, "mcasp1_ahclkx_mux", "mcasp1_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "mcasp1_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp2_ahclkr_mux", "mcasp2_ahclkr_mux"),
+       DT_CLK(NULL, "mcasp2_ahclkx_mux", "mcasp2_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "mcasp2_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp3_ahclkx_mux", "mcasp3_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "mcasp3_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp4_ahclkx_mux", "mcasp4_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "mcasp4_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp5_ahclkx_mux", "mcasp5_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "mcasp5_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp6_ahclkx_mux", "mcasp6_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "mcasp6_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp7_ahclkx_mux", "mcasp7_ahclkx_mux"),
+       DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "mcasp7_aux_gfclk_mux"),
+       DT_CLK(NULL, "mcasp8_ahclk_mux", "mcasp8_ahclk_mux"),
+       DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "mcasp8_aux_gfclk_mux"),
+       DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
+       DT_CLK(NULL, "mmc1_fclk_div", "mmc1_fclk_div"),
+       DT_CLK(NULL, "mmc2_fclk_mux", "mmc2_fclk_mux"),
+       DT_CLK(NULL, "mmc2_fclk_div", "mmc2_fclk_div"),
+       DT_CLK(NULL, "mmc3_gfclk_mux", "mmc3_gfclk_mux"),
+       DT_CLK(NULL, "mmc3_gfclk_div", "mmc3_gfclk_div"),
+       DT_CLK(NULL, "mmc4_gfclk_mux", "mmc4_gfclk_mux"),
+       DT_CLK(NULL, "mmc4_gfclk_div", "mmc4_gfclk_div"),
+       DT_CLK(NULL, "qspi_gfclk_mux", "qspi_gfclk_mux"),
+       DT_CLK(NULL, "qspi_gfclk_div", "qspi_gfclk_div"),
+       DT_CLK(NULL, "timer10_gfclk_mux", "timer10_gfclk_mux"),
+       DT_CLK(NULL, "timer11_gfclk_mux", "timer11_gfclk_mux"),
+       DT_CLK(NULL, "timer13_gfclk_mux", "timer13_gfclk_mux"),
+       DT_CLK(NULL, "timer14_gfclk_mux", "timer14_gfclk_mux"),
+       DT_CLK(NULL, "timer15_gfclk_mux", "timer15_gfclk_mux"),
+       DT_CLK(NULL, "timer16_gfclk_mux", "timer16_gfclk_mux"),
+       DT_CLK(NULL, "timer1_gfclk_mux", "timer1_gfclk_mux"),
+       DT_CLK(NULL, "timer2_gfclk_mux", "timer2_gfclk_mux"),
+       DT_CLK(NULL, "timer3_gfclk_mux", "timer3_gfclk_mux"),
+       DT_CLK(NULL, "timer4_gfclk_mux", "timer4_gfclk_mux"),
+       DT_CLK(NULL, "timer5_gfclk_mux", "timer5_gfclk_mux"),
+       DT_CLK(NULL, "timer6_gfclk_mux", "timer6_gfclk_mux"),
+       DT_CLK(NULL, "timer7_gfclk_mux", "timer7_gfclk_mux"),
+       DT_CLK(NULL, "timer8_gfclk_mux", "timer8_gfclk_mux"),
+       DT_CLK(NULL, "timer9_gfclk_mux", "timer9_gfclk_mux"),
+       DT_CLK(NULL, "uart10_gfclk_mux", "uart10_gfclk_mux"),
+       DT_CLK(NULL, "uart1_gfclk_mux", "uart1_gfclk_mux"),
+       DT_CLK(NULL, "uart2_gfclk_mux", "uart2_gfclk_mux"),
+       DT_CLK(NULL, "uart3_gfclk_mux", "uart3_gfclk_mux"),
+       DT_CLK(NULL, "uart4_gfclk_mux", "uart4_gfclk_mux"),
+       DT_CLK(NULL, "uart5_gfclk_mux", "uart5_gfclk_mux"),
+       DT_CLK(NULL, "uart6_gfclk_mux", "uart6_gfclk_mux"),
+       DT_CLK(NULL, "uart7_gfclk_mux", "uart7_gfclk_mux"),
+       DT_CLK(NULL, "uart8_gfclk_mux", "uart8_gfclk_mux"),
+       DT_CLK(NULL, "uart9_gfclk_mux", "uart9_gfclk_mux"),
+       DT_CLK(NULL, "vip1_gclk_mux", "vip1_gclk_mux"),
+       DT_CLK(NULL, "vip2_gclk_mux", "vip2_gclk_mux"),
+       DT_CLK(NULL, "vip3_gclk_mux", "vip3_gclk_mux"),
+       DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
+       DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
+       DT_CLK("omap_i2c.4", "ick", "dummy_ck"),
+       DT_CLK(NULL, "mailboxes_ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.0", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.1", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.2", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.3", "ick", "dummy_ck"),
+       DT_CLK("omap_hsmmc.4", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.1", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.2", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.3", "ick", "dummy_ck"),
+       DT_CLK("omap-mcbsp.4", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.1", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.2", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.3", "ick", "dummy_ck"),
+       DT_CLK("omap2_mcspi.4", "ick", "dummy_ck"),
+       DT_CLK(NULL, "uart1_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart2_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart3_ick", "dummy_ck"),
+       DT_CLK(NULL, "uart4_ick", "dummy_ck"),
+       DT_CLK("usbhs_omap", "usbhost_ick", "dummy_ck"),
+       DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
+       DT_CLK("omap_wdt", "ick", "dummy_ck"),
+       DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+       DT_CLK("4ae18000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("48036000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("4803e000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("48086000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("48088000.timer", "timer_sys_ck", "sys_clkin2"),
+       DT_CLK("48820000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+       DT_CLK("48822000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+       DT_CLK("48824000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+       DT_CLK("48826000.timer", "timer_sys_ck", "timer_sys_clk_div"),
+       DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
+       { .node_name = NULL },  /* sentinel */
+};
+
+/*
+ * dra7xx_dt_clk_init - DRA7 boot-time clock initialization
+ *
+ * Registers the DRA7 DT clock alias table, disables clock autoidle
+ * everywhere, locks the ABE DPLL (sourced from sys_clkin2) and the
+ * GMAC DPLL at their default rates.
+ *
+ * NOTE(review): unlike omap4xxx/omap5xxx_dt_clk_init() this returns
+ * the last clk_set_rate() result rather than 0 — confirm intentional.
+ */
+int __init dra7xx_dt_clk_init(void)
+{
+       int rc;
+       struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck;
+
+       ti_dt_clocks_register(dra7xx_clks);
+
+       omap2_clk_disable_autoidle_all();
+
+       /* NOTE(review): clk_get_sys() results not IS_ERR()-checked — verify */
+       abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
+       sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
+       dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
+
+       /* Source the ABE DPLL from sys_clkin2, then lock it at the default rate */
+       rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
+       if (!rc)
+               rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+       /* Lock the GMAC DPLL at its 1 GHz default rate */
+       dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
+       rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure GMAC DPLL!\n", __func__);
+
+       return rc;
+}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
new file mode 100644 (file)
index 0000000..b1a6f71
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * TI clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/list.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/* memmap index passed to the most recent ti_dt_clk_init_provider() call */
+static int ti_dt_clk_memmap_index;
+/* low-level clock register access ops, installed by the platform code */
+struct ti_clk_ll_ops *ti_clk_ll_ops;
+
+/**
+ * ti_dt_clocks_register - register DT alias clocks during boot
+ * @oclks: list of clocks to register, terminated by a NULL node_name
+ *
+ * Register alias or non-standard DT clock entries during boot. By
+ * default, DT clocks are found based on their node name. If any
+ * additional con-id / dev-id -> clock mapping is required, use this
+ * function to list these.
+ */
+void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+{
+       struct ti_dt_clk *c;
+       struct device_node *node;
+       struct clk *clk;
+       struct of_phandle_args clkspec;
+
+       for (c = oclks; c->node_name != NULL; c++) {
+               node = of_find_node_by_name(NULL, c->node_name);
+               /*
+                * NOTE(review): only clkspec.np is initialized here; the
+                * args/args_count fields are stack garbage.  Safe only if
+                * every provider reached this way ignores them -- verify.
+                */
+               clkspec.np = node;
+               clk = of_clk_get_from_provider(&clkspec);
+
+               if (!IS_ERR(clk)) {
+                       c->lk.clk = clk;
+                       clkdev_add(&c->lk);
+               } else {
+                       pr_warn("failed to lookup clock node %s\n",
+                               c->node_name);
+               }
+       }
+}
+
+/* Deferred clock init record, one per clock whose init must be retried */
+struct clk_init_item {
+       struct device_node *node;       /* DT node of the clock */
+       struct clk_hw *hw;              /* partially initialized hw clock */
+       ti_of_clk_init_cb_t func;       /* init callback to retry */
+       struct list_head link;          /* entry in retry_list */
+};
+
+/* Clocks whose first init failed; drained by ti_dt_clk_init_provider() */
+static LIST_HEAD(retry_list);
+
+/**
+ * ti_clk_retry_init - retries a failed clock init at later phase
+ * @node: device node for the clock
+ * @hw: partially initialized clk_hw struct for the clock
+ * @func: init function to be called for the clock
+ *
+ * Adds a failed clock init to the retry list. The retry list is parsed
+ * once all the other clocks have been initialized.
+ *
+ * Returns 0 on success, -ENOMEM if the retry record cannot be allocated.
+ */
+int __init ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+                             ti_of_clk_init_cb_t func)
+{
+       struct clk_init_item *retry;
+
+       pr_debug("%s: adding to retry list...\n", node->name);
+       retry = kzalloc(sizeof(*retry), GFP_KERNEL);
+       if (!retry)
+               return -ENOMEM;
+
+       retry->node = node;
+       retry->func = func;
+       retry->hw = hw;
+       list_add(&retry->link, &retry_list);
+
+       return 0;
+}
+
+/**
+ * ti_clk_get_reg_addr - get register address for a clock register
+ * @node: device node for the clock
+ * @index: register index from the clock node
+ *
+ * Builds a clock register "address" from device tree information. The
+ * returned value is not a real iomem pointer: it is a struct clk_omap_reg
+ * (memmap index + register offset) packed into a u32 and cast to
+ * void __iomem *, to be decoded later by the platform's ti_clk_ll_ops.
+ * Returns NULL if the node has no usable "reg" property at @index.
+ */
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
+{
+       struct clk_omap_reg *reg;
+       u32 val;
+       u32 tmp;
+
+       /* pun the u32 cookie through the clk_omap_reg layout */
+       reg = (struct clk_omap_reg *)&tmp;
+       reg->index = ti_dt_clk_memmap_index;
+
+       if (of_property_read_u32_index(node, "reg", index, &val)) {
+               pr_err("%s must have reg[%d]!\n", node->name, index);
+               return NULL;
+       }
+
+       reg->offset = val;
+
+       return (void __iomem *)tmp;
+}
+
+/**
+ * ti_dt_clk_init_provider - init master clock provider
+ * @parent: master node
+ * @index: internal index for clk_reg_ops
+ *
+ * Initializes a master clock IP block and its child clock nodes: every
+ * child of the "clocks" subnode whose compatible matches an entry in the
+ * clock-of table is initialized with its registered callback, then the
+ * global retry list of previously deferred clocks is drained.
+ */
+void ti_dt_clk_init_provider(struct device_node *parent, int index)
+{
+       const struct of_device_id *match;
+       struct device_node *np;
+       struct device_node *clocks;
+       of_clk_init_cb_t clk_init_cb;
+       struct clk_init_item *retry;
+       struct clk_init_item *tmp;
+
+       /* stashed for ti_clk_get_reg_addr() cookies built below */
+       ti_dt_clk_memmap_index = index;
+
+       /* get clocks for this parent */
+       clocks = of_get_child_by_name(parent, "clocks");
+       if (!clocks) {
+               pr_err("%s missing 'clocks' child node.\n", parent->name);
+               return;
+       }
+
+       /*
+        * NOTE(review): of_get_child_by_name() takes a reference on 'clocks'
+        * that is never dropped with of_node_put() -- confirm intentional.
+        */
+       for_each_child_of_node(clocks, np) {
+               match = of_match_node(&__clk_of_table, np);
+               if (!match)
+                       continue;
+               clk_init_cb = (of_clk_init_cb_t)match->data;
+               pr_debug("%s: initializing: %s\n", __func__, np->name);
+               clk_init_cb(np);
+       }
+
+       /* retry the clocks that could not be set up on the first pass */
+       list_for_each_entry_safe(retry, tmp, &retry_list, link) {
+               pr_debug("retry-init: %s\n", retry->node->name);
+               retry->func(retry->hw, retry->node);
+               list_del(&retry->link);
+               kfree(retry);
+       }
+}
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
new file mode 100644 (file)
index 0000000..f1e0038
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * OMAP clockdomain support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/*
+ * of_ti_clockdomain_setup - attach the clocks listed in @node to a clockdomain
+ * @node: "ti,clockdomain" DT node; its name is used as the clockdomain name
+ *
+ * Iterates the node's "clocks" phandles and records the clockdomain name in
+ * each non-basic hw-omap clock, then initializes its clockdomain linkage.
+ */
+static void __init of_ti_clockdomain_setup(struct device_node *node)
+{
+       struct clk *clk;
+       struct clk_hw *clk_hw;
+       const char *clkdm_name = node->name;
+       int i;
+       int num_clks;
+
+       num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+
+       for (i = 0; i < num_clks; i++) {
+               clk = of_clk_get(node, i);
+               /*
+                * of_clk_get() returns an ERR_PTR on failure; the previous
+                * code dereferenced it unconditionally via __clk_get_flags().
+                */
+               if (IS_ERR(clk)) {
+                       pr_warn("can't get clock %d for clkdm %s (%ld)\n",
+                               i, clkdm_name, PTR_ERR(clk));
+                       continue;
+               }
+               if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+                       pr_warn("can't setup clkdm for basic clk %s\n",
+                               __clk_get_name(clk));
+                       continue;
+               }
+               clk_hw = __clk_get_hw(clk);
+               to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
+               omap2_init_clk_clkdm(clk_hw);
+       }
+}
+
+/* DT match table for clockdomain nodes; init-only data */
+static struct of_device_id ti_clkdm_match_table[] __initdata = {
+       { .compatible = "ti,clockdomain" },
+       { }
+};
+
+/**
+ * ti_dt_clockdomains_setup - setup device tree clockdomains
+ *
+ * Initializes clockdomain nodes for a SoC. This parses through all the
+ * nodes with compatible = "ti,clockdomain", and adds the clockdomain
+ * info for all the clocks listed under these. This function shall be
+ * called after the rest of the DT clock init has completed and all
+ * clock nodes have been registered.
+ */
+void __init ti_dt_clockdomains_setup(void)
+{
+       struct device_node *np;
+       for_each_matching_node(np, ti_clkdm_match_table) {
+               of_ti_clockdomain_setup(np);
+       }
+}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
new file mode 100644 (file)
index 0000000..19d8980
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * TI composite clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/list.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/* NOTE(review): this macro appears unused in this file (divider.c has its
+ * own copy) -- verify before removing. */
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+/* Rate of the composite's divider component; delegates to the TI divider. */
+static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+/* Rate changes are not supported on composite dividers. */
+static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *prate)
+{
+       return -EINVAL;
+}
+
+/* Rate changes are not supported on composite dividers. */
+static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long parent_rate)
+{
+       return -EINVAL;
+}
+
+/* Divider component ops: read-only rate, no rate setting allowed */
+static const struct clk_ops ti_composite_divider_ops = {
+       .recalc_rate    = &ti_composite_recalc_rate,
+       .round_rate     = &ti_composite_round_rate,
+       .set_rate       = &ti_composite_set_rate,
+};
+
+/* Gate component ops: standard OMAP default enable/disable handling */
+static const struct clk_ops ti_composite_gate_ops = {
+       .enable         = &omap2_dflt_clk_enable,
+       .disable        = &omap2_dflt_clk_disable,
+       .is_enabled     = &omap2_dflt_clk_is_enabled,
+};
+
+/* One component (gate/divider/mux) waiting to be folded into a composite */
+struct component_clk {
+       int num_parents;                /* number of parent clock names */
+       const char **parent_names;      /* parent names, kzalloc'd */
+       struct device_node *node;       /* DT node of this component */
+       int type;                       /* CLK_COMPONENT_TYPE_* */
+       struct clk_hw *hw;              /* hw clock of the component */
+       struct list_head link;          /* entry in component_clks */
+};
+
+/*
+ * Names indexed by CLK_COMPONENT_TYPE_*, used in diagnostics only.
+ * NOTE(review): the array object itself is not const although tagged
+ * __initconst -- verify this does not cause a section mismatch warning.
+ */
+static const char * __initconst component_clk_types[] = {
+       "gate", "divider", "mux"
+};
+
+/* Components registered so far, consumed by ti_clk_register_composite() */
+static LIST_HEAD(component_clks);
+
+/* Resolve the i:th "clocks" phandle of @node to its device node, or NULL. */
+static struct device_node *_get_component_node(struct device_node *node, int i)
+{
+       struct of_phandle_args spec;
+
+       if (of_parse_phandle_with_args(node, "clocks", "#clock-cells", i,
+                                      &spec))
+               return NULL;
+
+       return spec.np;
+}
+
+/* Find the registered component clock matching @node, or NULL. */
+static struct component_clk *_lookup_component(struct device_node *node)
+{
+       struct component_clk *found = NULL;
+       struct component_clk *it;
+
+       list_for_each_entry(it, &component_clks, link) {
+               if (it->node != node)
+                       continue;
+               found = it;
+               break;
+       }
+
+       return found;
+}
+
+/* Composite clock under construction: per-type component nodes and clocks */
+struct clk_hw_omap_comp {
+       struct clk_hw hw;
+       struct device_node *comp_nodes[CLK_COMPONENT_TYPE_MAX];
+       struct component_clk *comp_clks[CLK_COMPONENT_TYPE_MAX];
+};
+
+/* Return the clk_hw of component @idx of @clk, or NULL when absent. */
+static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
+{
+       struct component_clk *comp = clk ? clk->comp_clks[idx] : NULL;
+
+       return comp ? comp->hw : NULL;
+}
+
+#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
+
+/*
+ * ti_clk_register_composite - register a composite clock once its
+ * components are available
+ * @hw: embedded clk_hw of the clk_hw_omap_comp under construction
+ * @node: DT node of the composite clock
+ *
+ * If any referenced component has not been registered yet, the whole init
+ * is queued on the retry list and the allocations are kept alive for the
+ * later attempt.  Otherwise the composite is registered from the collected
+ * mux/divider/gate parts and all bookkeeping data is freed.
+ */
+static void __init ti_clk_register_composite(struct clk_hw *hw,
+                                            struct device_node *node)
+{
+       struct clk *clk;
+       struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
+       struct component_clk *comp;
+       int num_parents = 0;
+       const char **parent_names = NULL;
+       int i;
+
+       /* Check for presence of each component clock */
+       for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+               if (!cclk->comp_nodes[i])
+                       continue;
+
+               comp = _lookup_component(cclk->comp_nodes[i]);
+               if (!comp) {
+                       pr_debug("component %s not ready for %s, retry\n",
+                                cclk->comp_nodes[i]->name, node->name);
+                       /* queued for retry: keep cclk alive, do not free */
+                       if (!ti_clk_retry_init(node, hw,
+                                              ti_clk_register_composite))
+                               return;
+
+                       goto cleanup;
+               }
+               if (cclk->comp_clks[comp->type] != NULL) {
+                       pr_err("duplicate component types for %s (%s)!\n",
+                              node->name, component_clk_types[comp->type]);
+                       goto cleanup;
+               }
+
+               cclk->comp_clks[comp->type] = comp;
+
+               /* Mark this node as found */
+               cclk->comp_nodes[i] = NULL;
+       }
+
+       /*
+        * All components exist, proceed with registration.  Use the parent
+        * list of the highest-priority component that has parents (gate is
+        * scanned last, i.e. has lowest priority here).
+        */
+       for (i = CLK_COMPONENT_TYPE_MAX - 1; i >= 0; i--) {
+               comp = cclk->comp_clks[i];
+               if (!comp)
+                       continue;
+               if (comp->num_parents) {
+                       num_parents = comp->num_parents;
+                       parent_names = comp->parent_names;
+                       break;
+               }
+       }
+
+       if (!num_parents) {
+               pr_err("%s: no parents found for %s!\n", __func__, node->name);
+               goto cleanup;
+       }
+
+       clk = clk_register_composite(NULL, node->name,
+                                    parent_names, num_parents,
+                                    _get_hw(cclk, CLK_COMPONENT_TYPE_MUX),
+                                    &ti_clk_mux_ops,
+                                    _get_hw(cclk, CLK_COMPONENT_TYPE_DIVIDER),
+                                    &ti_composite_divider_ops,
+                                    _get_hw(cclk, CLK_COMPONENT_TYPE_GATE),
+                                    &ti_composite_gate_ops, 0);
+
+       if (!IS_ERR(clk))
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+       /*
+        * Free component clock list entries.
+        * NOTE(review): comp->parent_names (kzalloc'd in
+        * ti_clk_add_component()) is not freed here -- verify whether this
+        * small leak is intentional (clk core copies the names).
+        */
+       for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+               if (!cclk->comp_clks[i])
+                       continue;
+               list_del(&cclk->comp_clks[i]->link);
+               kfree(cclk->comp_clks[i]);
+       }
+
+       kfree(cclk);
+}
+
+/**
+ * of_ti_composite_clk_setup - initialize a composite clock from DT
+ * @node: "ti,composite-clock" device node
+ *
+ * Collects the component clock nodes referenced by @node and attempts to
+ * register the composite clock (possibly deferred until every component
+ * has been registered).
+ */
+static void __init of_ti_composite_clk_setup(struct device_node *node)
+{
+       int num_clks;
+       int i;
+       struct clk_hw_omap_comp *cclk;
+
+       /* Number of component clocks to be put inside this clock */
+       num_clks = of_clk_get_parent_count(node);
+
+       if (num_clks < 1) {
+               pr_err("composite clk %s must have component(s)\n", node->name);
+               return;
+       }
+
+       /*
+        * cclk->comp_nodes only has room for CLK_COMPONENT_TYPE_MAX
+        * entries; reject malformed DT data instead of writing past the
+        * end of the array.
+        */
+       if (num_clks > CLK_COMPONENT_TYPE_MAX) {
+               pr_err("composite clk %s has too many components (%d)\n",
+                      node->name, num_clks);
+               return;
+       }
+
+       cclk = kzalloc(sizeof(*cclk), GFP_KERNEL);
+       if (!cclk)
+               return;
+
+       /* Get device node pointers for each component clock */
+       for (i = 0; i < num_clks; i++)
+               cclk->comp_nodes[i] = _get_component_node(node, i);
+
+       ti_clk_register_composite(&cclk->hw, node);
+}
+CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
+              of_ti_composite_clk_setup);
+
+/**
+ * ti_clk_add_component - add a component clock to the pool
+ * @node: device node of the component clock
+ * @hw: hardware clock definition for the component clock
+ * @type: type of the component clock (CLK_COMPONENT_TYPE_*)
+ *
+ * Adds a component clock to the list of available components, so that
+ * it can be registered by a composite clock.
+ *
+ * Returns 0 on success, -EINVAL if the node has no parents, -ENOMEM on
+ * allocation failure.
+ */
+int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
+                               int type)
+{
+       int num_parents;
+       const char **parent_names;
+       struct component_clk *clk;
+       int i;
+
+       num_parents = of_clk_get_parent_count(node);
+
+       if (num_parents < 1) {
+               pr_err("component-clock %s must have parent(s)\n", node->name);
+               return -EINVAL;
+       }
+
+       parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+       if (!parent_names)
+               return -ENOMEM;
+
+       for (i = 0; i < num_parents; i++)
+               parent_names[i] = of_clk_get_parent_name(node, i);
+
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk) {
+               kfree(parent_names);
+               return -ENOMEM;
+       }
+
+       clk->num_parents = num_parents;
+       clk->parent_names = parent_names;
+       clk->hw = hw;
+       clk->node = node;
+       clk->type = type;
+       list_add(&clk->link, &component_clks);
+
+       return 0;
+}
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
new file mode 100644 (file)
index 0000000..a15e445
--- /dev/null
@@ -0,0 +1,487 @@
+/*
+ * TI Divider Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+/* Bitmask covering the (unshifted) divider field of divider @d */
+#define div_mask(d)    ((1 << ((d)->width)) - 1)
+
+/* Largest divider value present in the zero-terminated @table. */
+static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
+{
+       const struct clk_div_table *t;
+       unsigned int best = 0;
+
+       for (t = table; t->div; t++) {
+               if (best < t->div)
+                       best = t->div;
+       }
+
+       return best;
+}
+
+/* Maximum divider value supported by @divider, per its flags/table. */
+static unsigned int _get_maxdiv(struct clk_divider *divider)
+{
+       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+               return div_mask(divider);
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << div_mask(divider);
+       if (divider->table)
+               return _get_table_maxdiv(divider->table);
+       return div_mask(divider) + 1;
+}
+
+/* Map register value @val to its divider via @table; 0 when not found. */
+static unsigned int _get_table_div(const struct clk_div_table *table,
+                                  unsigned int val)
+{
+       const struct clk_div_table *t = table;
+
+       while (t->div) {
+               if (t->val == val)
+                       return t->div;
+               t++;
+       }
+
+       return 0;
+}
+
+/* Decode the register field value @val into the effective divider. */
+static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+{
+       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+               return val;
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << val;
+       if (divider->table)
+               return _get_table_div(divider->table, val);
+       return val + 1;
+}
+
+/* Map divider @div to its register value via @table; 0 when not found. */
+static unsigned int _get_table_val(const struct clk_div_table *table,
+                                  unsigned int div)
+{
+       const struct clk_div_table *clkt;
+
+       for (clkt = table; clkt->div; clkt++)
+               if (clkt->div == div)
+                       return clkt->val;
+       return 0;
+}
+
+/*
+ * _get_val - encode divider value @div into the register field value
+ * @divider: divider clock
+ * @div: effective divider value
+ *
+ * Takes @div as unsigned int rather than u8: ti_clk_divider_set_rate()
+ * computes the divider as an unsigned int, and a u8 parameter would
+ * silently truncate dividers larger than 255 before encoding.
+ */
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+{
+       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+               return div;
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               return __ffs(div);
+       if (divider->table)
+               return  _get_table_val(divider->table, div);
+       return div - 1;
+}
+
+/* Compute the output rate from the parent rate and the hardware divider. */
+static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int div, val;
+
+       /* read the raw field via the platform's low-level register ops */
+       val = ti_clk_ll_ops->clk_readl(divider->reg) >> divider->shift;
+       val &= div_mask(divider);
+
+       div = _get_div(divider, val);
+       if (!div) {
+               /* a zero divider is only legal with CLK_DIVIDER_ALLOW_ZERO */
+               WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+                    "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+                    __clk_get_name(hw->clk));
+               return parent_rate;
+       }
+
+       return parent_rate / div;
+}
+
+/*
+ * The reverse of DIV_ROUND_UP: The maximum number which
+ * divided by m is r
+ */
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+
+/* True if divider @div appears in the zero-terminated @table. */
+static bool _is_valid_table_div(const struct clk_div_table *table,
+                               unsigned int div)
+{
+       const struct clk_div_table *clkt;
+
+       for (clkt = table; clkt->div; clkt++)
+               if (clkt->div == div)
+                       return true;
+       return false;
+}
+
+/* True if @div is representable by @divider's encoding (flags/table). */
+static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+{
+       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+               return is_power_of_2(div);
+       if (divider->table)
+               return _is_valid_table_div(divider->table, div);
+       return true;
+}
+
+/*
+ * ti_clk_divider_bestdiv - find the best divider for a target rate
+ * @hw: divider clock
+ * @rate: requested rate
+ * @best_parent_rate: in/out parent rate; may be updated when the clock is
+ *                    allowed to propagate rate changes to its parent
+ *
+ * Returns the divider (>= 1) that gets closest to @rate without going
+ * above it, optionally re-rounding the parent when CLK_SET_RATE_PARENT
+ * is set.
+ */
+static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+                                 unsigned long *best_parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       int i, bestdiv = 0;
+       unsigned long parent_rate, best = 0, now, maxdiv;
+       unsigned long parent_rate_saved = *best_parent_rate;
+
+       if (!rate)
+               rate = 1;
+
+       maxdiv = _get_maxdiv(divider);
+
+       /* fixed parent: simply divide, clamped to [1, maxdiv] */
+       if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+               parent_rate = *best_parent_rate;
+               bestdiv = DIV_ROUND_UP(parent_rate, rate);
+               bestdiv = bestdiv == 0 ? 1 : bestdiv;
+               bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+               return bestdiv;
+       }
+
+       /*
+        * The maximum divider we can use without overflowing
+        * unsigned long in rate * i below
+        */
+       maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+       for (i = 1; i <= maxdiv; i++) {
+               if (!_is_valid_div(divider, i))
+                       continue;
+               if (rate * i == parent_rate_saved) {
+                       /*
+                        * It's the most ideal case if the requested rate can be
+                        * divided from parent clock without needing to change
+                        * parent rate, so return the divider immediately.
+                        */
+                       *best_parent_rate = parent_rate_saved;
+                       return i;
+               }
+               parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+                               MULT_ROUND_UP(rate, i));
+               now = parent_rate / i;
+               if (now <= rate && now > best) {
+                       bestdiv = i;
+                       best = now;
+                       *best_parent_rate = parent_rate;
+               }
+       }
+
+       /* nothing matched: fall back to the slowest achievable rate */
+       if (!bestdiv) {
+               bestdiv = _get_maxdiv(divider);
+               *best_parent_rate =
+                       __clk_round_rate(__clk_get_parent(hw->clk), 1);
+       }
+
+       return bestdiv;
+}
+
+/* Round @rate to the closest achievable value (parent rate / best divider). */
+static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                                     unsigned long *prate)
+{
+       int div;
+       div = ti_clk_divider_bestdiv(hw, rate, prate);
+
+       return *prate / div;
+}
+
+/*
+ * Program the divider so that parent_rate / rate is written to the
+ * hardware field, clamped to the field width.  Always returns 0.
+ */
+static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int div, value;
+       unsigned long flags = 0;
+       u32 val;
+
+       /*
+        * NOTE(review): no guard against rate == 0 (division by zero) --
+        * verify the clk core always passes a rounded, non-zero rate here.
+        * Also verify div cannot exceed the range of _get_val()'s divider
+        * parameter, which would silently truncate it.
+        */
+       div = parent_rate / rate;
+       value = _get_val(divider, div);
+
+       /* clamp to the widest encodable field value */
+       if (value > div_mask(divider))
+               value = div_mask(divider);
+
+       if (divider->lock)
+               spin_lock_irqsave(divider->lock, flags);
+
+       if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+               /* hiword-mask registers: write-enable bits in the top half */
+               val = div_mask(divider) << (divider->shift + 16);
+       } else {
+               val = ti_clk_ll_ops->clk_readl(divider->reg);
+               val &= ~(div_mask(divider) << divider->shift);
+       }
+       val |= value << divider->shift;
+       ti_clk_ll_ops->clk_writel(val, divider->reg);
+
+       if (divider->lock)
+               spin_unlock_irqrestore(divider->lock, flags);
+
+       return 0;
+}
+
+/* clk_ops for TI dividers; also reused by the composite divider wrapper */
+const struct clk_ops ti_clk_divider_ops = {
+       .recalc_rate = ti_clk_divider_recalc_rate,
+       .round_rate = ti_clk_divider_round_rate,
+       .set_rate = ti_clk_divider_set_rate,
+};
+
+/*
+ * _register_divider - allocate and register a TI divider clock
+ * @dev: owning device, may be NULL for DT-created clocks
+ * @name: clock name
+ * @parent_name: name of the single parent, or NULL
+ * @flags: framework clk flags
+ * @reg: register cookie from ti_clk_get_reg_addr()
+ * @shift: bit shift of the divider field
+ * @width: width of the divider field
+ * @clk_divider_flags: CLK_DIVIDER_* flags
+ * @table: optional divider table (val <-> div mapping)
+ * @lock: optional spinlock protecting the register
+ *
+ * Returns the registered clk, or an ERR_PTR on failure (the divider
+ * struct is freed on registration failure; @table is NOT freed here).
+ */
+static struct clk *_register_divider(struct device *dev, const char *name,
+                                    const char *parent_name,
+                                    unsigned long flags, void __iomem *reg,
+                                    u8 shift, u8 width, u8 clk_divider_flags,
+                                    const struct clk_div_table *table,
+                                    spinlock_t *lock)
+{
+       struct clk_divider *div;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       /* hiword-mask registers only have 16 usable payload bits */
+       if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+               if (width + shift > 16) {
+                       pr_warn("divider value exceeds LOWORD field\n");
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
+       /* allocate the divider */
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div) {
+               pr_err("%s: could not allocate divider clk\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init.name = name;
+       init.ops = &ti_clk_divider_ops;
+       init.flags = flags | CLK_IS_BASIC;
+       init.parent_names = (parent_name ? &parent_name : NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       /* struct clk_divider assignments */
+       div->reg = reg;
+       div->shift = shift;
+       div->width = width;
+       div->flags = clk_divider_flags;
+       div->lock = lock;
+       div->hw.init = &init;
+       div->table = table;
+
+       /* register the clock */
+       clk = clk_register(dev, &div->hw);
+
+       if (IS_ERR(clk))
+               kfree(div);
+
+       return clk;
+}
+
+/*
+ * ti_clk_get_div_table - build a divider table from the "ti,dividers"
+ * DT property
+ * @node: device node of the divider clock
+ *
+ * Each cell i of "ti,dividers" holds the divider for register value i;
+ * zero cells mark invalid register values and are skipped.  Returns a
+ * kzalloc'd, zero-terminated table, NULL if the property is absent, or
+ * an ERR_PTR on error.
+ */
+static struct clk_div_table
+__init *ti_clk_get_div_table(struct device_node *node)
+{
+       struct clk_div_table *table;
+       const __be32 *divspec;
+       u32 val;
+       u32 num_div;
+       u32 valid_div;
+       int i;
+
+       /*
+        * NOTE(review): of_get_property()'s length parameter is int *;
+        * num_div is u32 -- verify this does not trigger a pointer-type
+        * warning on this tree.
+        */
+       divspec = of_get_property(node, "ti,dividers", &num_div);
+
+       if (!divspec)
+               return NULL;
+
+       /* property length in bytes -> number of u32 cells */
+       num_div /= 4;
+
+       valid_div = 0;
+
+       /* Determine required size for divider table */
+       for (i = 0; i < num_div; i++) {
+               of_property_read_u32_index(node, "ti,dividers", i, &val);
+               if (val)
+                       valid_div++;
+       }
+
+       if (!valid_div) {
+               pr_err("no valid dividers for %s table\n", node->name);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* +1 for the zero terminator expected by the clk framework */
+       table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       valid_div = 0;
+
+       for (i = 0; i < num_div; i++) {
+               of_property_read_u32_index(node, "ti,dividers", i, &val);
+               if (val) {
+                       table[valid_div].div = val;
+                       table[valid_div].val = i;
+                       valid_div++;
+               }
+       }
+
+       return table;
+}
+
+/*
+ * _get_divider_width - compute the bitfield width for a divider clock
+ * @node: device node of the divider
+ * @table: divider table, or NULL to use ti,min-div/ti,max-div
+ * @flags: CLK_DIVIDER_* flags influencing the encoding
+ *
+ * Returns the number of bits needed for the largest register value, or
+ * -EINVAL when no table and no "ti,max-div" property is given.
+ * NOTE(review): callers store the result into a u8 -- verify the error
+ * case is handled before the assignment truncates it.
+ */
+static int _get_divider_width(struct device_node *node,
+                             const struct clk_div_table *table,
+                             u8 flags)
+{
+       u32 min_div;
+       u32 max_div;
+       u32 val = 0;
+       u32 div;
+
+       if (!table) {
+               /* Clk divider table not provided, determine min/max divs */
+               if (of_property_read_u32(node, "ti,min-div", &min_div))
+                       min_div = 1;
+
+               if (of_property_read_u32(node, "ti,max-div", &max_div)) {
+                       pr_err("no max-div for %s!\n", node->name);
+                       return -EINVAL;
+               }
+
+               /* Determine bit width for the field */
+               if (flags & CLK_DIVIDER_ONE_BASED)
+                       val = 1;
+
+               div = min_div;
+
+               /* count encodable steps from min_div up to max_div */
+               while (div < max_div) {
+                       if (flags & CLK_DIVIDER_POWER_OF_TWO)
+                               div <<= 1;
+                       else
+                               div++;
+                       val++;
+               }
+       } else {
+               div = 0;
+
+               /* largest register value is in the last table entry */
+               while (table[div].div) {
+                       val = table[div].val;
+                       div++;
+               }
+       }
+
+       return fls(val);
+}
+
+/*
+ * ti_clk_divider_populate - parse common divider clock properties from DT
+ * @node: device node of the divider clock
+ * @reg: filled with the register cookie for the divider
+ * @table: filled with the divider table pointer (NULL if none)
+ * @flags: filled with framework clk flags parsed from DT
+ * @div_flags: filled with CLK_DIVIDER_* flags parsed from DT
+ * @width: filled with the divider bitfield width
+ * @shift: filled with the divider bitfield shift
+ *
+ * Returns 0 on success or a negative error code.  On failure *table is
+ * guaranteed to be NULL or a valid allocation, so callers can
+ * unconditionally kfree(*table) in their cleanup paths; previously an
+ * ERR_PTR could be left in *table and handed to kfree().  A negative
+ * width from _get_divider_width() is now propagated instead of being
+ * truncated into the u8 *width with a success return.
+ */
+static int __init ti_clk_divider_populate(struct device_node *node,
+       void __iomem **reg, const struct clk_div_table **table,
+       u32 *flags, u8 *div_flags, u8 *width, u8 *shift)
+{
+       int ret;
+       u32 val;
+
+       *reg = ti_clk_get_reg_addr(node, 0);
+       if (!*reg)
+               return -EINVAL;
+
+       if (!of_property_read_u32(node, "ti,bit-shift", &val))
+               *shift = val;
+       else
+               *shift = 0;
+
+       *flags = 0;
+       *div_flags = 0;
+
+       if (of_property_read_bool(node, "ti,index-starts-at-one"))
+               *div_flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (of_property_read_bool(node, "ti,index-power-of-two"))
+               *div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       if (of_property_read_bool(node, "ti,set-rate-parent"))
+               *flags |= CLK_SET_RATE_PARENT;
+
+       *table = ti_clk_get_div_table(node);
+
+       if (IS_ERR(*table)) {
+               ret = PTR_ERR(*table);
+               /* don't leak an ERR_PTR to the caller's kfree() */
+               *table = NULL;
+               return ret;
+       }
+
+       ret = _get_divider_width(node, *table, *div_flags);
+       if (ret < 0)
+               return ret;
+
+       *width = ret;
+
+       return 0;
+}
+
+/**
+ * of_ti_divider_clk_setup - Setup function for simple div rate clock
+ * @node: device node for this clock
+ *
+ * Sets up a basic divider clock and registers it as a clock provider;
+ * on success, autoidle support is also configured for the node.
+ */
+static void __init of_ti_divider_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg;
+       u8 clk_divider_flags = 0;
+       u8 width = 0;
+       u8 shift = 0;
+       const struct clk_div_table *table = NULL;
+       u32 flags = 0;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+
+       if (ti_clk_divider_populate(node, &reg, &table, &flags,
+                                   &clk_divider_flags, &width, &shift))
+               goto cleanup;
+
+       clk = _register_divider(NULL, node->name, parent_name, flags, reg,
+                               shift, width, clk_divider_flags, table, NULL);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               of_ti_clk_autoidle_setup(node);
+               /* table ownership stays with the registered divider */
+               return;
+       }
+
+cleanup:
+       /*
+        * NOTE(review): if ti_clk_divider_populate() can leave an ERR_PTR
+        * in 'table' on failure, this kfree() would be handed an error
+        * pointer -- verify.
+        */
+       kfree(table);
+}
+CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
+
+/*
+ * Setup function for a divider component of a composite clock: parses the
+ * divider properties and queues the clk_hw as a composite component.
+ */
+static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
+{
+       struct clk_divider *div;
+       u32 val;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return;
+
+       if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
+                                   &div->flags, &div->width, &div->shift) < 0)
+               goto cleanup;
+
+       /* on success, ownership of div passes to the composite machinery */
+       if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
+               return;
+
+cleanup:
+       /*
+        * NOTE(review): if ti_clk_divider_populate() can leave an ERR_PTR
+        * in div->table on failure, this kfree() would be handed an error
+        * pointer -- verify.
+        */
+       kfree(div->table);
+       kfree(div);
+}
+CLK_OF_DECLARE(ti_composite_divider_clk, "ti,composite-divider-clock",
+              of_ti_composite_divider_clk_setup);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
new file mode 100644 (file)
index 0000000..7e498a4
--- /dev/null
@@ -0,0 +1,558 @@
+/*
+ * OMAP DPLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define DPLL_HAS_AUTOIDLE      0x1
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+       defined(CONFIG_SOC_DRA7XX)
+static const struct clk_ops dpll_m4xen_ck_ops = {
+       .enable         = &omap3_noncore_dpll_enable,
+       .disable        = &omap3_noncore_dpll_disable,
+       .recalc_rate    = &omap4_dpll_regm4xen_recalc,
+       .round_rate     = &omap4_dpll_regm4xen_round_rate,
+       .set_rate       = &omap3_noncore_dpll_set_rate,
+       .get_parent     = &omap2_init_dpll_parent,
+};
+#endif
+
+static const struct clk_ops dpll_core_ck_ops = {
+       .recalc_rate    = &omap3_dpll_recalc,
+       .get_parent     = &omap2_init_dpll_parent,
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_core_ck_ops = {
+       .get_parent     = &omap2_init_dpll_parent,
+       .recalc_rate    = &omap3_dpll_recalc,
+       .round_rate     = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_ck_ops = {
+       .enable         = &omap3_noncore_dpll_enable,
+       .disable        = &omap3_noncore_dpll_disable,
+       .recalc_rate    = &omap3_dpll_recalc,
+       .round_rate     = &omap2_dpll_round_rate,
+       .set_rate       = &omap3_noncore_dpll_set_rate,
+       .get_parent     = &omap2_init_dpll_parent,
+};
+
+static const struct clk_ops dpll_no_gate_ck_ops = {
+       .recalc_rate    = &omap3_dpll_recalc,
+       .get_parent     = &omap2_init_dpll_parent,
+       .round_rate     = &omap2_dpll_round_rate,
+       .set_rate       = &omap3_noncore_dpll_set_rate,
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_ck_ops = {
+       .enable         = &omap3_noncore_dpll_enable,
+       .disable        = &omap3_noncore_dpll_disable,
+       .get_parent     = &omap2_init_dpll_parent,
+       .recalc_rate    = &omap3_dpll_recalc,
+       .set_rate       = &omap3_noncore_dpll_set_rate,
+       .round_rate     = &omap2_dpll_round_rate,
+};
+
+static const struct clk_ops omap3_dpll_per_ck_ops = {
+       .enable         = &omap3_noncore_dpll_enable,
+       .disable        = &omap3_noncore_dpll_disable,
+       .get_parent     = &omap2_init_dpll_parent,
+       .recalc_rate    = &omap3_dpll_recalc,
+       .set_rate       = &omap3_dpll4_set_rate,
+       .round_rate     = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_x2_ck_ops = {
+       .recalc_rate    = &omap3_clkoutx2_recalc,
+};
+
+/**
+ * ti_clk_register_dpll - low level registration of a DPLL clock
+ * @hw: hardware clock definition for the clock
+ * @node: device node for the clock
+ *
+ * Finalizes DPLL registration process. In case a failure (clk-ref or
+ * clk-bypass is missing), the clock is added to retry list and
+ * the initialization is retried on later stage.
+ */
+static void __init ti_clk_register_dpll(struct clk_hw *hw,
+                                       struct device_node *node)
+{
+       struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+       struct dpll_data *dd = clk_hw->dpll_data;
+       struct clk *clk;
+
+       dd->clk_ref = of_clk_get(node, 0);
+       dd->clk_bypass = of_clk_get(node, 1);
+
+       if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
+               pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
+                        node->name);
+               if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+                       return;
+
+               goto cleanup;
+       }
+
+       /* register the clock */
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (!IS_ERR(clk)) {
+               omap2_init_clk_hw_omap_clocks(clk);
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               kfree(clk_hw->hw.init->parent_names);
+               kfree(clk_hw->hw.init);
+               return;
+       }
+
+cleanup:
+       kfree(clk_hw->dpll_data);
+       kfree(clk_hw->hw.init->parent_names);
+       kfree(clk_hw->hw.init);
+       kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+/**
+ * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * @node: device node for this clock
+ * @ops: clk_ops for this clock
+ * @hw_ops: clk_hw_ops for this clock
+ *
+ * Initializes a DPLL x 2 clock from device tree data.
+ */
+static void ti_clk_register_dpll_x2(struct device_node *node,
+                                   const struct clk_ops *ops,
+                                   const struct clk_hw_omap_ops *hw_ops)
+{
+       struct clk *clk;
+       struct clk_init_data init = { NULL };
+       struct clk_hw_omap *clk_hw;
+       const char *name = node->name;
+       const char *parent_name;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       if (!parent_name) {
+               pr_err("%s must have parent\n", node->name);
+               return;
+       }
+
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       if (!clk_hw)
+               return;
+
+       clk_hw->ops = hw_ops;
+       clk_hw->hw.init = &init;
+
+       init.name = name;
+       init.ops = ops;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       /* register the clock */
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (IS_ERR(clk)) {
+               kfree(clk_hw);
+       } else {
+               omap2_init_clk_hw_omap_clocks(clk);
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       }
+}
+#endif
+
+/**
+ * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
+ * @node: device node containing the DPLL info
+ * @ops: ops for the DPLL
+ * @ddt: DPLL data template to use
+ * @init_flags: flags for controlling init types
+ *
+ * Initializes a DPLL clock from device tree data.
+ */
+static void __init of_ti_dpll_setup(struct device_node *node,
+                                   const struct clk_ops *ops,
+                                   const struct dpll_data *ddt,
+                                   u8 init_flags)
+{
+       struct clk_hw_omap *clk_hw = NULL;
+       struct clk_init_data *init = NULL;
+       const char **parent_names = NULL;
+       struct dpll_data *dd = NULL;
+       int i;
+       u8 dpll_mode = 0;
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!dd || !clk_hw || !init)
+               goto cleanup;
+
+       memcpy(dd, ddt, sizeof(*dd));
+
+       clk_hw->dpll_data = dd;
+       clk_hw->ops = &clkhwops_omap3_dpll;
+       clk_hw->hw.init = init;
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       init->name = node->name;
+       init->ops = ops;
+
+       init->num_parents = of_clk_get_parent_count(node);
+       if (init->num_parents < 1) {
+               pr_err("%s must have parent(s)\n", node->name);
+               goto cleanup;
+       }
+
+       parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
+       if (!parent_names)
+               goto cleanup;
+
+       for (i = 0; i < init->num_parents; i++)
+               parent_names[i] = of_clk_get_parent_name(node, i);
+
+       init->parent_names = parent_names;
+
+       dd->control_reg = ti_clk_get_reg_addr(node, 0);
+       dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
+       dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
+
+       if (!dd->control_reg || !dd->idlest_reg || !dd->mult_div1_reg)
+               goto cleanup;
+
+       if (init_flags & DPLL_HAS_AUTOIDLE) {
+               dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
+               if (!dd->autoidle_reg)
+                       goto cleanup;
+       }
+
+       if (of_property_read_bool(node, "ti,low-power-stop"))
+               dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
+
+       if (of_property_read_bool(node, "ti,low-power-bypass"))
+               dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
+
+       if (of_property_read_bool(node, "ti,lock"))
+               dpll_mode |= 1 << DPLL_LOCKED;
+
+       if (dpll_mode)
+               dd->modes = dpll_mode;
+
+       ti_clk_register_dpll(&clk_hw->hw, node);
+       return;
+
+cleanup:
+       kfree(dd);
+       kfree(parent_names);
+       kfree(init);
+       kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+       defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
+{
+       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
+              of_ti_omap4_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_SOC_AM33XX
+static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
+{
+       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+}
+CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
+              of_ti_am3_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_omap3_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .freqsel_mask = 0xf0,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
+              of_ti_omap3_dpll_setup);
+
+static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 16,
+               .div1_mask = 0x7f << 8,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .freqsel_mask = 0xf0,
+       };
+
+       of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
+              of_ti_omap3_core_dpll_setup);
+
+static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1 << 1,
+               .enable_mask = 0x7 << 16,
+               .autoidle_mask = 0x7 << 3,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .freqsel_mask = 0xf00000,
+               .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
+              of_ti_omap3_per_dpll_setup);
+
+static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1 << 1,
+               .enable_mask = 0x7 << 16,
+               .autoidle_mask = 0x7 << 3,
+               .mult_mask = 0xfff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 4095,
+               .max_divider = 128,
+               .min_divider = 1,
+               .sddiv_mask = 0xff << 24,
+               .dco_mask = 0xe << 20,
+               .flags = DPLL_J_TYPE,
+               .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
+              of_ti_omap3_per_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_omap4_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
+              of_ti_omap4_dpll_setup);
+
+static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
+              of_ti_omap4_core_dpll_setup);
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+       defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .m4xen_mask = 0x800,
+               .lpmode_mask = 1 << 10,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
+              of_ti_omap4_m4xen_dpll_setup);
+
+static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0xfff << 8,
+               .div1_mask = 0xff,
+               .max_multiplier = 4095,
+               .max_divider = 256,
+               .min_divider = 1,
+               .sddiv_mask = 0xff << 24,
+               .flags = DPLL_J_TYPE,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd, DPLL_HAS_AUTOIDLE);
+}
+CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
+              of_ti_omap4_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
+              of_ti_am3_no_gate_dpll_setup);
+
+static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 4095,
+               .max_divider = 256,
+               .min_divider = 2,
+               .flags = DPLL_J_TYPE,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
+              of_ti_am3_jtype_dpll_setup);
+
+static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .flags = DPLL_J_TYPE,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
+              "ti,am3-dpll-no-gate-j-type-clock",
+              of_ti_am3_no_gate_jtype_dpll_setup);
+
+static void __init of_ti_am3_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
+
+static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
+{
+       const struct dpll_data dd = {
+               .idlest_mask = 0x1,
+               .enable_mask = 0x7,
+               .autoidle_mask = 0x7,
+               .mult_mask = 0x7ff << 8,
+               .div1_mask = 0x7f,
+               .max_multiplier = 2047,
+               .max_divider = 128,
+               .min_divider = 1,
+               .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+       };
+
+       of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd, 0);
+}
+CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
+              of_ti_am3_core_dpll_setup);
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
new file mode 100644 (file)
index 0000000..c2c8a28
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * TI Fixed Factor Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/**
+ * of_ti_fixed_factor_clk_setup - Setup function for TI fixed factor clock
+ * @node: device node for this clock
+ *
+ * Sets up a simple fixed factor clock based on device tree info.
+ */
+static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+       const char *clk_name = node->name;
+       const char *parent_name;
+       u32 div, mult;
+       u32 flags = 0;
+
+       if (of_property_read_u32(node, "ti,clock-div", &div)) {
+               pr_err("%s must have a clock-div property\n", node->name);
+               return;
+       }
+
+       if (of_property_read_u32(node, "ti,clock-mult", &mult)) {
+               pr_err("%s must have a clock-mult property\n", node->name);
+               return;
+       }
+
+       if (of_property_read_bool(node, "ti,set-rate-parent"))
+               flags |= CLK_SET_RATE_PARENT;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+
+       clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
+                                       mult, div);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               of_ti_clk_autoidle_setup(node);
+       }
+}
+CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock",
+              of_ti_fixed_factor_clk_setup);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
new file mode 100644 (file)
index 0000000..3e2999d
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * OMAP gate clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk);
+
+static const struct clk_ops omap_gate_clkdm_clk_ops = {
+       .init           = &omap2_init_clk_clkdm,
+       .enable         = &omap2_clkops_enable_clkdm,
+       .disable        = &omap2_clkops_disable_clkdm,
+};
+
+static const struct clk_ops omap_gate_clk_ops = {
+       .init           = &omap2_init_clk_clkdm,
+       .enable         = &omap2_dflt_clk_enable,
+       .disable        = &omap2_dflt_clk_disable,
+       .is_enabled     = &omap2_dflt_clk_is_enabled,
+};
+
+static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
+       .init           = &omap2_init_clk_clkdm,
+       .enable         = &omap36xx_gate_clk_enable_with_hsdiv_restore,
+       .disable        = &omap2_dflt_clk_disable,
+       .is_enabled     = &omap2_dflt_clk_is_enabled,
+};
+
+/**
+ * omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
+ *         from the HSDivider PWRDN problem. Implements Errata ID: i556.
+ * @clk: DPLL output struct clk
+ *
+ * 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
+ * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with the reset
+ * value after their respective PWRDN bits are set.  Any dummy write
+ * (any value different from the read value) to the
+ * corresponding CM_CLKSEL register will refresh the dividers.
+ */
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
+{
+       struct clk_divider *parent;
+       struct clk_hw *parent_hw;
+       u32 dummy_v, orig_v;
+       int ret;
+
+       /* Clear PWRDN bit of HSDIVIDER */
+       ret = omap2_dflt_clk_enable(clk);
+
+       /* Parent is the x2 node, get parent of parent for the m2 div */
+       parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
+       parent = to_clk_divider(parent_hw);
+
+       /* Restore the dividers */
+       if (!ret) {
+               orig_v = ti_clk_ll_ops->clk_readl(parent->reg);
+               dummy_v = orig_v;
+
+               /* Write any other value different from the Read value */
+               dummy_v ^= (1 << parent->shift);
+               ti_clk_ll_ops->clk_writel(dummy_v, parent->reg);
+
+               /* Write the original divider */
+               ti_clk_ll_ops->clk_writel(orig_v, parent->reg);
+       }
+
+       return ret;
+}
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+                                        const struct clk_ops *ops,
+                                        const struct clk_hw_omap_ops *hw_ops)
+{
+       struct clk *clk;
+       struct clk_init_data init = { NULL };
+       struct clk_hw_omap *clk_hw;
+       const char *clk_name = node->name;
+       const char *parent_name;
+       u32 val;
+
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       if (!clk_hw)
+               return;
+
+       clk_hw->hw.init = &init;
+
+       init.name = clk_name;
+       init.ops = ops;
+
+       if (ops != &omap_gate_clkdm_clk_ops) {
+               clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
+               if (!clk_hw->enable_reg)
+                       goto cleanup;
+
+               if (!of_property_read_u32(node, "ti,bit-shift", &val))
+                       clk_hw->enable_bit = val;
+       }
+
+       clk_hw->ops = hw_ops;
+
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       if (of_clk_get_parent_count(node) != 1) {
+               pr_err("%s must have 1 parent\n", clk_name);
+               goto cleanup;
+       }
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       if (of_property_read_bool(node, "ti,set-rate-parent"))
+               init.flags |= CLK_SET_RATE_PARENT;
+
+       if (of_property_read_bool(node, "ti,set-bit-to-disable"))
+               clk_hw->flags |= INVERT_ENABLE;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               return;
+       }
+
+cleanup:
+       kfree(clk_hw);
+}
+
+static void __init
+_of_ti_composite_gate_clk_setup(struct device_node *node,
+                               const struct clk_hw_omap_ops *hw_ops)
+{
+       struct clk_hw_omap *gate;
+       u32 val = 0;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               return;
+
+       gate->enable_reg = ti_clk_get_reg_addr(node, 0);
+       if (!gate->enable_reg)
+               goto cleanup;
+
+       of_property_read_u32(node, "ti,bit-shift", &val);
+
+       gate->enable_bit = val;
+       gate->ops = hw_ops;
+       gate->flags = MEMMAP_ADDRESSING;
+
+       if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
+               return;
+
+cleanup:
+       kfree(gate);
+}
+
+static void __init
+of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_composite_gate_clk_setup(node, NULL);
+}
+CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock",
+              of_ti_composite_no_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_composite_interface_clk, "ti,composite-interface-clock",
+              of_ti_composite_interface_clk_setup);
+#endif
+
+static void __init of_ti_composite_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_composite_gate_clk_setup(node, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_composite_gate_clk, "ti,composite-gate-clock",
+              of_ti_composite_gate_clk_setup);
+
+
+static void __init of_ti_clkdm_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_gate_clk_setup(node, &omap_gate_clkdm_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_clkdm_gate_clk, "ti,clkdm-gate-clock",
+              of_ti_clkdm_gate_clk_setup);
+
+static void __init of_ti_hsdiv_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_gate_clk_setup(node, &omap_gate_clk_hsdiv_restore_ops,
+                             &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_hsdiv_gate_clk, "ti,hsdiv-gate-clock",
+              of_ti_hsdiv_gate_clk_setup);
+
+static void __init of_ti_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_gate_clk, "ti,gate-clock", of_ti_gate_clk_setup);
+
+static void __init of_ti_wait_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_wait_gate_clk, "ti,wait-gate-clock",
+              of_ti_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_am35xx_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+                             &clkhwops_am35xx_ipss_module_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_gate_clk, "ti,am35xx-gate-clock",
+              of_ti_am35xx_gate_clk_setup);
+
+static void __init of_ti_dss_gate_clk_setup(struct device_node *node)
+{
+       _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+                             &clkhwops_omap3430es2_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_gate_clk, "ti,dss-gate-clock",
+              of_ti_dss_gate_clk_setup);
+#endif
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
new file mode 100644 (file)
index 0000000..320a2b1
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * OMAP interface clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static const struct clk_ops ti_interface_clk_ops = {
+       .init           = &omap2_init_clk_clkdm,
+       .enable         = &omap2_dflt_clk_enable,
+       .disable        = &omap2_dflt_clk_disable,
+       .is_enabled     = &omap2_dflt_clk_is_enabled,
+};
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+                                             const struct clk_hw_omap_ops *ops)
+{
+       struct clk *clk;
+       struct clk_init_data init = { NULL };
+       struct clk_hw_omap *clk_hw;
+       const char *parent_name;
+       u32 val;
+
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       if (!clk_hw)
+               return;
+
+       clk_hw->hw.init = &init;
+       clk_hw->ops = ops;
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
+       if (!clk_hw->enable_reg)
+               goto cleanup;
+
+       if (!of_property_read_u32(node, "ti,bit-shift", &val))
+               clk_hw->enable_bit = val;
+
+       init.name = node->name;
+       init.ops = &ti_interface_clk_ops;
+       init.flags = 0;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       if (!parent_name) {
+               pr_err("%s must have a parent\n", node->name);
+               goto cleanup;
+       }
+
+       init.num_parents = 1;
+       init.parent_names = &parent_name;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               omap2_init_clk_hw_omap_clocks(clk);
+               return;
+       }
+
+cleanup:
+       kfree(clk_hw);
+}
+
+static void __init of_ti_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_interface_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_interface_clk, "ti,omap3-interface-clock",
+              of_ti_interface_clk_setup);
+
+static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_interface_clk_setup(node, &clkhwops_iclk);
+}
+CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock",
+              of_ti_no_wait_interface_clk_setup);
+
+static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_interface_clk_setup(node,
+                                  &clkhwops_omap3430es2_iclk_hsotgusb_wait);
+}
+CLK_OF_DECLARE(ti_hsotgusb_interface_clk, "ti,omap3-hsotgusb-interface-clock",
+              of_ti_hsotgusb_interface_clk_setup);
+
+static void __init of_ti_dss_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_interface_clk_setup(node,
+                                  &clkhwops_omap3430es2_iclk_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_interface_clk, "ti,omap3-dss-interface-clock",
+              of_ti_dss_interface_clk_setup);
+
+static void __init of_ti_ssi_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_interface_clk_setup(node, &clkhwops_omap3430es2_iclk_ssi_wait);
+}
+CLK_OF_DECLARE(ti_ssi_interface_clk, "ti,omap3-ssi-interface-clock",
+              of_ti_ssi_interface_clk_setup);
+
+static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
+{
+       _of_ti_interface_clk_setup(node, &clkhwops_am35xx_ipss_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock",
+              of_ti_am35xx_interface_clk_setup);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
new file mode 100644 (file)
index 0000000..0197a47
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * TI Multiplexer Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
+static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
+{
+       struct clk_mux *mux = to_clk_mux(hw);
+       int num_parents = __clk_get_num_parents(hw->clk);
+       u32 val;
+
+       /*
+        * FIXME need a mux-specific flag to determine if val is bitwise or
+        * numeric. e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges
+        * from 0x1 to 0x7 (index starts at one)
+        * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+        * val = 0x4 really means "bit 2, index starts at bit 0"
+        */
+       val = ti_clk_ll_ops->clk_readl(mux->reg) >> mux->shift;
+       val &= mux->mask;
+
+       if (mux->table) {
+               int i;
+
+               for (i = 0; i < num_parents; i++)
+                       if (mux->table[i] == val)
+                               return i;
+               return -EINVAL;
+       }
+
+       if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+               val = ffs(val) - 1;
+
+       if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+               val--;
+
+       if (val >= num_parents)
+               return -EINVAL;
+
+       return val;
+}
+
+static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_mux *mux = to_clk_mux(hw);
+       u32 val;
+       unsigned long flags = 0;
+
+       if (mux->table) {
+               index = mux->table[index];
+       } else {
+               if (mux->flags & CLK_MUX_INDEX_BIT)
+                       index = (1 << ffs(index));
+
+               if (mux->flags & CLK_MUX_INDEX_ONE)
+                       index++;
+       }
+
+       if (mux->lock)
+               spin_lock_irqsave(mux->lock, flags);
+
+       if (mux->flags & CLK_MUX_HIWORD_MASK) {
+               val = mux->mask << (mux->shift + 16);
+       } else {
+               val = ti_clk_ll_ops->clk_readl(mux->reg);
+               val &= ~(mux->mask << mux->shift);
+       }
+       val |= index << mux->shift;
+       ti_clk_ll_ops->clk_writel(val, mux->reg);
+
+       if (mux->lock)
+               spin_unlock_irqrestore(mux->lock, flags);
+
+       return 0;
+}
+
+const struct clk_ops ti_clk_mux_ops = {
+       .get_parent = ti_clk_mux_get_parent,
+       .set_parent = ti_clk_mux_set_parent,
+       .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk *_register_mux(struct device *dev, const char *name,
+                                const char **parent_names, u8 num_parents,
+                                unsigned long flags, void __iomem *reg,
+                                u8 shift, u32 mask, u8 clk_mux_flags,
+                                u32 *table, spinlock_t *lock)
+{
+       struct clk_mux *mux;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       /* allocate the mux */
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux) {
+               pr_err("%s: could not allocate mux clk\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init.name = name;
+       init.ops = &ti_clk_mux_ops;
+       init.flags = flags | CLK_IS_BASIC;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+
+       /* struct clk_mux assignments */
+       mux->reg = reg;
+       mux->shift = shift;
+       mux->mask = mask;
+       mux->flags = clk_mux_flags;
+       mux->lock = lock;
+       mux->table = table;
+       mux->hw.init = &init;
+
+       clk = clk_register(dev, &mux->hw);
+
+       if (IS_ERR(clk))
+               kfree(mux);
+
+       return clk;
+}
+
+/**
+ * of_mux_clk_setup - Setup function for simple mux rate clock
+ * @node: DT node for the clock
+ *
+ * Sets up a basic clock multiplexer.
+ */
+static void of_mux_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+       void __iomem *reg;
+       int num_parents;
+       const char **parent_names;
+       int i;
+       u8 clk_mux_flags = 0;
+       u32 mask = 0;
+       u32 shift = 0;
+       u32 flags = 0;
+
+       num_parents = of_clk_get_parent_count(node);
+       if (num_parents < 2) {
+               pr_err("mux-clock %s must have parents\n", node->name);
+               return;
+       }
+       parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+       if (!parent_names)
+               goto cleanup;
+
+       for (i = 0; i < num_parents; i++)
+               parent_names[i] = of_clk_get_parent_name(node, i);
+
+       reg = ti_clk_get_reg_addr(node, 0);
+
+       if (!reg)
+               goto cleanup;
+
+       of_property_read_u32(node, "ti,bit-shift", &shift);
+
+       if (of_property_read_bool(node, "ti,index-starts-at-one"))
+               clk_mux_flags |= CLK_MUX_INDEX_ONE;
+
+       if (of_property_read_bool(node, "ti,set-rate-parent"))
+               flags |= CLK_SET_RATE_PARENT;
+
+       /* Generate bit-mask based on parent info */
+       mask = num_parents;
+       if (!(clk_mux_flags & CLK_MUX_INDEX_ONE))
+               mask--;
+
+       mask = (1 << fls(mask)) - 1;
+
+       clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
+                           reg, shift, mask, clk_mux_flags, NULL, NULL);
+
+       if (!IS_ERR(clk))
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+       kfree(parent_names);
+}
+CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
+
+static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
+{
+       struct clk_mux *mux;
+       int num_parents;
+       u32 val;
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return;
+
+       mux->reg = ti_clk_get_reg_addr(node, 0);
+
+       if (!mux->reg)
+               goto cleanup;
+
+       if (!of_property_read_u32(node, "ti,bit-shift", &val))
+               mux->shift = val;
+
+       if (of_property_read_bool(node, "ti,index-starts-at-one"))
+               mux->flags |= CLK_MUX_INDEX_ONE;
+
+       num_parents = of_clk_get_parent_count(node);
+
+       if (num_parents < 2) {
+               pr_err("%s must have parents\n", node->name);
+               goto cleanup;
+       }
+
+       mux->mask = num_parents - 1;
+       mux->mask = (1 << fls(mux->mask)) - 1;
+
+       if (!ti_clk_add_component(node, &mux->hw, CLK_COMPONENT_TYPE_MUX))
+               return;
+
+cleanup:
+       kfree(mux);
+}
+CLK_OF_DECLARE(ti_composite_mux_clk_setup, "ti,composite-mux-clock",
+              of_ti_composite_mux_clk_setup);
index c10eb89a3c1bdd388da699f3ac4f8e00c92cc63b..9bed1a2a67a12e44cde304995b6895e3f8296c2a 100644 (file)
@@ -306,6 +306,12 @@ config DMA_OMAP
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
 
+config DMA_BCM2835
+       tristate "BCM2835 DMA engine support"
+       depends on (ARCH_BCM2835 || MACH_BCM2708)
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+
 config TI_CPPI41
        tristate "AM33xx CPPI41 DMA support"
        depends on ARCH_OMAP
@@ -336,6 +342,14 @@ config K3_DMA
          Support the DMA engine for Hisilicon K3 platform
          devices.
 
+config MOXART_DMA
+       tristate "MOXART DMA support"
+       depends on ARCH_MOXART
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
        bool
 
index 0ce2da97e42972b82a61197a4fe4bb52c35ceac4..a029d0f4a1be8088c00c459373f469580925d981 100644 (file)
@@ -38,7 +38,9 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
index e69b03c0fa50cfae7ca6528f5eef0d9eaf1d8d53..1e506afa33f5e5a9b95753ce0e98ceb2f4ce7e66 100644 (file)
@@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock);
  * @adev:      ACPI device to match with
  * @adma:      struct acpi_dma of the given DMA controller
  *
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
  * In order to match a device from DSDT table to the corresponding CSRT device
  * we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
  */
 static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
                struct acpi_device *adev, struct acpi_dma *adma)
@@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
  *
  * We are using this table to get the request line range of the specific DMA
  * controller to be used later.
- *
  */
 static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
 {
@@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
  * @data               pointer to controller specific data to be used by
  *                     translation function
  *
- * Returns 0 on success or appropriate errno value on error.
- *
  * Allocated memory should be freed with appropriate acpi_dma_controller_free()
  * call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_register(struct device *dev,
                struct dma_chan *(*acpi_dma_xlate)
@@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
  * @dev:       struct device of DMA controller
  *
  * Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_free(struct device *dev)
 {
@@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
  * Managed acpi_dma_controller_register(). DMA controller registered by this
  * function are automatically freed on driver detach. See
  * acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int devm_acpi_dma_controller_register(struct device *dev,
                struct dma_chan *(*acpi_dma_xlate)
@@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  * @adma:      struct acpi_dma of DMA controller
  * @dma_spec:  dma specifier to update
  *
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
  * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
  * Descriptor":
  *     DMA Request Line bits is a platform-relative number uniquely
@@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  *     mapping is done in a controller-specific OS driver.
  * That's why we can safely adjust slave_id when the appropriate controller is
  * found.
+ *
+ * Return:
+ * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
  */
 static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
                struct acpi_dma_spec *dma_spec)
@@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
  * @dev:       struct device to get DMA request from
  * @index:     index of FixedDMA descriptor for @dev
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
                size_t index)
@@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
  * translate the names "tx" and "rx" here based on the most common case where
  * the first FixedDMA descriptor is TX and second is RX.
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
                const char *name)
@@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
  * @adma: pointer to ACPI DMA controller data
  *
  * A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
  */
 struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
                struct acpi_dma *adma)
index ec4ee5c1fe9dc2115e029d0c472bd32f48cb281c..8114731a1c62d6450cd5f8bbd490f87c7800b1ea 100644 (file)
@@ -83,6 +83,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 
        return false;
 }
+EXPORT_SYMBOL_GPL(pl08x_filter_id);
 
 /*
  * Just check that the device is there and active
@@ -2167,7 +2169,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        /* Register slave channels */
        ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
                        pl08x->pd->num_slave_channels, true);
-       if (ret <= 0) {
+       if (ret < 0) {
                dev_warn(&pl08x->adev->dev,
                        "%s failed to enumerate slave channels - %d\n",
                                __func__, ret);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644 (file)
index 0000000..a036021
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author:      Florian Meier <florian.meier@koalo.de>
+ *              Copyright 2013
+ *
+ * Based on
+ *     OMAP DMAengine support by Russell King
+ *
+ *     BCM2708 DMA Driver
+ *     Copyright (C) 2010 Broadcom
+ *
+ *     Raspberry Pi PCM I2S ALSA Driver
+ *     Copyright (c) by Phil Poole 2013
+ *
+ *     MARVELL MMP Peripheral DMA Driver
+ *     Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+       struct dma_device ddev;
+       spinlock_t lock;
+       void __iomem *base;
+       struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+       uint32_t info;
+       uint32_t src;
+       uint32_t dst;
+       uint32_t length;
+       uint32_t stride;
+       uint32_t next;
+       uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+       struct virt_dma_chan vc;
+       struct list_head node;
+
+       struct dma_slave_config cfg;
+       bool cyclic;
+       unsigned int dreq;
+
+       int ch;
+       struct bcm2835_desc *desc;
+
+       void __iomem *chan_base;
+       int irq_number;
+};
+
+struct bcm2835_desc {
+       struct virt_dma_desc vd;
+       enum dma_transfer_direction dir;
+
+       unsigned int control_block_size;
+       struct bcm2835_dma_cb *control_block_base;
+       dma_addr_t control_block_base_phys;
+
+       unsigned int frames;
+       size_t size;
+};
+
+#define BCM2835_DMA_CS         0x00
+#define BCM2835_DMA_ADDR       0x04
+#define BCM2835_DMA_SOURCE_AD  0x0c
+#define BCM2835_DMA_DEST_AD    0x10
+#define BCM2835_DMA_NEXTCB     0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE     BIT(0)
+#define BCM2835_DMA_INT        BIT(2)
+#define BCM2835_DMA_ISPAUSED   BIT(4)  /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD     BIT(5)  /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR        BIT(8)
+#define BCM2835_DMA_ABORT      BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET      BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN     BIT(0)
+#define BCM2835_DMA_D_INC      BIT(4)
+#define BCM2835_DMA_D_DREQ     BIT(6)
+#define BCM2835_DMA_S_INC      BIT(8)
+#define BCM2835_DMA_S_DREQ     BIT(10)
+
+#define BCM2835_DMA_PER_MAP(x) ((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8       1
+#define BCM2835_DMA_DATA_TYPE_S16      2
+#define BCM2835_DMA_DATA_TYPE_S32      4
+#define BCM2835_DMA_DATA_TYPE_S128     16
+
+#define BCM2835_DMA_BULK_MASK  BIT(0)
+#define BCM2835_DMA_FIQ_MASK   (BIT(2) | BIT(3))
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n)    ((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+       return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+               struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+       struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+       dma_free_coherent(desc->vd.tx.chan->device->dev,
+                       desc->control_block_size,
+                       desc->control_block_base,
+                       desc->control_block_base_phys);
+       kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+       unsigned long cs;
+       long int timeout = 10000;
+
+       cs = readl(chan_base + BCM2835_DMA_CS);
+       if (!(cs & BCM2835_DMA_ACTIVE))
+               return 0;
+
+       /* Write 0 to the active bit - Pause the DMA */
+       writel(0, chan_base + BCM2835_DMA_CS);
+
+       /* Wait for any current AXI transfer to complete */
+       while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+               cpu_relax();
+               cs = readl(chan_base + BCM2835_DMA_CS);
+       }
+
+       /* We'll un-pause when we set of our next DMA */
+       if (!timeout)
+               return -ETIMEDOUT;
+
+       if (!(cs & BCM2835_DMA_ACTIVE))
+               return 0;
+
+       /* Terminate the control block chain */
+       writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+       /* Abort the whole DMA */
+       writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+              chan_base + BCM2835_DMA_CS);
+
+       return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+       struct bcm2835_desc *d;
+
+       if (!vd) {
+               c->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+       writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+       writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+       struct bcm2835_chan *c = data;
+       struct bcm2835_desc *d;
+       unsigned long flags;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /* Acknowledge interrupt */
+       writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+       d = c->desc;
+
+       if (d) {
+               /* TODO Only works for cyclic DMA */
+               vchan_cyclic_callback(&d->vd);
+       }
+
+       /* Keep the DMA engine running */
+       writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+       dev_dbg(c->vc.chan.device->dev,
+                       "Allocating DMA channel %d\n", c->ch);
+
+       return request_irq(c->irq_number,
+                       bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+       vchan_free_chan_resources(&c->vc);
+       free_irq(c->irq_number, c);
+
+       dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+       return d->size;
+}
+
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+       unsigned int i;
+       size_t size;
+
+       for (size = i = 0; i < d->frames; i++) {
+               struct bcm2835_dma_cb *control_block =
+                       &d->control_block_base[i];
+               size_t this_size = control_block->length;
+               dma_addr_t dma;
+
+               if (d->dir == DMA_DEV_TO_MEM)
+                       dma = control_block->dst;
+               else
+                       dma = control_block->src;
+
+               if (size)
+                       size += this_size;
+               else if (addr >= dma && addr < dma + this_size)
+                       size += dma + this_size - addr;
+       }
+
+       return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+       dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vd = vchan_find_desc(&c->vc, cookie);
+       if (vd) {
+               txstate->residue =
+                       bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+       } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+               struct bcm2835_desc *d = c->desc;
+               dma_addr_t pos;
+
+               if (d->dir == DMA_MEM_TO_DEV)
+                       pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+               else if (d->dir == DMA_DEV_TO_MEM)
+                       pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+               else
+                       pos = 0;
+
+               txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+       } else {
+               txstate->residue = 0;
+       }
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+       unsigned long flags;
+
+       c->cyclic = true; /* Nothing else is implemented */
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (vchan_issue_pending(&c->vc) && !c->desc)
+               bcm2835_dma_start_desc(c);
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags, void *context)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct bcm2835_desc *d;
+       dma_addr_t dev_addr;
+       unsigned int es, sync_type;
+       unsigned int frame;
+
+       /* Grab configuration */
+       if (!is_slave_direction(direction)) {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (direction == DMA_DEV_TO_MEM) {
+               dev_addr = c->cfg.src_addr;
+               dev_width = c->cfg.src_addr_width;
+               sync_type = BCM2835_DMA_S_DREQ;
+       } else {
+               dev_addr = c->cfg.dst_addr;
+               dev_width = c->cfg.dst_addr_width;
+               sync_type = BCM2835_DMA_D_DREQ;
+       }
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = BCM2835_DMA_DATA_TYPE_S32;
+               break;
+       default:
+               return NULL;
+       }
+
+       /* Now allocate and setup the descriptor. */
+       d = kzalloc(sizeof(*d), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->dir = direction;
+       d->frames = buf_len / period_len;
+
+       /* Allocate memory for control blocks */
+       d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+       d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+                       d->control_block_size, &d->control_block_base_phys,
+                       GFP_NOWAIT);
+
+       if (!d->control_block_base) {
+               kfree(d);
+               return NULL;
+       }
+
+       /*
+        * Iterate over all frames, create a control block
+        * for each frame and link them together.
+        */
+       for (frame = 0; frame < d->frames; frame++) {
+               struct bcm2835_dma_cb *control_block =
+                       &d->control_block_base[frame];
+
+               /* Setup adresses */
+               if (d->dir == DMA_DEV_TO_MEM) {
+                       control_block->info = BCM2835_DMA_D_INC;
+                       control_block->src = dev_addr;
+                       control_block->dst = buf_addr + frame * period_len;
+               } else {
+                       control_block->info = BCM2835_DMA_S_INC;
+                       control_block->src = buf_addr + frame * period_len;
+                       control_block->dst = dev_addr;
+               }
+
+               /* Enable interrupt */
+               control_block->info |= BCM2835_DMA_INT_EN;
+
+               /* Setup synchronization */
+               if (sync_type != 0)
+                       control_block->info |= sync_type;
+
+               /* Setup DREQ channel */
+               if (c->dreq != 0)
+                       control_block->info |=
+                               BCM2835_DMA_PER_MAP(c->dreq);
+
+               /* Length of a frame */
+               control_block->length = period_len;
+               d->size += control_block->length;
+
+               /*
+                * Next block is the next frame.
+                * This DMA engine driver currently only supports cyclic DMA.
+                * Therefore, wrap around at number of frames.
+                */
+               control_block->next = d->control_block_base_phys +
+                       sizeof(struct bcm2835_dma_cb)
+                       * ((frame + 1) % d->frames);
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+               struct dma_slave_config *cfg)
+{
+       if ((cfg->direction == DMA_DEV_TO_MEM &&
+            cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+           (cfg->direction == DMA_MEM_TO_DEV &&
+            cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+           !is_slave_direction(cfg->direction)) {
+               return -EINVAL;
+       }
+
+       c->cfg = *cfg;
+
+       return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+       struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+       unsigned long flags;
+       int timeout = 10000;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /* Prevent this channel being scheduled */
+       spin_lock(&d->lock);
+       list_del_init(&c->node);
+       spin_unlock(&d->lock);
+
+       /*
+        * Stop DMA activity: we assume the callback will not be called
+        * after bcm2835_dma_abort() returns (even if it does, it will see
+        * c->desc is NULL and exit.)
+        */
+       if (c->desc) {
+               c->desc = NULL;
+               bcm2835_dma_abort(c->chan_base);
+
+               /* Wait for stopping */
+               while (--timeout) {
+                       if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+                                               BCM2835_DMA_ACTIVE))
+                               break;
+
+                       cpu_relax();
+               }
+
+               if (!timeout)
+                       dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+       }
+
+       vchan_get_all_descriptors(&c->vc, &head);
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
+
+       return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+       switch (cmd) {
+       case DMA_SLAVE_CONFIG:
+               return bcm2835_dma_slave_config(c,
+                               (struct dma_slave_config *)arg);
+
+       case DMA_TERMINATE_ALL:
+               return bcm2835_dma_terminate_all(c);
+
+       default:
+               return -ENXIO;
+       }
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+       struct bcm2835_chan *c;
+
+       c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return -ENOMEM;
+
+       c->vc.desc_free = bcm2835_dma_desc_free;
+       vchan_init(&c->vc, &d->ddev);
+       INIT_LIST_HEAD(&c->node);
+
+       d->ddev.chancnt++;
+
+       c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+       c->ch = chan_id;
+       c->irq_number = irq;
+
+       return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+       struct bcm2835_chan *c, *next;
+
+       list_for_each_entry_safe(c, next, &od->ddev.channels,
+                                vc.chan.device_node) {
+               list_del(&c->vc.chan.device_node);
+               tasklet_kill(&c->vc.task);
+       }
+}
+
+static const struct of_device_id bcm2835_dma_of_match[] = {
+       { .compatible = "brcm,bcm2835-dma", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+                                          struct of_dma *ofdma)
+{
+       struct bcm2835_dmadev *d = ofdma->of_dma_data;
+       struct dma_chan *chan;
+
+       chan = dma_get_any_slave_channel(&d->ddev);
+       if (!chan)
+               return NULL;
+
+       /* Set DREQ from param */
+       to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
+
+       return chan;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = false;
+       caps->cmd_terminate = true;
+
+       return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+       struct bcm2835_dmadev *od;
+       struct resource *res;
+       void __iomem *base;
+       int rc;
+       int i;
+       int irq;
+       uint32_t chans_available;
+
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+       if (!od)
+               return -ENOMEM;
+
+       pdev->dev.dma_parms = &od->dma_parms;
+       dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       od->base = base;
+
+       dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+       dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+       od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+       od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+       od->ddev.device_tx_status = bcm2835_dma_tx_status;
+       od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+       od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+       od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+       od->ddev.device_control = bcm2835_dma_control;
+       od->ddev.dev = &pdev->dev;
+       INIT_LIST_HEAD(&od->ddev.channels);
+       spin_lock_init(&od->lock);
+
+       platform_set_drvdata(pdev, od);
+
+       /* Request DMA channel mask from device tree */
+       if (of_property_read_u32(pdev->dev.of_node,
+                       "brcm,dma-channel-mask",
+                       &chans_available)) {
+               dev_err(&pdev->dev, "Failed to get channel mask\n");
+               rc = -EINVAL;
+               goto err_no_dma;
+       }
+
+       /*
+        * Do not use the FIQ and BULK channels,
+        * because they are used by the GPU.
+        */
+       chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+
+       for (i = 0; i < pdev->num_resources; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0)
+                       break;
+
+               if (chans_available & (1 << i)) {
+                       rc = bcm2835_dma_chan_init(od, i, irq);
+                       if (rc)
+                               goto err_no_dma;
+               }
+       }
+
+       dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+       /* Device-tree DMA controller registration */
+       rc = of_dma_controller_register(pdev->dev.of_node,
+                       bcm2835_dma_xlate, od);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed to register DMA controller\n");
+               goto err_no_dma;
+       }
+
+       rc = dma_async_device_register(&od->ddev);
+       if (rc) {
+               dev_err(&pdev->dev,
+                       "Failed to register slave DMA engine device: %d\n", rc);
+               goto err_no_dma;
+       }
+
+       dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+       return 0;
+
+err_no_dma:
+       bcm2835_dma_free(od);
+       return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+       struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&od->ddev);
+       bcm2835_dma_free(od);
+
+       return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+       .probe  = bcm2835_dma_probe,
+       .remove = bcm2835_dma_remove,
+       .driver = {
+               .name = "bcm2835-dma",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(bcm2835_dma_of_match),
+       },
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
index c29dacff66fa951f136657536a628b6743ccda30..c18aebf7d5aa9a23199b556bc9b54c8b3237253d 100644 (file)
@@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
                goto err_chans;
 
        irq = irq_of_parse_and_map(dev->of_node, 0);
-       if (!irq)
+       if (!irq) {
+               ret = -EINVAL;
                goto err_irq;
+       }
 
        cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
index 9dfcaf5c12888d3de80483329ffccc7fbbacd02a..05b6dea770a407fc94e614b82bdbdf0ef0212593 100644 (file)
@@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel),
                S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
-static char test_device[20];
+static char test_device[32];
 module_param_string(device, test_device, sizeof(test_device),
                S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
 struct dmatest_params {
        unsigned int    buf_size;
        char            channel[20];
-       char            device[20];
+       char            device[32];
        unsigned int    threads_per_chan;
        unsigned int    max_channels;
        unsigned int    iterations;
index 7516be4677cf7e778ba3bef2482cae9179ff41a3..13ac3f240e7963127c713f44f296bd421edf8999 100644 (file)
@@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
        u32             ctllo;
 
-       /* Software emulation of LLP mode relies on interrupts to continue
-        * multi block transfer. */
+       /*
+        * Software emulation of LLP mode relies on interrupts to continue
+        * multi block transfer.
+        */
        ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
 
        channel_writel(dwc, SAR, desc->lli.sar);
@@ -253,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
                                                &dwc->flags);
                if (was_soft_llp) {
                        dev_err(chan2dev(&dwc->chan),
-                               "BUG: Attempted to start new LLP transfer "
-                               "inside ongoing one\n");
+                               "BUG: Attempted to start new LLP transfer inside ongoing one\n");
                        return;
                }
 
@@ -420,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                return;
        }
 
-       dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
-                       (unsigned long long)llp);
+       dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
 
        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                /* Initial residue value */
@@ -567,9 +567,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
                        unlikely(status_xfer & dwc->mask)) {
                int i;
 
-               dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
-                               "interrupt, stopping DMA transfer\n",
-                               status_xfer ? "xfer" : "error");
+               dev_err(chan2dev(&dwc->chan),
+                       "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+                       status_xfer ? "xfer" : "error");
 
                spin_lock_irqsave(&dwc->lock, flags);
 
@@ -711,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        u32                     ctllo;
 
        dev_vdbg(chan2dev(chan),
-                       "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
-                       (unsigned long long)dest, (unsigned long long)src,
-                       len, flags);
+                       "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+                       &dest, &src, len, flags);
 
        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1401,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
        /* Let's make a cyclic list */
        last->lli.llp = cdesc->desc[0]->txd.phys;
 
-       dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
-                       "period %zu periods %d\n", (unsigned long long)buf_addr,
-                       buf_len, period_len, periods);
+       dev_dbg(chan2dev(&dwc->chan),
+                       "cyclic prepared buf %pad len %zu period %zu periods %d\n",
+                       &buf_addr, buf_len, period_len, periods);
 
        cdesc->periods = periods;
        dwc->cdesc = cdesc;
@@ -1603,9 +1602,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                        dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
                                           dwc_params);
 
-                       /* Decode maximum block size for given channel. The
+                       /*
+                        * Decode maximum block size for given channel. The
                         * stored 4 bit value represents blocks from 0x00 for 3
-                        * up to 0x0a for 4095. */
+                        * up to 0x0a for 4095.
+                        */
                        dwc->block_size =
                                (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
                        dwc->nollp =
index 2539ea0cbc6394f918fb849ffd3c6f6ca73f33a0..cd8da451d1995fef8b6d076005b17ad1a17b44d1 100644 (file)
@@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
        echan->alloced = true;
        echan->slot[0] = echan->ch_num;
 
-       dev_info(dev, "allocated channel for %u:%u\n",
-                EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+       dev_dbg(dev, "allocated channel for %u:%u\n",
+               EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
        return 0;
 
@@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan)
                echan->alloced = false;
        }
 
-       dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+       dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
 }
 
 /* Send pending descriptor to hardware */
index 1ffc24484d23cdb0edd1e6011605aff5c9a6eb07..d56e83599825b16666960f78b91ea08620be4cef 100644 (file)
@@ -41,7 +41,7 @@
  * channel is allowed to transfer before the DMA engine pauses
  * the current channel and switches to the next channel
  */
-#define FSL_DMA_MR_BWC         0x08000000
+#define FSL_DMA_MR_BWC         0x0A000000
 
 /* Special MR definition for MPC8349 */
 #define FSL_DMA_MR_EOTIE       0x00000080
index c75679d420286c522679cdb9c344549c97e7a0c4..4e7918339b1263a2720c3da11d271714dd9669ad 100644 (file)
@@ -323,6 +323,7 @@ struct sdma_engine {
        struct clk                      *clk_ipg;
        struct clk                      *clk_ahb;
        spinlock_t                      channel_0_lock;
+       u32                             script_number;
        struct sdma_script_start_addrs  *script_addrs;
        const struct sdma_driver_data   *drvdata;
 };
@@ -724,6 +725,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
                per_2_emi = sdma->script_addrs->app_2_mcu_addr;
                emi_2_per = sdma->script_addrs->mcu_2_app_addr;
                break;
+       case IMX_DMATYPE_SSI_DUAL:
+               per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+               break;
        case IMX_DMATYPE_SSI_SP:
        case IMX_DMATYPE_MMC:
        case IMX_DMATYPE_SDHC:
@@ -1238,6 +1243,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1        34
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2        38
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
                const struct sdma_script_start_addrs *addr)
@@ -1246,7 +1252,11 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
        s32 *saddr_arr = (u32 *)sdma->script_addrs;
        int i;
 
-       for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+       /* use the default firmware in ROM if missing external firmware */
+       if (!sdma->script_number)
+               sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+       for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i];
 }
@@ -1272,6 +1282,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
                goto err_firmware;
        if (header->ram_code_start + header->ram_code_size > fw->size)
                goto err_firmware;
+       switch (header->version_major) {
+               case 1:
+                       sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+                       break;
+               case 2:
+                       sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+                       break;
+               default:
+                       dev_err(sdma->dev, "unknown firmware version\n");
+                       goto err_firmware;
+       }
 
        addr = (void *)header + header->script_addrs_start;
        ram_code = (void *)header + header->ram_code_start;
index e26075408e9b95a365dfd188cad786593604412f..a1f911aaf220e0b107c1009a74edfaca1ff06634 100644 (file)
@@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;
 
-       if (sgl == 0)
+       if (sgl == NULL)
                return NULL;
 
        for_each_sg(sgl, sg, sglen, i) {
@@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev)
        return 0;
 }
 
-SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
 
 static struct platform_driver k3_pdma_driver = {
        .driver         = {
index c6a01ea8bc591c289777d2ab837ef48887cf64d1..b439679f4126e98dcd6484607971276b2baf512f 100644 (file)
@@ -5,6 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #define DTADR          0x0208
 #define DCMD           0x020c
 
-#define DCSR_RUN       (1 << 31)       /* Run Bit (read / write) */
-#define DCSR_NODESC    (1 << 30)       /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29)       /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND   (1 << 8)        /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3)        /* Stop State (read-only) */
-#define DCSR_ENDINTR   (1 << 2)        /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1)        /* Start Interrupt (read / write) */
-#define DCSR_BUSERR    (1 << 0)        /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN  (1 << 28)       /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN  (1 << 27)       /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26)       /* STOP on an EOR */
-#define DCSR_SETCMPST  (1 << 25)       /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST  (1 << 24)       /* Clear Descriptor Compare Status */
-#define DCSR_CMPST     (1 << 10)       /* The Descriptor Compare Status */
-#define DCSR_EORINTR   (1 << 9)        /* The end of Receive */
-
-#define DRCMR(n)       ((((n) < 64) ? 0x0100 : 0x1100) + \
-                                (((n) & 0x3f) << 2))
-#define DRCMR_MAPVLD   (1 << 7)        /* Map Valid (read / write) */
-#define DRCMR_CHLNUM   0x1f            /* mask for Channel Number (read / write) */
+#define DCSR_RUN       BIT(31) /* Run Bit (read / write) */
+#define DCSR_NODESC    BIT(30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND   BIT(8)  /* Request Pending (read-only) */
+#define DCSR_STOPSTATE BIT(3)  /* Stop State (read-only) */
+#define DCSR_ENDINTR   BIT(2)  /* End Interrupt (read / write) */
+#define DCSR_STARTINTR BIT(1)  /* Start Interrupt (read / write) */
+#define DCSR_BUSERR    BIT(0)  /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN  BIT(28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN  BIT(27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
+#define DCSR_SETCMPST  BIT(25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST  BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST     BIT(10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR   BIT(9)  /* The end of Receive */
+
+#define DRCMR(n)       ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD   BIT(7)  /* Map Valid (read / write) */
+#define DRCMR_CHLNUM   0x1f    /* mask for Channel Number (read / write) */
 
 #define DDADR_DESCADDR 0xfffffff0      /* Address of next descriptor (mask) */
-#define DDADR_STOP     (1 << 0)        /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR        (1 << 31)       /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR        (1 << 30)       /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC   (1 << 29)       /* Flow Control by the source. */
-#define DCMD_FLOWTRG   (1 << 28)       /* Flow Control by the target. */
-#define DCMD_STARTIRQEN        (1 << 22)       /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN  (1 << 21)       /* End Interrupt Enable */
-#define DCMD_ENDIAN    (1 << 18)       /* Device Endian-ness. */
+#define DDADR_STOP     BIT(0)  /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR        BIT(31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR        BIT(30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC   BIT(29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG   BIT(28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN        BIT(22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN  BIT(21) /* End Interrupt Enable */
+#define DCMD_ENDIAN    BIT(18) /* Device Endian-ness. */
 #define DCMD_BURST8    (1 << 16)       /* 8 byte burst */
 #define DCMD_BURST16   (2 << 16)       /* 16 byte burst */
 #define DCMD_BURST32   (3 << 16)       /* 32 byte burst */
@@ -132,10 +132,14 @@ struct mmp_pdma_device {
        spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx)                                        \
+       container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh)                                   \
+       container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan)                                        \
+       container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev)                                        \
+       container_of(dmadev, struct mmp_pdma_device, device)
 
 static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 {
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
        writel(dalgn, phy->base + DALGN);
 
        reg = (phy->idx << 2) + DCSR;
-       writel(readl(phy->base + reg) | DCSR_RUN,
-                                       phy->base + reg);
+       writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
 }
 
 static void disable_chan(struct mmp_pdma_phy *phy)
 {
        u32 reg;
 
-       if (phy) {
-               reg = (phy->idx << 2) + DCSR;
-               writel(readl(phy->base + reg) & ~DCSR_RUN,
-                                               phy->base + reg);
-       }
+       if (!phy)
+               return;
+
+       reg = (phy->idx << 2) + DCSR;
+       writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
 }
 
 static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
        u32 dint = readl(phy->base + DINT);
        u32 reg = (phy->idx << 2) + DCSR;
 
-       if (dint & BIT(phy->idx)) {
-               /* clear irq */
-               dcsr = readl(phy->base + reg);
-               writel(dcsr, phy->base + reg);
-               if ((dcsr & DCSR_BUSERR) && (phy->vchan))
-                       dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
-               return 0;
-       }
-       return -EAGAIN;
+       if (!(dint & BIT(phy->idx)))
+               return -EAGAIN;
+
+       /* clear irq */
+       dcsr = readl(phy->base + reg);
+       writel(dcsr, phy->base + reg);
+       if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+               dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+       return 0;
 }
 
 static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
 {
        struct mmp_pdma_phy *phy = dev_id;
 
-       if (clear_chan_irq(phy) == 0) {
-               tasklet_schedule(&phy->vchan->tasklet);
-               return IRQ_HANDLED;
-       } else
+       if (clear_chan_irq(phy) != 0)
                return IRQ_NONE;
+
+       tasklet_schedule(&phy->vchan->tasklet);
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
 
        if (irq_num)
                return IRQ_HANDLED;
-       else
-               return IRQ_NONE;
+
+       return IRQ_NONE;
 }
 
 /* lookup free phy channel as descending priority */
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
         */
 
        spin_lock_irqsave(&pdev->phy_lock, flags);
-       for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+       for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
                for (i = 0; i < pdev->dma_channels; i++) {
-                       if (prio != ((i & 0xf) >> 2))
+                       if (prio != (i & 0xf) >> 2)
                                continue;
                        phy = &pdev->phy[i];
                        if (!phy->vchan) {
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
        if (chan->desc_pool)
                return 1;
 
-       chan->desc_pool =
-               dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
-                                 sizeof(struct mmp_pdma_desc_sw),
-                                 __alignof__(struct mmp_pdma_desc_sw), 0);
+       chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+                                         chan->dev,
+                                         sizeof(struct mmp_pdma_desc_sw),
+                                         __alignof__(struct mmp_pdma_desc_sw),
+                                         0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }
+
        mmp_pdma_free_phy(chan);
        chan->idle = true;
        chan->dev_addr = 0;
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
 }
 
 static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
-                                 struct list_head *list)
+                                   struct list_head *list)
 {
        struct mmp_pdma_desc_sw *desc, *_desc;
 
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_memcpy(struct dma_chan *dchan,
-       dma_addr_t dma_dst, dma_addr_t dma_src,
-       size_t len, unsigned long flags)
+                    dma_addr_t dma_dst, dma_addr_t dma_src,
+                    size_t len, unsigned long flags)
 {
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@ fail:
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
-                        unsigned int sg_len, enum dma_transfer_direction dir,
-                        unsigned long flags, void *context)
+                      unsigned int sg_len, enum dma_transfer_direction dir,
+                      unsigned long flags, void *context)
 {
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@ fail:
        return NULL;
 }
 
-static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
-       struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
-       size_t period_len, enum dma_transfer_direction direction,
-       unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+                        dma_addr_t buf_addr, size_t len, size_t period_len,
+                        enum dma_transfer_direction direction,
+                        unsigned long flags, void *context)
 {
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
                        goto fail;
                }
 
-               new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
-                                       (DCMD_LENGTH & period_len);
+               new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+                                 (DCMD_LENGTH & period_len));
                new->desc.dsadr = dma_src;
                new->desc.dtadr = dma_dst;
 
@@ -677,12 +684,11 @@ fail:
 }
 
 static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+                           unsigned long arg)
 {
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct dma_slave_config *cfg = (void *)arg;
        unsigned long flags;
-       int ret = 0;
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                return -ENOSYS;
        }
 
-       return ret;
+       return 0;
 }
 
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
-                       dma_cookie_t cookie, struct dma_tx_state *txstate)
+                                         dma_cookie_t cookie,
+                                         struct dma_tx_state *txstate)
 {
        return dma_cookie_status(dchan, cookie, txstate);
 }
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
        return 0;
 }
 
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
-                                                       int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
 {
        struct mmp_pdma_phy *phy  = &pdev->phy[idx];
        struct mmp_pdma_chan *chan;
        int ret;
 
-       chan = devm_kzalloc(pdev->dev,
-                       sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+       chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
+                           GFP_KERNEL);
        if (chan == NULL)
                return -ENOMEM;
 
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
        phy->base = pdev->base;
 
        if (irq) {
-               ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_chan_handler, 0, "pdma", phy);
+               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
+                                      "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
        INIT_LIST_HEAD(&chan->chain_running);
 
        /* register virt channel to dma engine */
-       list_add_tail(&chan->chan.device_node,
-                       &pdev->device.channels);
+       list_add_tail(&chan->chan.device_node, &pdev->device.channels);
 
        return 0;
 }
@@ -894,14 +899,12 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
 {
        struct mmp_pdma_device *d = ofdma->of_dma_data;
        struct dma_chan *chan;
-       struct mmp_pdma_chan *c;
 
        chan = dma_get_any_slave_channel(&d->device);
        if (!chan)
                return NULL;
 
-       c = to_mmp_pdma_chan(chan);
-       c->drcmr = dma_spec->args[0];
+       to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
 
        return chan;
 }
@@ -918,6 +921,7 @@ static int mmp_pdma_probe(struct platform_device *op)
        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return -ENOMEM;
+
        pdev->dev = &op->dev;
 
        spin_lock_init(&pdev->phy_lock);
@@ -929,8 +933,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 
        of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
        if (of_id)
-               of_property_read_u32(pdev->dev->of_node,
-                               "#dma-channels", &dma_channels);
+               of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+                                    &dma_channels);
        else if (pdata && pdata->dma_channels)
                dma_channels = pdata->dma_channels;
        else
@@ -942,8 +946,9 @@ static int mmp_pdma_probe(struct platform_device *op)
                        irq_num++;
        }
 
-       pdev->phy = devm_kzalloc(pdev->dev,
-               dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+       pdev->phy = devm_kcalloc(pdev->dev,
+                                dma_channels, sizeof(struct mmp_pdma_chan),
+                                GFP_KERNEL);
        if (pdev->phy == NULL)
                return -ENOMEM;
 
@@ -952,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
        if (irq_num != dma_channels) {
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
-               ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_int_handler, 0, "pdma", pdev);
+               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
+                                      "pdma", pdev);
                if (ret)
                        return ret;
        }
@@ -1029,7 +1034,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
        if (chan->device->dev->driver != &mmp_pdma_driver.driver)
                return false;
 
-       c->drcmr = *(unsigned int *) param;
+       c->drcmr = *(unsigned int *)param;
 
        return true;
 }
@@ -1037,6 +1042,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
 
 module_platform_driver(mmp_pdma_driver);
 
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_LICENSE("GPL v2");
index 3ddacc14a7366611ffb089d21c70fb4ba3c896c5..33f96aaa80c759aff2f8098e2135dd6f1b67b90a 100644 (file)
@@ -121,11 +121,13 @@ struct mmp_tdma_chan {
        int                             idx;
        enum mmp_tdma_type              type;
        int                             irq;
-       unsigned long                   reg_base;
+       void __iomem                    *reg_base;
 
        size_t                          buf_len;
        size_t                          period_len;
        size_t                          pos;
+
+       struct gen_pool                 *pool;
 };
 
 #define TDMA_CHANNEL_NUM 2
@@ -182,7 +184,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
 
 static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
 {
-       unsigned int tdcr;
+       unsigned int tdcr = 0;
 
        mmp_tdma_disable_chan(tdmac);
 
@@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
        struct gen_pool *gpool;
        int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
-       gpool = sram_get_gpool("asram");
+       gpool = tdmac->pool;
        if (tdmac->desc_arr)
                gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                                size);
@@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
        struct gen_pool *gpool;
        int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
-       gpool = sram_get_gpool("asram");
+       gpool = tdmac->pool;
        if (!gpool)
                return NULL;
 
@@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev)
 }
 
 static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
-                                               int idx, int irq, int type)
+                                       int idx, int irq,
+                                       int type, struct gen_pool *pool)
 {
        struct mmp_tdma_chan *tdmac;
 
@@ -526,7 +529,8 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
        tdmac->chan.device = &tdev->device;
        tdmac->idx         = idx;
        tdmac->type        = type;
-       tdmac->reg_base    = (unsigned long)tdev->base + idx * 4;
+       tdmac->reg_base    = tdev->base + idx * 4;
+       tdmac->pool        = pool;
        tdmac->status = DMA_COMPLETE;
        tdev->tdmac[tdmac->idx] = tdmac;
        tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        int i, ret;
        int irq = 0, irq_num = 0;
        int chan_num = TDMA_CHANNEL_NUM;
+       struct gen_pool *pool;
 
        of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
        if (of_id)
@@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&tdev->device.channels);
 
+       if (pdev->dev.of_node)
+               pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+       else
+               pool = sram_get_gpool("asram");
+       if (!pool) {
+               dev_err(&pdev->dev, "asram pool not available\n");
+               return -ENOMEM;
+       }
+
        if (irq_num != chan_num) {
                irq = platform_get_irq(pdev, 0);
                ret = devm_request_irq(&pdev->dev, irq,
@@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        /* initialize channel parameters */
        for (i = 0; i < chan_num; i++) {
                irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
-               ret = mmp_tdma_chan_init(tdev, i, irq, type);
+               ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
                if (ret)
                        return ret;
        }
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644 (file)
index 0000000..3258e48
--- /dev/null
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL                    4
+
+#define REG_OFF_ADDRESS_SOURCE                 0
+#define REG_OFF_ADDRESS_DEST                   4
+#define REG_OFF_CYCLES                         8
+#define REG_OFF_CTRL                           12
+#define REG_OFF_CHAN_SIZE                      16
+
+#define APB_DMA_ENABLE                         BIT(0)
+#define APB_DMA_FIN_INT_STS                    BIT(1)
+#define APB_DMA_FIN_INT_EN                     BIT(2)
+#define APB_DMA_BURST_MODE                     BIT(3)
+#define APB_DMA_ERR_INT_STS                    BIT(4)
+#define APB_DMA_ERR_INT_EN                     BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT                  0x40
+#define APB_DMA_DEST_SELECT                    0x80
+
+#define APB_DMA_SOURCE                         0x100
+#define APB_DMA_DEST                           0x1000
+
+#define APB_DMA_SOURCE_MASK                    0x700
+#define APB_DMA_DEST_MASK                      0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0                   0
+#define APB_DMA_SOURCE_INC_1_4                 0x100
+#define APB_DMA_SOURCE_INC_2_8                 0x200
+#define APB_DMA_SOURCE_INC_4_16                        0x300
+#define APB_DMA_SOURCE_DEC_1_4                 0x500
+#define APB_DMA_SOURCE_DEC_2_8                 0x600
+#define APB_DMA_SOURCE_DEC_4_16                        0x700
+#define APB_DMA_DEST_INC_0                     0
+#define APB_DMA_DEST_INC_1_4                   0x1000
+#define APB_DMA_DEST_INC_2_8                   0x2000
+#define APB_DMA_DEST_INC_4_16                  0x3000
+#define APB_DMA_DEST_DEC_1_4                   0x5000
+#define APB_DMA_DEST_DEC_2_8                   0x6000
+#define APB_DMA_DEST_DEC_4_16                  0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO                  0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK             0xf000000
+#define APB_DMA_DEST_REQ_NO                    0x10000
+#define APB_DMA_DEST_REQ_NO_MASK               0xf0000
+
+#define APB_DMA_DATA_WIDTH                     0x100000
+#define APB_DMA_DATA_WIDTH_MASK                        0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4                   0
+#define APB_DMA_DATA_WIDTH_2                   0x100000
+#define APB_DMA_DATA_WIDTH_1                   0x200000
+
+#define APB_DMA_CYCLES_MASK                    0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8                        0x00
+#define MOXART_DMA_DATA_TYPE_S16               0x01
+#define MOXART_DMA_DATA_TYPE_S32               0x02
+
+/* One scatter/gather segment: bus address and length in bytes. */
+struct moxart_sg {
+       dma_addr_t addr;
+       uint32_t len;
+};
+
+/*
+ * A transfer descriptor: direction, fixed device-side address, and a
+ * trailing array of memory-side segments.
+ * NOTE(review): sg[0] is the pre-C99 zero-length-array idiom; a C99
+ * flexible array member (sg[]) is the preferred form today.
+ */
+struct moxart_desc {
+       enum dma_transfer_direction     dma_dir;
+       dma_addr_t                      dev_addr;
+       unsigned int                    sglen;
+       unsigned int                    dma_cycles;
+       struct virt_dma_desc            vd;
+       uint8_t                         es;     /* MOXART_DMA_DATA_TYPE_* index into es_bytes[] */
+       struct moxart_sg                sg[0];
+};
+
+/* Per-channel state; vc embeds the generic virt-dma channel. */
+struct moxart_chan {
+       struct virt_dma_chan            vc;
+
+       void __iomem                    *base;  /* this channel's register window */
+       struct moxart_desc              *desc;  /* descriptor currently in flight, or NULL */
+
+       struct dma_slave_config         cfg;
+
+       bool                            allocated;
+       bool                            error;  /* set by the IRQ handler on APB_DMA_ERR_INT_STS */
+       int                             ch_num;
+       unsigned int                    line_reqno;     /* hardware handshake request line */
+       unsigned int                    sgidx;  /* index of the segment currently transferring */
+};
+
+/* Controller instance: the dmaengine device plus its fixed channel array. */
+struct moxart_dmadev {
+       struct dma_device               dma_slave;
+       struct moxart_chan              slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/* NOTE(review): appears unused in this file — candidate for removal. */
+struct moxart_filter_data {
+       struct moxart_dmadev            *mdc;
+       struct of_phandle_args          *dma_spec;
+};
+
+/*
+ * Element size per data type, in bytes.
+ * NOTE(review): moxart_set_transfer_params() and
+ * moxart_dma_desc_size_in_flight() use these values as SHIFT counts
+ * (len >> 1/2/4), not divisors — confirm against hardware cycle semantics.
+ */
+static const unsigned int es_bytes[] = {
+       [MOXART_DMA_DATA_TYPE_S8] = 1,
+       [MOXART_DMA_DATA_TYPE_S16] = 2,
+       [MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+/* Return the struct device backing a dmaengine channel, for dev_dbg/dev_err. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+       return &chan->dev->device;
+}
+
+/* Upcast a generic dma_chan to its containing moxart_chan. */
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct moxart_chan, vc.chan);
+}
+
+/* Upcast an async tx descriptor to its containing moxart_desc. */
+static inline struct moxart_desc *to_moxart_dma_desc(
+       struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct moxart_desc, vd.tx);
+}
+
+/* virt-dma desc_free callback: descriptors are plain kzalloc'd memory. */
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+       kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * DMA_TERMINATE_ALL: stop the channel, mask its interrupts, and free every
+ * queued descriptor.  The in-flight descriptor pointer is simply dropped;
+ * its memory is reclaimed via vchan_get_all_descriptors() below.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+       u32 ctrl;
+
+       dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+
+       /* NOTE(review): the NULL guard is redundant — plain "ch->desc = NULL;" suffices. */
+       if (ch->desc)
+               ch->desc = NULL;
+
+       /* Disable the channel and both completion/error interrupt enables. */
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+
+       /* Collect descriptors under the lock, free them outside it. */
+       vchan_get_all_descriptors(&ch->vc, &head);
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+       vchan_dma_desc_free_list(&ch->vc, &head);
+
+       return 0;
+}
+
+/*
+ * DMA_SLAVE_CONFIG: cache the config and program CTRL with burst mode,
+ * data width, address-increment mode, bus select and request line.
+ * Returns -EINVAL for an unsupported bus width.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+                              struct dma_slave_config *cfg)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       u32 ctrl;
+
+       ch->cfg = *cfg;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl |= APB_DMA_BURST_MODE;
+       ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+       ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+       /*
+        * Set data width and make the memory-side address auto-increment;
+        * the device side keeps "no increment" (cleared above).
+        * NOTE(review): the width is always taken from src_addr_width, even
+        * for DMA_MEM_TO_DEV — confirm this is intentional.
+        */
+       switch (ch->cfg.src_addr_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               ctrl |= APB_DMA_DATA_WIDTH_1;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_1_4;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_1_4;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               ctrl |= APB_DMA_DATA_WIDTH_2;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_2_8;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_2_8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               ctrl &= ~APB_DMA_DATA_WIDTH;    /* 00 = word */
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_4_16;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_4_16;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * Route the handshake request line to the device side: the memory
+        * side sits on AHB (SELECT bit set), the peripheral on APB.
+        */
+       if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+               ctrl &= ~APB_DMA_DEST_SELECT;
+               ctrl |= APB_DMA_SOURCE_SELECT;
+               ctrl |= (ch->line_reqno << 16 &
+                        APB_DMA_DEST_REQ_NO_MASK);
+       } else {
+               ctrl |= APB_DMA_DEST_SELECT;
+               ctrl &= ~APB_DMA_SOURCE_SELECT;
+               ctrl |= (ch->line_reqno << 24 &
+                        APB_DMA_SOURCE_REQ_NO_MASK);
+       }
+
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+
+       return 0;
+}
+
+/*
+ * dmaengine device_control entry point.  Supports TERMINATE_ALL and
+ * SLAVE_CONFIG; PAUSE/RESUME are not implemented by this hardware
+ * (-EINVAL), anything else is -ENOSYS.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                         unsigned long arg)
+{
+       int ret = 0;
+
+       switch (cmd) {
+       case DMA_PAUSE:
+       case DMA_RESUME:
+               return -EINVAL;
+       case DMA_TERMINATE_ALL:
+               moxart_terminate_all(chan);
+               break;
+       case DMA_SLAVE_CONFIG:
+               ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       return ret;
+}
+
+/*
+ * Build a slave scatter/gather descriptor: validate direction and bus
+ * width, snapshot the segment list, and hand the descriptor to virt-dma.
+ * Returns NULL on invalid parameters or allocation failure.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl,
+       unsigned int sg_len, enum dma_transfer_direction dir,
+       unsigned long tx_flags, void *context)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct moxart_desc *d;
+       enum dma_slave_buswidth dev_width;
+       dma_addr_t dev_addr;
+       struct scatterlist *sgent;
+       unsigned int es;
+       unsigned int i;
+
+       if (!is_slave_direction(dir)) {
+               dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+                       __func__);
+               return NULL;
+       }
+
+       /* Pick the device-side address/width from the cached slave config. */
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_addr = ch->cfg.src_addr;
+               dev_width = ch->cfg.src_addr_width;
+       } else {
+               dev_addr = ch->cfg.dst_addr;
+               dev_width = ch->cfg.dst_addr_width;
+       }
+
+       /* Map the bus width onto the element-size index (es_bytes[]). */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               es = MOXART_DMA_DATA_TYPE_S8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               es = MOXART_DMA_DATA_TYPE_S16;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = MOXART_DMA_DATA_TYPE_S32;
+               break;
+       default:
+               dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+                       __func__, dev_width);
+               return NULL;
+       }
+
+       /* Descriptor plus trailing segment array; GFP_ATOMIC: may be called
+        * from non-sleeping context. */
+       d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       d->dma_dir = dir;
+       d->dev_addr = dev_addr;
+       d->es = es;
+
+       for_each_sg(sgl, sgent, sg_len, i) {
+               d->sg[i].addr = sg_dma_address(sgent);
+               d->sg[i].len = sg_dma_len(sgent);
+       }
+
+       d->sglen = sg_len;
+
+       ch->error = 0;
+
+       return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * OF translation callback: grab any free channel and latch the request
+ * line number from the first phandle argument.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+                                       struct of_dma *ofdma)
+{
+       struct moxart_dmadev *mdc = ofdma->of_dma_data;
+       struct dma_chan *chan;
+       struct moxart_chan *ch;
+
+       chan = dma_get_any_slave_channel(&mdc->dma_slave);
+       if (!chan)
+               return NULL;
+
+       ch = to_moxart_dma_chan(chan);
+       ch->line_reqno = dma_spec->args[0];
+
+       return chan;
+}
+
+/* Mark the channel in use; interrupt handling only services allocated channels. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+       dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+               __func__, ch->ch_num);
+       ch->allocated = 1;
+
+       return 0;
+}
+
+/* Release virt-dma resources and mark the channel free again. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+       vchan_free_chan_resources(&ch->vc);
+
+       dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+               __func__, ch->ch_num);
+       ch->allocated = 0;
+}
+
+/* Program source and destination bus addresses for the next transfer. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+                                 dma_addr_t dst_addr)
+{
+       writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+       writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/*
+ * Convert the segment length to a hardware cycle count and program it.
+ * NOTE(review): es_bytes[] holds byte counts (1/2/4) but is used here as a
+ * SHIFT amount ("len >> sglen_div") — verify the cycle math for the 1- and
+ * 4-byte widths against the datasheet.
+ */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+       struct moxart_desc *d = ch->desc;
+       unsigned int sglen_div = es_bytes[d->es];
+
+       d->dma_cycles = len >> sglen_div;
+
+       /*
+        * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+        * bytes ( when width is APB_DMA_DATA_WIDTH_4 ).
+        */
+       writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+               __func__, d->dma_cycles, len);
+}
+
+/* Kick the channel: enable it together with completion and error interrupts. */
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+       u32 ctrl;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/*
+ * Program and start one scatter/gather segment of the current descriptor.
+ * Direction decides which side gets the segment address and which gets the
+ * fixed device address.
+ */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+       struct moxart_desc *d = ch->desc;
+       struct moxart_sg *sg = ch->desc->sg + idx;
+
+       if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+               moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+       else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+               moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+       moxart_set_transfer_params(ch, sg->len);
+
+       moxart_start_dma(ch);
+}
+
+/*
+ * Dequeue the next issued descriptor (if any) and start its first segment.
+ * Caller must hold ch->vc.lock.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&ch->vc);
+
+       if (!vd) {
+               ch->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       ch->desc = to_moxart_dma_desc(&vd->tx);
+       ch->sgidx = 0;
+
+       moxart_dma_start_sg(ch, 0);
+}
+
+/* dmaengine issue_pending: start the queue if the channel is currently idle. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+       if (vchan_issue_pending(&ch->vc) && !ch->desc)
+               moxart_dma_start_desc(chan);
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/*
+ * Sum the byte lengths of the segments not yet completed.
+ * NOTE(review): "size = i = completed_sgs" seeds the byte total with the
+ * segment INDEX rather than 0 — this looks like a bug; confirm and fix in
+ * a follow-up ("size = 0; i = completed_sgs").
+ */
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+                                  unsigned int completed_sgs)
+{
+       unsigned int i;
+       size_t size;
+
+       for (size = i = completed_sgs; i < d->sglen; i++)
+               size += d->sg[i].len;
+
+       return size;
+}
+
+/*
+ * Residue for the in-flight descriptor: remaining segments minus what the
+ * hardware has already consumed of the current one (derived from the live
+ * CYCLES register).
+ * NOTE(review): es_bytes[] is again used as a shift count here — see
+ * moxart_set_transfer_params().
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+       size_t size;
+       unsigned int completed_cycles, cycles;
+
+       size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+       cycles = readl(ch->base + REG_OFF_CYCLES);
+       completed_cycles = (ch->desc->dma_cycles - cycles);
+       size -= completed_cycles << es_bytes[ch->desc->es];
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+       return size;
+}
+
+/*
+ * dmaengine tx_status: report cookie state and fill in the residue — full
+ * size for a still-queued descriptor, live hardware-derived size for the
+ * in-flight one.  A latched channel error overrides the cookie state.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+                                       dma_cookie_t cookie,
+                                       struct dma_tx_state *txstate)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       struct moxart_desc *d;
+       enum dma_status ret;
+       unsigned long flags;
+
+       /*
+        * dma_cookie_status() assigns initial residue value.
+        */
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+       vd = vchan_find_desc(&ch->vc, cookie);
+       if (vd) {
+               /* Descriptor still queued: nothing transferred yet. */
+               d = to_moxart_dma_desc(&vd->tx);
+               txstate->residue = moxart_dma_desc_size(d, 0);
+       } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+               /* Descriptor in flight: subtract hardware progress. */
+               txstate->residue = moxart_dma_desc_size_in_flight(ch);
+       }
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+       if (ch->error)
+               return DMA_ERROR;
+
+       return ret;
+}
+
+/* Wire up the dmaengine callbacks for this controller instance. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+       dma->device_prep_slave_sg               = moxart_prep_slave_sg;
+       dma->device_alloc_chan_resources        = moxart_alloc_chan_resources;
+       dma->device_free_chan_resources         = moxart_free_chan_resources;
+       dma->device_issue_pending               = moxart_issue_pending;
+       dma->device_tx_status                   = moxart_tx_status;
+       dma->device_control                     = moxart_control;
+       dma->dev                                = dev;
+
+       INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared IRQ handler: poll every allocated channel's CTRL register.  On
+ * completion, either advance to the next sg segment or complete the
+ * descriptor and start the next one; on error, latch ch->error for
+ * moxart_tx_status().  Handled status bits are written back cleared.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+       struct moxart_dmadev *mc = devid;
+       struct moxart_chan *ch = &mc->slave_chans[0];
+       unsigned int i;
+       unsigned long flags;
+       u32 ctrl;
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+               if (!ch->allocated)
+                       continue;
+
+               ctrl = readl(ch->base + REG_OFF_CTRL);
+
+               dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+                       __func__, ch, ch->base, ctrl);
+
+               if (ctrl & APB_DMA_FIN_INT_STS) {
+                       ctrl &= ~APB_DMA_FIN_INT_STS;
+                       if (ch->desc) {
+                               spin_lock_irqsave(&ch->vc.lock, flags);
+                               if (++ch->sgidx < ch->desc->sglen) {
+                                       moxart_dma_start_sg(ch, ch->sgidx);
+                               } else {
+                                       vchan_cookie_complete(&ch->desc->vd);
+                                       moxart_dma_start_desc(&ch->vc.chan);
+                               }
+                               spin_unlock_irqrestore(&ch->vc.lock, flags);
+                       }
+               }
+
+               if (ctrl & APB_DMA_ERR_INT_STS) {
+                       ctrl &= ~APB_DMA_ERR_INT_STS;
+                       ch->error = 1;
+               }
+
+               writel(ctrl, ch->base + REG_OFF_CTRL);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Platform probe: map registers, initialise the fixed channel array,
+ * request the shared IRQ, then register with dmaengine and the OF DMA
+ * translation layer.  All resources are devm-managed.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct resource *res;
+       /* NOTE(review): "static" makes this shared across probe calls —
+        * looks unintended for a local; a plain automatic would do. */
+       static void __iomem *dma_base_addr;
+       int ret, i;
+       unsigned int irq;
+       struct moxart_chan *ch;
+       struct moxart_dmadev *mdc;
+
+       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+       if (!mdc) {
+               dev_err(dev, "can't allocate DMA container\n");
+               return -ENOMEM;
+       }
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq == NO_IRQ) {
+               dev_err(dev, "no IRQ resource\n");
+               return -EINVAL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dma_base_addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(dma_base_addr))
+               return PTR_ERR(dma_base_addr);
+
+       dma_cap_zero(mdc->dma_slave.cap_mask);
+       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+       dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+       moxart_dma_init(&mdc->dma_slave, dev);
+
+       /* Carve the register window into per-channel slices. */
+       ch = &mdc->slave_chans[0];
+       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+               ch->ch_num = i;
+               ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+               ch->allocated = 0;
+
+               ch->vc.desc_free = moxart_dma_desc_free;
+               vchan_init(&ch->vc, &mdc->dma_slave);
+
+               dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+                       __func__, i, ch->ch_num, ch->base);
+       }
+
+       platform_set_drvdata(pdev, mdc);
+
+       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+                              "moxart-dma-engine", mdc);
+       if (ret) {
+               dev_err(dev, "devm_request_irq failed\n");
+               return ret;
+       }
+
+       ret = dma_async_device_register(&mdc->dma_slave);
+       if (ret) {
+               dev_err(dev, "dma_async_device_register failed\n");
+               return ret;
+       }
+
+       /* Undo dmaengine registration if OF registration fails. */
+       ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+       if (ret) {
+               dev_err(dev, "of_dma_controller_register failed\n");
+               dma_async_device_unregister(&mdc->dma_slave);
+               return ret;
+       }
+
+       dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+       return 0;
+}
+
+/* Platform remove: unregister from dmaengine and the OF translation layer.
+ * devm handles the IRQ, mapping and allocations. */
+static int moxart_remove(struct platform_device *pdev)
+{
+       struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&m->dma_slave);
+
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
+
+       return 0;
+}
+
+/* Device-tree match table. */
+static const struct of_device_id moxart_dma_match[] = {
+       { .compatible = "moxa,moxart-dma" },
+       { }
+};
+
+static struct platform_driver moxart_driver = {
+       .probe  = moxart_probe,
+       .remove = moxart_remove,
+       .driver = {
+               .name           = "moxart-dma-engine",
+               .owner          = THIS_MODULE,
+               .of_match_table = moxart_dma_match,
+       },
+};
+
+/*
+ * Registered at subsys_initcall rather than module_platform_driver so the
+ * DMA provider is available before its client drivers probe.
+ */
+static int moxart_init(void)
+{
+       return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+       platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
index 2f66cf4e54fe367754378c3c8be213fe20bb8f64..362e7c49f2e1ad9d264eef1acff1b3102f0e1212 100644 (file)
@@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
        struct omap_chan *c = to_omap_dma_chan(chan);
 
-       dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+       dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
 
        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
@@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);
 
-       dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+       dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
 }
 
 static size_t omap_dma_sg_size(struct omap_sg *sg)
index c90edecee4633c3b7d9b76777534ab424a66ac7a..73fa9b7a10ab36b05dbc54c2850325c1e1a0b566 100644 (file)
@@ -543,7 +543,9 @@ struct dma_pl330_chan {
        /* DMA-Engine Channel */
        struct dma_chan chan;
 
-       /* List of to be xfered descriptors */
+       /* List of submitted descriptors */
+       struct list_head submitted_list;
+       /* List of issued descriptors */
        struct list_head work_list;
        /* List of completed descriptors */
        struct list_head completed_list;
@@ -578,12 +580,16 @@ struct dma_pl330_dmac {
        /* DMA-Engine Device */
        struct dma_device ddma;
 
+       /* Holds info about sg limitations */
+       struct device_dma_parameters dma_parms;
+
        /* Pool of descriptors available for the DMAC's channels */
        struct list_head desc_pool;
        /* To protect desc_pool manipulation */
        spinlock_t pool_lock;
 
        /* Peripheral channels connected to this DMAC */
+       unsigned int num_peripherals;
        struct dma_pl330_chan *peripherals; /* keep at end */
 };
 
@@ -606,11 +612,6 @@ struct dma_pl330_desc {
        struct dma_pl330_chan *pchan;
 };
 
-struct dma_pl330_filter_args {
-       struct dma_pl330_dmac *pdmac;
-       unsigned int chan_id;
-};
-
 static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
 {
        if (r && r->xfer_cb)
@@ -2298,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
        tasklet_schedule(&pch->task);
 }
 
-static bool pl330_dt_filter(struct dma_chan *chan, void *param)
-{
-       struct dma_pl330_filter_args *fargs = param;
-
-       if (chan->device != &fargs->pdmac->ddma)
-               return false;
-
-       return (chan->chan_id == fargs->chan_id);
-}
-
 bool pl330_filter(struct dma_chan *chan, void *param)
 {
        u8 *peri_id;
@@ -2325,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
 {
        int count = dma_spec->args_count;
        struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
-       struct dma_pl330_filter_args fargs;
-       dma_cap_mask_t cap;
-
-       if (!pdmac)
-               return NULL;
+       unsigned int chan_id;
 
        if (count != 1)
                return NULL;
 
-       fargs.pdmac = pdmac;
-       fargs.chan_id = dma_spec->args[0];
-
-       dma_cap_zero(cap);
-       dma_cap_set(DMA_SLAVE, cap);
-       dma_cap_set(DMA_CYCLIC, cap);
+       chan_id = dma_spec->args[0];
+       if (chan_id >= pdmac->num_peripherals)
+               return NULL;
 
-       return dma_request_channel(cap, pl330_dt_filter, &fargs);
+       return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
 }
 
 static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2385,6 +2369,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
                pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
                /* Mark all desc done */
+               list_for_each_entry(desc, &pch->submitted_list, node) {
+                       desc->status = FREE;
+                       dma_cookie_complete(&desc->txd);
+               }
+
                list_for_each_entry(desc, &pch->work_list , node) {
                        desc->status = FREE;
                        dma_cookie_complete(&desc->txd);
@@ -2395,6 +2384,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
                        dma_cookie_complete(&desc->txd);
                }
 
+               list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
                list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
                list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
                spin_unlock_irqrestore(&pch->lock, flags);
@@ -2453,7 +2443,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 
 static void pl330_issue_pending(struct dma_chan *chan)
 {
-       pl330_tasklet((unsigned long) to_pchan(chan));
+       struct dma_pl330_chan *pch = to_pchan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pch->lock, flags);
+       list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+       spin_unlock_irqrestore(&pch->lock, flags);
+
+       pl330_tasklet((unsigned long)pch);
 }
 
 /*
@@ -2480,11 +2477,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
                dma_cookie_assign(&desc->txd);
 
-               list_move_tail(&desc->node, &pch->work_list);
+               list_move_tail(&desc->node, &pch->submitted_list);
        }
 
        cookie = dma_cookie_assign(&last->txd);
-       list_add_tail(&last->node, &pch->work_list);
+       list_add_tail(&last->node, &pch->submitted_list);
        spin_unlock_irqrestore(&pch->lock, flags);
 
        return cookie;
@@ -2960,6 +2957,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        else
                num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
 
+       pdmac->num_peripherals = num_chan;
+
        pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
        if (!pdmac->peripherals) {
                ret = -ENOMEM;
@@ -2974,6 +2973,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                else
                        pch->chan.private = adev->dev.of_node;
 
+               INIT_LIST_HEAD(&pch->submitted_list);
                INIT_LIST_HEAD(&pch->work_list);
                INIT_LIST_HEAD(&pch->completed_list);
                spin_lock_init(&pch->lock);
@@ -3021,6 +3021,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                        "unable to register DMA to the generic DT DMA helpers\n");
                }
        }
+
+       adev->dev.dma_parms = &pdmac->dma_parms;
+
        /*
         * This is the limit for transfers with a buswidth of 1, larger
         * buswidths will have larger limits.
index 8bba298535b0984e241793403af6920399d7366d..ce7a8d7564ba6f96649407a5840323eb92669350 100644 (file)
@@ -4114,6 +4114,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
        regs = ioremap(res.start, resource_size(&res));
        if (!regs) {
                dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+               ret = -ENOMEM;
                goto err_regs_alloc;
        }
 
index 6aec3ad814d37f16b69c51f44347d9826e411885..d4d3a3109b163f3c3a4a471cfdbc82c838024437 100644 (file)
@@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
 }
 EXPORT_SYMBOL(sirfsoc_dma_filter_id);
 
+#define SIRFSOC_DMA_BUSWIDTHS \
+       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = true;
+       caps->cmd_terminate = true;
+
+       return 0;
+}
+
 static int sirfsoc_dma_probe(struct platform_device *op)
 {
        struct device_node *dn = op->dev.of_node;
@@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
        dma->device_tx_status = sirfsoc_dma_tx_status;
        dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
        dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+       dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
 
        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
index d11bb3620f2783115b7a91058a297dfbf657033d..03ad64ecaaf043a4325dd6d7d325e676672a16b4 100644 (file)
 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP          BIT(27)
 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1                (1 << 16)
 
+/* Tegra148 specific registers */
+#define TEGRA_APBDMA_CHAN_WCOUNT               0x20
+
+#define TEGRA_APBDMA_CHAN_WORD_TRANSFER                0x24
+
 /*
  * If any burst is in flight and DMA paused then this is the time to complete
  * on-flight burst and update DMA status register.
 /* Channel base address offset from APBDMA base address */
 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET   0x1000
 
-/* DMA channel register space size */
-#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE     0x20
-
 struct tegra_dma;
 
 /*
  * tegra_dma_chip_data Tegra chip specific DMA data
  * @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size/stride.
  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
  * @support_channel_pause: Support channel wise pause of dma.
+ * @support_separate_wcount_reg: Support separate word count register.
  */
 struct tegra_dma_chip_data {
        int nr_channels;
+       int channel_reg_size;
        int max_dma_count;
        bool support_channel_pause;
+       bool support_separate_wcount_reg;
 };
 
 /* DMA channel registers */
@@ -133,6 +139,7 @@ struct tegra_dma_channel_regs {
        unsigned long   apb_ptr;
        unsigned long   ahb_seq;
        unsigned long   apb_seq;
+       unsigned long   wcount;
 };
 
 /*
@@ -426,6 +433,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
 
        /* Start DMA */
        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
@@ -465,6 +474,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
        /* Safe to program new configuration */
        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
+                                               nsg_req->ch_regs.wcount);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
                                nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
        nsg_req->configured = true;
@@ -718,6 +730,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
        struct tegra_dma_desc *dma_desc;
        unsigned long flags;
        unsigned long status;
+       unsigned long wcount;
        bool was_busy;
 
        spin_lock_irqsave(&tdc->lock, flags);
@@ -738,6 +751,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
                tdc->isr_handler(tdc, true);
                status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
        }
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+       else
+               wcount = status;
 
        was_busy = tdc->busy;
        tegra_dma_stop(tdc);
@@ -746,7 +763,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
                sgreq = list_first_entry(&tdc->pending_sg_req,
                                        typeof(*sgreq), node);
                sgreq->dma_desc->bytes_transferred +=
-                               get_current_xferred_count(tdc, sgreq, status);
+                               get_current_xferred_count(tdc, sgreq, wcount);
        }
        tegra_dma_resume(tdc);
 
@@ -908,6 +925,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
        return -EINVAL;
 }
 
+static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
+       struct tegra_dma_channel_regs *ch_regs, u32 len)
+{
+       u32 len_field = (len - 4) & 0xFFFC;
+
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               ch_regs->wcount = len_field;
+       else
+               ch_regs->csr |= len_field;
+}
+
 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
        struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
@@ -991,7 +1019,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 
                sg_req->ch_regs.apb_ptr = apb_ptr;
                sg_req->ch_regs.ahb_ptr = mem;
-               sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+               sg_req->ch_regs.csr = csr;
+               tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
                sg_req->ch_regs.apb_seq = apb_seq;
                sg_req->ch_regs.ahb_seq = ahb_seq;
                sg_req->configured = false;
@@ -1120,7 +1149,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
                sg_req->ch_regs.apb_ptr = apb_ptr;
                sg_req->ch_regs.ahb_ptr = mem;
-               sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+               sg_req->ch_regs.csr = csr;
+               tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
                sg_req->ch_regs.apb_seq = apb_seq;
                sg_req->ch_regs.ahb_seq = ahb_seq;
                sg_req->configured = false;
@@ -1234,27 +1264,45 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 /* Tegra20 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
        .nr_channels            = 16,
+       .channel_reg_size       = 0x20,
        .max_dma_count          = 1024UL * 64,
        .support_channel_pause  = false,
+       .support_separate_wcount_reg = false,
 };
 
 /* Tegra30 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
        .nr_channels            = 32,
+       .channel_reg_size       = 0x20,
        .max_dma_count          = 1024UL * 64,
        .support_channel_pause  = false,
+       .support_separate_wcount_reg = false,
 };
 
 /* Tegra114 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
        .nr_channels            = 32,
+       .channel_reg_size       = 0x20,
        .max_dma_count          = 1024UL * 64,
        .support_channel_pause  = true,
+       .support_separate_wcount_reg = false,
+};
+
+/* Tegra148 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
+       .nr_channels            = 32,
+       .channel_reg_size       = 0x40,
+       .max_dma_count          = 1024UL * 64,
+       .support_channel_pause  = true,
+       .support_separate_wcount_reg = true,
 };
 
 
 static const struct of_device_id tegra_dma_of_match[] = {
        {
+               .compatible = "nvidia,tegra148-apbdma",
+               .data = &tegra148_dma_chip_data,
+       }, {
                .compatible = "nvidia,tegra114-apbdma",
                .data = &tegra114_dma_chip_data,
        }, {
@@ -1348,7 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
                struct tegra_dma_channel *tdc = &tdma->channels[i];
 
                tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-                                       i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+                                       i * cdata->channel_reg_size;
 
                res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (!res) {
index 85c19d63f9fbe9b6392b73238a209d6f55411215..181b95267866b605f521860f973aa3860d694fa0 100644 (file)
@@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
 static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 {
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+       dma_cookie_t cookie;
 
+       cookie = vd->tx.cookie;
        dma_cookie_complete(&vd->tx);
        dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
-               vd, vd->tx.cookie);
+                vd, cookie);
        list_add_tail(&vd->node, &vc->desc_completed);
 
        tasklet_schedule(&vc->task);
index f8642759116770afa976a23f12f14f1f42149be4..8e7fa4dbaed867ca45cb9c885db53543a65a74b9 100644 (file)
@@ -20,6 +20,10 @@ menuconfig DRM
          details.  You should also select and configure AGP
          (/dev/agpgart) support if it is available for your platform.
 
+config DRM_MIPI_DSI
+       bool
+       depends on DRM
+
 config DRM_USB
        tristate
        depends on DRM
@@ -188,6 +192,10 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
 
 source "drivers/gpu/drm/qxl/Kconfig"
 
+source "drivers/gpu/drm/bochs/Kconfig"
+
 source "drivers/gpu/drm/msm/Kconfig"
 
 source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/panel/Kconfig"
index cc08b845f9655a6b8bb516dab365da2d4c829198..292a79d64146274428ad1ef251d08e1d78d46bf5 100644 (file)
@@ -18,6 +18,7 @@ drm-y       :=        drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 
 drm-usb-y   := drm_usb.o
 
@@ -31,6 +32,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 CFLAGS_drm_trace_points.o := -I$(src)
 
 obj-$(CONFIG_DRM)      += drm.o
+obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_USB)   += drm_usb.o
 obj-$(CONFIG_DRM_TTM)  += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
@@ -56,6 +58,8 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC)       += tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_BOCHS) += bochs/
 obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y                  += i2c/
+obj-y                  += panel/
index 40d371521fe19fea47b4a59b0a4fe1db2054aa54..50ae88ad4d76fb85b863adfd6129ea7a75df2178 100644 (file)
@@ -5,6 +5,7 @@ config DRM_ARMADA
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        help
          Support the "LCD" controllers found on the Marvell Armada 510
          devices.  There are two controllers on the device, each controller
index 62d0ff3efddf9bb5a08e1b209b5a40c6b8148e47..acf3a36c9ebc453737b1a849aa6715f41b241d68 100644 (file)
@@ -128,6 +128,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
                return -ENOMEM;
        }
 
+       platform_set_drvdata(dev->platformdev, dev);
        dev->dev_private = priv;
 
        /* Get the implementation specific driver data. */
@@ -381,7 +382,7 @@ static int armada_drm_probe(struct platform_device *pdev)
 
 static int armada_drm_remove(struct platform_device *pdev)
 {
-       drm_platform_exit(&armada_drm_driver, pdev);
+       drm_put_dev(platform_get_drvdata(pdev));
        return 0;
 }
 
index 7b33e14e44aa17a8026d4c1c33b8e2dbc529f93f..3f65dd6676b2c1354a4faef7d723f171ad840c67 100644 (file)
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
         * then the BO is being moved and we should
         * store up the damage until later.
         */
-       if (!in_interrupt())
+       if (!drm_can_sleep())
                ret = ast_bo_reserve(bo, true);
        if (ret) {
                if (ret != -EBUSY)
index af0b868a9dfd6c5b3849f147f01fbf2cc29c4471..50535fd5a88d258492b90ca9e9e9cebfaf2de563 100644 (file)
@@ -189,53 +189,6 @@ static int ast_get_dram_info(struct drm_device *dev)
        return 0;
 }
 
-uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
-{
-       struct ast_private *ast = dev->dev_private;
-       uint32_t dclk, jreg;
-       uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
-
-       dram_bus_width = ast->dram_bus_width;
-       mclk = ast->mclk;
-
-       if (ast->chip == AST2100 ||
-           ast->chip == AST1100 ||
-           ast->chip == AST2200 ||
-           ast->chip == AST2150 ||
-           ast->dram_bus_width == 16)
-               dram_efficency = 600;
-       else if (ast->chip == AST2300)
-               dram_efficency = 400;
-
-       dram_bandwidth = mclk * dram_bus_width * 2 / 8;
-       actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
-
-       if (ast->chip == AST1180)
-               dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
-       else {
-               jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
-               if ((jreg & 0x08) && (ast->chip == AST2000))
-                       dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
-               else if ((jreg & 0x08) && (bpp == 8))
-                       dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
-               else
-                       dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
-       }
-
-       if (ast->chip == AST2100 ||
-           ast->chip == AST2200 ||
-           ast->chip == AST2300 ||
-           ast->chip == AST1180) {
-               if (dclk > 200)
-                       dclk = 200;
-       } else {
-               if (dclk > 165)
-                       dclk = 165;
-       }
-
-       return dclk;
-}
-
 static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
@@ -449,7 +402,7 @@ int ast_dumb_create(struct drm_file *file,
        return 0;
 }
 
-void ast_bo_unref(struct ast_bo **bo)
+static void ast_bo_unref(struct ast_bo **bo)
 {
        struct ttm_buffer_object *tbo;
 
index 7fc9f7272b56e7e9ebe846b650e9e2f18f28cc5f..cca063b110831aa4d4fc2f46afd6e9cc35ac30b9 100644 (file)
@@ -404,7 +404,7 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode
        }
 }
 
-void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
                      struct ast_vbios_mode_info *vbios_mode)
 {
        struct ast_private *ast = dev->dev_private;
@@ -415,7 +415,7 @@ void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
        ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
 }
 
-bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
                     struct ast_vbios_mode_info *vbios_mode)
 {
        switch (crtc->fb->bits_per_pixel) {
@@ -427,7 +427,7 @@ bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
        return true;
 }
 
-void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+static void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
 {
        struct ast_private *ast = crtc->dev->dev_private;
        u32 addr;
@@ -623,7 +623,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
        .destroy = ast_crtc_destroy,
 };
 
-int ast_crtc_init(struct drm_device *dev)
+static int ast_crtc_init(struct drm_device *dev)
 {
        struct ast_crtc *crtc;
        int i;
@@ -710,7 +710,7 @@ static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
        .mode_set = ast_encoder_mode_set,
 };
 
-int ast_encoder_init(struct drm_device *dev)
+static int ast_encoder_init(struct drm_device *dev)
 {
        struct ast_encoder *ast_encoder;
 
@@ -777,7 +777,7 @@ static const struct drm_connector_funcs ast_connector_funcs = {
        .destroy = ast_connector_destroy,
 };
 
-int ast_connector_init(struct drm_device *dev)
+static int ast_connector_init(struct drm_device *dev)
 {
        struct ast_connector *ast_connector;
        struct drm_connector *connector;
@@ -810,7 +810,7 @@ int ast_connector_init(struct drm_device *dev)
 }
 
 /* allocate cursor cache and pin at start of VRAM */
-int ast_cursor_init(struct drm_device *dev)
+static int ast_cursor_init(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
        int size;
@@ -847,7 +847,7 @@ fail:
        return ret;
 }
 
-void ast_cursor_fini(struct drm_device *dev)
+static void ast_cursor_fini(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
        ttm_bo_kunmap(&ast->cache_kmap);
@@ -965,7 +965,7 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
        kfree(i2c);
 }
 
-void ast_show_cursor(struct drm_crtc *crtc)
+static void ast_show_cursor(struct drm_crtc *crtc)
 {
        struct ast_private *ast = crtc->dev->dev_private;
        u8 jreg;
@@ -976,7 +976,7 @@ void ast_show_cursor(struct drm_crtc *crtc)
        ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
 }
 
-void ast_hide_cursor(struct drm_crtc *crtc)
+static void ast_hide_cursor(struct drm_crtc *crtc)
 {
        struct ast_private *ast = crtc->dev->dev_private;
        ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
index 32aecb34dbced78308a393833e177fd46a1e5c9b..4ea9b17ac17a9c5459617898718f1145eaf232ba 100644 (file)
@@ -80,7 +80,7 @@ static int ast_ttm_global_init(struct ast_private *ast)
        return 0;
 }
 
-void
+static void
 ast_ttm_global_release(struct ast_private *ast)
 {
        if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
        kfree(bo);
 }
 
-bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
 {
        if (bo->destroy == &ast_bo_ttm_destroy)
                return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func ast_tt_backend_func = {
 };
 
 
-struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
 {
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
new file mode 100644 (file)
index 0000000..c8fcf12
--- /dev/null
@@ -0,0 +1,11 @@
+config DRM_BOCHS
+       tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
+       depends on DRM && PCI
+       select DRM_KMS_HELPER
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       select DRM_TTM
+       help
+         Choose this option for qemu.
+         If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile
new file mode 100644 (file)
index 0000000..844a556
--- /dev/null
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
+
+obj-$(CONFIG_DRM_BOCHS)        += bochs-drm.o
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
new file mode 100644 (file)
index 0000000..741965c
--- /dev/null
@@ -0,0 +1,164 @@
+#include <linux/io.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_page_alloc.h>
+
+/* ---------------------------------------------------------------------- */
+
+#define VBE_DISPI_IOPORT_INDEX           0x01CE
+#define VBE_DISPI_IOPORT_DATA            0x01CF
+
+#define VBE_DISPI_INDEX_ID               0x0
+#define VBE_DISPI_INDEX_XRES             0x1
+#define VBE_DISPI_INDEX_YRES             0x2
+#define VBE_DISPI_INDEX_BPP              0x3
+#define VBE_DISPI_INDEX_ENABLE           0x4
+#define VBE_DISPI_INDEX_BANK             0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH       0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT      0x7
+#define VBE_DISPI_INDEX_X_OFFSET         0x8
+#define VBE_DISPI_INDEX_Y_OFFSET         0x9
+#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
+
+#define VBE_DISPI_ID0                    0xB0C0
+#define VBE_DISPI_ID1                    0xB0C1
+#define VBE_DISPI_ID2                    0xB0C2
+#define VBE_DISPI_ID3                    0xB0C3
+#define VBE_DISPI_ID4                    0xB0C4
+#define VBE_DISPI_ID5                    0xB0C5
+
+#define VBE_DISPI_DISABLED               0x00
+#define VBE_DISPI_ENABLED                0x01
+#define VBE_DISPI_GETCAPS                0x02
+#define VBE_DISPI_8BIT_DAC               0x20
+#define VBE_DISPI_LFB_ENABLED            0x40
+#define VBE_DISPI_NOCLEARMEM             0x80
+
+/* ---------------------------------------------------------------------- */
+
+enum bochs_types {
+       BOCHS_QEMU_STDVGA,
+       BOCHS_UNKNOWN,
+};
+
+struct bochs_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_object *obj;
+};
+
+struct bochs_device {
+       /* hw */
+       void __iomem   *mmio;
+       int            ioports;
+       void __iomem   *fb_map;
+       unsigned long  fb_base;
+       unsigned long  fb_size;
+
+       /* mode */
+       u16 xres;
+       u16 yres;
+       u16 yres_virtual;
+       u32 stride;
+       u32 bpp;
+
+       /* drm */
+       struct drm_device  *dev;
+       struct drm_crtc crtc;
+       struct drm_encoder encoder;
+       struct drm_connector connector;
+       bool mode_config_initialized;
+
+       /* ttm */
+       struct {
+               struct drm_global_reference mem_global_ref;
+               struct ttm_bo_global_ref bo_global_ref;
+               struct ttm_bo_device bdev;
+               bool initialized;
+       } ttm;
+
+       /* fbdev */
+       struct {
+               struct bochs_framebuffer gfb;
+               struct drm_fb_helper helper;
+               int size;
+               int x1, y1, x2, y2; /* dirty rect */
+               spinlock_t dirty_lock;
+               bool initialized;
+       } fb;
+};
+
+#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
+
+struct bochs_bo {
+       struct ttm_buffer_object bo;
+       struct ttm_placement placement;
+       struct ttm_bo_kmap_obj kmap;
+       struct drm_gem_object gem;
+       u32 placements[3];
+       int pin_count;
+};
+
+static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo)
+{
+       return container_of(bo, struct bochs_bo, bo);
+}
+
+static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
+{
+       return container_of(gem, struct bochs_bo, gem);
+}
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
+{
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* bochs_hw.c */
+int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+void bochs_hw_fini(struct drm_device *dev);
+
+void bochs_hw_setmode(struct bochs_device *bochs,
+                     struct drm_display_mode *mode);
+void bochs_hw_setbase(struct bochs_device *bochs,
+                     int x, int y, u64 addr);
+
+/* bochs_mm.c */
+int bochs_mm_init(struct bochs_device *bochs);
+void bochs_mm_fini(struct bochs_device *bochs);
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+                    struct drm_gem_object **obj);
+int bochs_gem_init_object(struct drm_gem_object *obj);
+void bochs_gem_free_object(struct drm_gem_object *obj);
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+                     struct drm_mode_create_dumb *args);
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+                          uint32_t handle, uint64_t *offset);
+
+int bochs_framebuffer_init(struct drm_device *dev,
+                          struct bochs_framebuffer *gfb,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
+                          struct drm_gem_object *obj);
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int bochs_bo_unpin(struct bochs_bo *bo);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
+
+/* bochs_kms.c */
+int bochs_kms_init(struct bochs_device *bochs);
+void bochs_kms_fini(struct bochs_device *bochs);
+
+/* bochs_fbdev.c */
+int bochs_fbdev_init(struct bochs_device *bochs);
+void bochs_fbdev_fini(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
new file mode 100644 (file)
index 0000000..395bba2
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "bochs.h"
+
+/* "fbdev" module option (read-only at runtime): set fbdev=0 to skip
+ * registering the fbdev emulation device. */
+static bool enable_fbdev = true;
+module_param_named(fbdev, enable_fbdev, bool, 0444);
+MODULE_PARM_DESC(fbdev, "register fbdev device");
+
+/* ---------------------------------------------------------------------- */
+/* drm interface                                                          */
+
+/* drm_driver.unload: tear down fbdev, kms, ttm and hardware state in
+ * reverse init order, then free the per-device structure.  Also used as
+ * the error-unwind path by bochs_load(), so each *_fini must tolerate
+ * partially initialised state. */
+static int bochs_unload(struct drm_device *dev)
+{
+       struct bochs_device *bochs = dev->dev_private;
+
+       bochs_fbdev_fini(bochs);
+       bochs_kms_fini(bochs);
+       bochs_mm_fini(bochs);
+       bochs_hw_fini(dev);
+       kfree(bochs);
+       dev->dev_private = NULL;
+       return 0;
+}
+
+/* drm_driver.load: allocate the per-device struct and bring up the
+ * hardware, the ttm memory manager and modesetting, in that order.
+ * fbdev emulation is optional (see "fbdev" module parameter).
+ * On any failure the whole device is unwound via bochs_unload(). */
+static int bochs_load(struct drm_device *dev, unsigned long flags)
+{
+       struct bochs_device *bochs;
+       int ret;
+
+       bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+       if (bochs == NULL)
+               return -ENOMEM;
+       dev->dev_private = bochs;
+       bochs->dev = dev;
+
+       ret = bochs_hw_init(dev, flags);
+       if (ret)
+               goto err;
+
+       ret = bochs_mm_init(bochs);
+       if (ret)
+               goto err;
+
+       ret = bochs_kms_init(bochs);
+       if (ret)
+               goto err;
+
+       if (enable_fbdev)
+               bochs_fbdev_init(bochs);
+
+       return 0;
+
+err:
+       bochs_unload(dev);
+       return ret;
+}
+
+/* file_operations for the drm device node: stock drm entry points plus
+ * a driver-specific mmap hook that routes mappings through ttm. */
+static const struct file_operations bochs_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .release        = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = drm_compat_ioctl,
+#endif
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .llseek         = no_llseek,
+       .mmap           = bochs_mmap,
+};
+
+/* drm_driver: GEM + modesetting only, no legacy DRI.  The dumb-buffer
+ * hooks let userspace allocate unaccelerated scanout buffers. */
+static struct drm_driver bochs_driver = {
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET,
+       .load                   = bochs_load,
+       .unload                 = bochs_unload,
+       .fops                   = &bochs_fops,
+       .name                   = "bochs-drm",
+       .desc                   = "bochs dispi vga interface (qemu stdvga)",
+       .date                   = "20130925",
+       .major                  = 1,
+       .minor                  = 0,
+       .gem_free_object        = bochs_gem_free_object,
+       .dumb_create            = bochs_dumb_create,
+       .dumb_map_offset        = bochs_dumb_mmap_offset,
+       .dumb_destroy           = drm_gem_dumb_destroy,
+};
+
+/* ---------------------------------------------------------------------- */
+/* pci interface                                                          */
+
+/* Evict any firmware framebuffer driver (e.g. vesafb/efifb) that has
+ * already claimed BAR 0 so this driver can take over the device.
+ * NOTE(review): the result of remove_conflicting_framebuffers() is
+ * ignored here — confirm that is intentional for this kernel version. */
+static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+       struct apertures_struct *ap;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return -ENOMEM;
+
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+       remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+       kfree(ap);
+
+       return 0;
+}
+
+/* PCI probe: kick out a conflicting firmware fb first, then register
+ * the drm device (drm_get_pci_dev calls back into bochs_load()). */
+static int bochs_pci_probe(struct pci_dev *pdev,
+                          const struct pci_device_id *ent)
+{
+       int ret;
+
+       ret = bochs_kick_out_firmware_fb(pdev);
+       if (ret)
+               return ret;
+
+       return drm_get_pci_dev(pdev, ent, &bochs_driver);
+}
+
+/* PCI remove: drop the drm device; drm_put_dev ends up in bochs_unload(). */
+static void bochs_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       drm_put_dev(dev);
+}
+
+/*
+ * QEMU stdvga is 0x1234:0x1111; the qemu subsystem id 0x1af4:0x1100
+ * identifies the stdvga variant precisely.  Anything else with the
+ * same vendor/device id is treated as an unknown bochs dispi adapter.
+ *
+ * DEFINE_PCI_DEVICE_TABLE is deprecated (checkpatch warns about it);
+ * use a plain "static const struct pci_device_id" array instead.
+ */
+static const struct pci_device_id bochs_pci_tbl[] = {
+       {
+               .vendor      = 0x1234,
+               .device      = 0x1111,
+               .subvendor   = 0x1af4,
+               .subdevice   = 0x1100,
+               .driver_data = BOCHS_QEMU_STDVGA,
+       },
+       {
+               .vendor      = 0x1234,
+               .device      = 0x1111,
+               .subvendor   = PCI_ANY_ID,
+               .subdevice   = PCI_ANY_ID,
+               .driver_data = BOCHS_UNKNOWN,
+       },
+       { /* end of list */ }
+};
+
+/* PCI driver glue: probe/remove above, device table below. */
+static struct pci_driver bochs_pci_driver = {
+       .name =         "bochs-drm",
+       .id_table =     bochs_pci_tbl,
+       .probe =        bochs_pci_probe,
+       .remove =       bochs_pci_remove,
+};
+
+/* ---------------------------------------------------------------------- */
+/* module init/exit                                                       */
+
+/* Module entry: register the PCI driver with the drm core. */
+static int __init bochs_init(void)
+{
+       return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+}
+
+/* Module exit: unregister the PCI driver. */
+static void __exit bochs_exit(void)
+{
+       drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+}
+
+module_init(bochs_init);
+module_exit(bochs_exit);
+
+MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
new file mode 100644 (file)
index 0000000..4da5206
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* fbdev ops: drm_fb_helper defaults plus the sys_* drawing routines,
+ * since the framebuffer is accessed through a kernel mapping. */
+static struct fb_ops bochsfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = drm_fb_helper_check_var,
+       .fb_set_par = drm_fb_helper_set_par,
+       .fb_fillrect = sys_fillrect,
+       .fb_copyarea = sys_copyarea,
+       .fb_imageblit = sys_imageblit,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+/* Allocate the GEM object backing the fbdev console.  Size is derived
+ * from the requested pitch and height; "true" marks it as a kernel
+ * allocation.  On success *gobj_p holds a reference owned by the caller. */
+static int bochsfb_create_object(struct bochs_device *bochs,
+                                struct drm_mode_fb_cmd2 *mode_cmd,
+                                struct drm_gem_object **gobj_p)
+{
+       struct drm_device *dev = bochs->dev;
+       struct drm_gem_object *gobj;
+       u32 size;
+       int ret = 0;
+
+       size = mode_cmd->pitches[0] * mode_cmd->height;
+       ret = bochs_gem_create(dev, size, true, &gobj);
+       if (ret)
+               return ret;
+
+       *gobj_p = gobj;
+       return ret;
+}
+
+/* drm_fb_helper .fb_probe: build the fbdev console framebuffer.
+ * Allocates a GEM/ttm object, pins it in VRAM, kmaps it, then wires up
+ * the fb_info and drm framebuffer structures around it.
+ *
+ * NOTE(review): the later error paths leak earlier resources — the GEM
+ * reference from bochsfb_create_object(), the VRAM pin, the kmap and
+ * the framebuffer_alloc()ed info are not unwound on failure.  Worth
+ * fixing with a goto-cleanup ladder in a follow-up. */
+static int bochsfb_create(struct drm_fb_helper *helper,
+                         struct drm_fb_helper_surface_size *sizes)
+{
+       struct bochs_device *bochs =
+               container_of(helper, struct bochs_device, fb.helper);
+       struct drm_device *dev = bochs->dev;
+       struct fb_info *info;
+       struct drm_framebuffer *fb;
+       struct drm_mode_fb_cmd2 mode_cmd;
+       struct device *device = &dev->pdev->dev;
+       struct drm_gem_object *gobj = NULL;
+       struct bochs_bo *bo = NULL;
+       int size, ret;
+
+       /* hardware is programmed for 32 bpp only (see bochs_hw_setmode) */
+       if (sizes->surface_bpp != 32)
+               return -EINVAL;
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height;
+       mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                                         sizes->surface_depth);
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+
+       /* alloc, pin & map bo */
+       ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
+       if (ret) {
+               DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+               return ret;
+       }
+
+       bo = gem_to_bochs_bo(gobj);
+
+       ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+       if (ret)
+               return ret;
+
+       /* console must stay resident in VRAM for its whole lifetime */
+       ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+       if (ret) {
+               DRM_ERROR("failed to pin fbcon\n");
+               ttm_bo_unreserve(&bo->bo);
+               return ret;
+       }
+
+       /* permanent kernel mapping used by the sys_* drawing ops */
+       ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
+                         &bo->kmap);
+       if (ret) {
+               DRM_ERROR("failed to kmap fbcon\n");
+               ttm_bo_unreserve(&bo->bo);
+               return ret;
+       }
+
+       ttm_bo_unreserve(&bo->bo);
+
+       /* init fb device */
+       info = framebuffer_alloc(0, device);
+       if (info == NULL)
+               return -ENOMEM;
+
+       info->par = &bochs->fb.helper;
+
+       ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
+       if (ret)
+               return ret;
+
+       bochs->fb.size = size;
+
+       /* setup helper */
+       fb = &bochs->fb.gfb.base;
+       bochs->fb.helper.fb = fb;
+       bochs->fb.helper.fbdev = info;
+
+       strcpy(info->fix.id, "bochsdrmfb");
+
+       info->flags = FBINFO_DEFAULT;
+       info->fbops = &bochsfb_ops;
+
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
+                              sizes->fb_height);
+
+       info->screen_base = bo->kmap.virtual;
+       info->screen_size = size;
+
+#if 0
+       /* FIXME: get this right for mmap(/dev/fb0) */
+       info->fix.smem_start = bochs_bo_mmap_offset(bo);
+       info->fix.smem_len = size;
+#endif
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Tear down the fbdev console: unregister the fb device, drop the GEM
+ * reference to its backing object and clean up the helper/framebuffer. */
+static int bochs_fbdev_destroy(struct bochs_device *bochs)
+{
+       struct bochs_framebuffer *gfb = &bochs->fb.gfb;
+       struct fb_info *info;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (bochs->fb.helper.fbdev) {
+               info = bochs->fb.helper.fbdev;
+
+               unregister_framebuffer(info);
+               if (info->cmap.len)
+                       fb_dealloc_cmap(&info->cmap);
+               framebuffer_release(info);
+       }
+
+       if (gfb->obj) {
+               drm_gem_object_unreference_unlocked(gfb->obj);
+               gfb->obj = NULL;
+       }
+
+       drm_fb_helper_fini(&bochs->fb.helper);
+       drm_framebuffer_unregister_private(&gfb->base);
+       drm_framebuffer_cleanup(&gfb->base);
+
+       return 0;
+}
+
+/* Gamma hooks for the fb helper.  The hardware has no LUT, so set is a
+ * no-op and get returns an identity ramp.
+ * NOTE(review): only referenced from this file via bochs_fb_helper_funcs
+ * below — these look like they could be static; confirm no other user. */
+void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                       u16 blue, int regno)
+{
+}
+
+void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+                       u16 *blue, int regno)
+{
+       *red   = regno;
+       *green = regno;
+       *blue  = regno;
+}
+
+/* fb helper callbacks: gamma stubs above plus the console constructor. */
+static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+       .gamma_set = bochs_fb_gamma_set,
+       .gamma_get = bochs_fb_gamma_get,
+       .fb_probe = bochsfb_create,
+};
+
+/* Register fbdev emulation: one crtc, one connector (hence 1, 1), then
+ * let the helper pick an initial 32 bpp config. */
+int bochs_fbdev_init(struct bochs_device *bochs)
+{
+       int ret;
+
+       bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+       spin_lock_init(&bochs->fb.dirty_lock);
+
+       ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
+                                1, 1);
+       if (ret)
+               return ret;
+
+       drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
+       drm_helper_disable_unused_functions(bochs->dev);
+       drm_fb_helper_initial_config(&bochs->fb.helper, 32);
+
+       bochs->fb.initialized = true;
+       return 0;
+}
+
+/* Undo bochs_fbdev_init(); safe to call when fbdev was never set up
+ * (e.g. fbdev=0 module option or load-error unwind). */
+void bochs_fbdev_fini(struct bochs_device *bochs)
+{
+       if (!bochs->fb.initialized)
+               return;
+
+       bochs_fbdev_destroy(bochs);
+       bochs->fb.initialized = false;
+}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
new file mode 100644 (file)
index 0000000..dbe619e
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* Write one byte to a VGA register, either through the mmio bar (the
+ * 0x3c0-0x3df port range is mapped at offset 0x400 there) or through
+ * legacy I/O ports.  Ports outside that range are rejected. */
+static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
+{
+       if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
+               return;
+
+       if (bochs->mmio) {
+               int offset = ioport - 0x3c0 + 0x400;
+               writeb(val, bochs->mmio + offset);
+       } else {
+               outb(val, ioport);
+       }
+}
+
+/* Read a bochs dispi register: mmio mirrors the registers as 16-bit
+ * words starting at offset 0x500, otherwise use the classic
+ * index/data port pair. */
+static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
+{
+       u16 ret = 0;
+
+       if (bochs->mmio) {
+               int offset = 0x500 + (reg << 1);
+               ret = readw(bochs->mmio + offset);
+       } else {
+               outw(reg, VBE_DISPI_IOPORT_INDEX);
+               ret = inw(VBE_DISPI_IOPORT_DATA);
+       }
+       return ret;
+}
+
+/* Write a bochs dispi register; same mmio-vs-ioport layout as
+ * bochs_dispi_read() above. */
+static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
+{
+       if (bochs->mmio) {
+               int offset = 0x500 + (reg << 1);
+               writew(val, bochs->mmio + offset);
+       } else {
+               outw(reg, VBE_DISPI_IOPORT_INDEX);
+               outw(val, VBE_DISPI_IOPORT_DATA);
+       }
+}
+
+/* Map the register interface (mmio BAR 2 if present, legacy ioports
+ * otherwise), verify the dispi ID, then map the framebuffer (BAR 0).
+ * Error paths rely on bochs_unload() -> bochs_hw_fini() for cleanup.
+ * NOTE(review): the DRM_ERROR below prints unsigned longs with %ld —
+ * %lu would match the types. */
+int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+{
+       struct bochs_device *bochs = dev->dev_private;
+       struct pci_dev *pdev = dev->pdev;
+       unsigned long addr, size, mem, ioaddr, iosize;
+       u16 id;
+
+       if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
+           (pdev->resource[2].flags & IORESOURCE_MEM)) {
+               /* mmio bar with vga and bochs registers present */
+               if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+                       DRM_ERROR("Cannot request mmio region\n");
+                       return -EBUSY;
+               }
+               ioaddr = pci_resource_start(pdev, 2);
+               iosize = pci_resource_len(pdev, 2);
+               bochs->mmio = ioremap(ioaddr, iosize);
+               if (bochs->mmio == NULL) {
+                       DRM_ERROR("Cannot map mmio region\n");
+                       return -ENOMEM;
+               }
+       } else {
+               /* no mmio bar: fall back to the legacy dispi ioport pair */
+               ioaddr = VBE_DISPI_IOPORT_INDEX;
+               iosize = 2;
+               if (!request_region(ioaddr, iosize, "bochs-drm")) {
+                       DRM_ERROR("Cannot request ioports\n");
+                       return -EBUSY;
+               }
+               bochs->ioports = 1;
+       }
+
+       /* sanity-check the device and read the VRAM size (in 64k units) */
+       id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
+       mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K)
+               * 64 * 1024;
+       if ((id & 0xfff0) != VBE_DISPI_ID0) {
+               DRM_ERROR("ID mismatch\n");
+               return -ENODEV;
+       }
+
+       if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0)
+               return -ENODEV;
+       addr = pci_resource_start(pdev, 0);
+       size = pci_resource_len(pdev, 0);
+       if (addr == 0)
+               return -ENODEV;
+       if (size != mem) {
+               /* trust the smaller of BAR size and reported VRAM size */
+               DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n",
+                       size, mem);
+               size = min(size, mem);
+       }
+
+       if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
+               DRM_ERROR("Cannot request framebuffer\n");
+               return -EBUSY;
+       }
+
+       bochs->fb_map = ioremap(addr, size);
+       if (bochs->fb_map == NULL) {
+               DRM_ERROR("Cannot map framebuffer\n");
+               return -ENOMEM;
+       }
+       bochs->fb_base = addr;
+       bochs->fb_size = size;
+
+       DRM_INFO("Found bochs VGA, ID 0x%x.\n", id);
+       DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n",
+                size / 1024, addr,
+                bochs->ioports ? "ioports" : "mmio",
+                ioaddr);
+       return 0;
+}
+
+/* Undo bochs_hw_init(); tolerates partial initialisation since it is
+ * also reached via the load-error unwind path. */
+void bochs_hw_fini(struct drm_device *dev)
+{
+       struct bochs_device *bochs = dev->dev_private;
+
+       if (bochs->mmio)
+               iounmap(bochs->mmio);
+       if (bochs->ioports)
+               release_region(VBE_DISPI_IOPORT_INDEX, 2);
+       if (bochs->fb_map)
+               iounmap(bochs->fb_map);
+       pci_release_regions(dev->pdev);
+}
+
+/* Program a display mode.  Always 32 bpp; the virtual height is the
+ * whole VRAM divided by the stride so panning/flipping via the y
+ * offset works (see bochs_hw_setbase). */
+void bochs_hw_setmode(struct bochs_device *bochs,
+                     struct drm_display_mode *mode)
+{
+       bochs->xres = mode->hdisplay;
+       bochs->yres = mode->vdisplay;
+       bochs->bpp = 32;
+       bochs->stride = mode->hdisplay * (bochs->bpp / 8);
+       bochs->yres_virtual = bochs->fb_size / bochs->stride;
+
+       DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
+                        bochs->xres, bochs->yres, bochs->bpp,
+                        bochs->yres_virtual);
+
+       bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP,         bochs->bpp);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES,        bochs->xres);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES,        bochs->yres);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK,        0);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH,  bochs->xres);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
+                         bochs->yres_virtual);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET,    0);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET,    0);
+
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
+                         VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+}
+
+/* Set the scanout start: convert the (x, y) pan plus the buffer's VRAM
+ * address into a byte offset, then split that back into virtual x/y
+ * coordinates for the dispi offset registers. */
+void bochs_hw_setbase(struct bochs_device *bochs,
+                     int x, int y, u64 addr)
+{
+       unsigned long offset = (unsigned long)addr +
+               y * bochs->stride +
+               x * (bochs->bpp / 8);
+       int vy = offset / bochs->stride;
+       int vx = (offset % bochs->stride) * 8 / bochs->bpp;
+
+       DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
+                        x, y, addr, offset, vx, vy);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
+       bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
new file mode 100644 (file)
index 0000000..62ec7d4
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+/* Preferred/default mode, overridable via module parameters. */
+static int defx = 1024;
+static int defy = 768;
+
+module_param(defx, int, 0444);
+module_param(defy, int, 0444);
+MODULE_PARM_DESC(defx, "default x resolution");
+MODULE_PARM_DESC(defy, "default y resolution");
+
+/* ---------------------------------------------------------------------- */
+
+/* No palette hardware: LUT load is a no-op. */
+static void bochs_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The virtual hardware has no power states; all DPMS modes are no-ops. */
+static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+       default:
+               return;
+       }
+}
+
+/* Accept every requested mode unmodified. */
+static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/* Point the scanout at crtc->fb: unpin the previous framebuffer's BO
+ * (best effort — a failed reserve is only logged), pin the new one in
+ * VRAM, and program its address into the hardware. */
+static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                                   struct drm_framebuffer *old_fb)
+{
+       struct bochs_device *bochs =
+               container_of(crtc, struct bochs_device, crtc);
+       struct bochs_framebuffer *bochs_fb;
+       struct bochs_bo *bo;
+       u64 gpu_addr = 0;
+       int ret;
+
+       if (old_fb) {
+               bochs_fb = to_bochs_framebuffer(old_fb);
+               bo = gem_to_bochs_bo(bochs_fb->obj);
+               ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+               if (ret) {
+                       DRM_ERROR("failed to reserve old_fb bo\n");
+               } else {
+                       bochs_bo_unpin(bo);
+                       ttm_bo_unreserve(&bo->bo);
+               }
+       }
+
+       if (WARN_ON(crtc->fb == NULL))
+               return -EINVAL;
+
+       bochs_fb = to_bochs_framebuffer(crtc->fb);
+       bo = gem_to_bochs_bo(bochs_fb->obj);
+       ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+       if (ret)
+               return ret;
+
+       /* pin fills gpu_addr with the BO's VRAM placement */
+       ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+       if (ret) {
+               ttm_bo_unreserve(&bo->bo);
+               return ret;
+       }
+
+       ttm_bo_unreserve(&bo->bo);
+       bochs_hw_setbase(bochs, x, y, gpu_addr);
+       return 0;
+}
+
+/* Full modeset: program the mode registers, then set the scanout base.
+ * NOTE(review): the mode_set_base() return value is discarded here. */
+static int bochs_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode,
+                              int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct bochs_device *bochs =
+               container_of(crtc, struct bochs_device, crtc);
+
+       bochs_hw_setmode(bochs, mode);
+       bochs_crtc_mode_set_base(crtc, x, y, old_fb);
+       return 0;
+}
+
+/* Required helper hooks with nothing to do on this hardware. */
+static void bochs_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void bochs_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/* No gamma hardware — silently ignore. */
+static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                u16 *blue, uint32_t start, uint32_t size)
+{
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs bochs_crtc_funcs = {
+       .gamma_set = bochs_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = drm_crtc_cleanup,
+};
+
+/* CRTC helper callbacks wired to the mostly-stub implementations above. */
+static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
+       .dpms = bochs_crtc_dpms,
+       .mode_fixup = bochs_crtc_mode_fixup,
+       .mode_set = bochs_crtc_mode_set,
+       .mode_set_base = bochs_crtc_mode_set_base,
+       .prepare = bochs_crtc_prepare,
+       .commit = bochs_crtc_commit,
+       .load_lut = bochs_crtc_load_lut,
+};
+
+/* Register the single CRTC embedded in the device structure. */
+static void bochs_crtc_init(struct drm_device *dev)
+{
+       struct bochs_device *bochs = dev->dev_private;
+       struct drm_crtc *crtc = &bochs->crtc;
+
+       drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
+       drm_mode_crtc_set_gamma_size(crtc, 256);
+       drm_crtc_helper_add(crtc, &bochs_helper_funcs);
+}
+
+/* Encoder helper hooks: nothing to program on this virtual hardware,
+ * so fixup accepts everything and the rest are no-ops. */
+static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void bochs_encoder_mode_set(struct drm_encoder *encoder,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+}
+
+static void bochs_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void bochs_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+/* Encoder callback tables (all stubs above) ... */
+static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
+       .dpms = bochs_encoder_dpms,
+       .mode_fixup = bochs_encoder_mode_fixup,
+       .mode_set = bochs_encoder_mode_set,
+       .prepare = bochs_encoder_prepare,
+       .commit = bochs_encoder_commit,
+};
+
+static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+/* ... and registration of the single DAC encoder, tied to crtc 0. */
+static void bochs_encoder_init(struct drm_device *dev)
+{
+       struct bochs_device *bochs = dev->dev_private;
+       struct drm_encoder *encoder = &bochs->encoder;
+
+       encoder->possible_crtcs = 0x1;
+       drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
+                        DRM_MODE_ENCODER_DAC);
+       drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
+}
+
+
+/* No EDID on the virtual display: offer the standard no-EDID mode list
+ * up to 8192x8192 and mark defx x defy as preferred.
+ * NOTE(review): only used via the helper table in this file — this
+ * looks like it should be static; confirm no external user. */
+int bochs_connector_get_modes(struct drm_connector *connector)
+{
+       int count;
+
+       count = drm_add_modes_noedid(connector, 8192, 8192);
+       drm_set_preferred_mode(connector, defx, defy);
+       return count;
+}
+
+/* Reject modes whose 32 bpp framebuffer would not fit twice in VRAM. */
+static int bochs_connector_mode_valid(struct drm_connector *connector,
+                                     struct drm_display_mode *mode)
+{
+       struct bochs_device *bochs =
+               container_of(connector, struct bochs_device, connector);
+       unsigned long size = mode->hdisplay * mode->vdisplay * 4;
+
+       /*
+        * Make sure we can fit two framebuffers into video memory.
+        * This allows up to 1600x1200 with 16 MB (default size).
+        * If you want more try this:
+        *     'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
+        */
+       if (size * 2 > bochs->fb_size)
+               return MODE_BAD;
+
+       return MODE_OK;
+}
+
+/* Return the first (and only) encoder attached to the connector, or
+ * NULL when none has been attached yet. */
+static struct drm_encoder *
+bochs_connector_best_encoder(struct drm_connector *connector)
+{
+       int enc_id = connector->encoder_ids[0];
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+
+       /* pick the encoder ids */
+       if (enc_id) {
+               obj = drm_mode_object_find(connector->dev, enc_id,
+                                          DRM_MODE_OBJECT_ENCODER);
+               if (!obj)
+                       return NULL;
+               encoder = obj_to_encoder(obj);
+               return encoder;
+       }
+       return NULL;
+}
+
+/* The virtual display is always present. */
+static enum drm_connector_status bochs_connector_detect(struct drm_connector
+                                                       *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+/* Connector callback tables.
+ * NOTE(review): both are only referenced from this file — they look
+ * like candidates for "static const"; confirm no external user. */
+struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+       .get_modes = bochs_connector_get_modes,
+       .mode_valid = bochs_connector_mode_valid,
+       .best_encoder = bochs_connector_best_encoder,
+};
+
+struct drm_connector_funcs bochs_connector_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = bochs_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+};
+
+/* Register the single VIRTUAL connector embedded in the device struct. */
+static void bochs_connector_init(struct drm_device *dev)
+{
+       struct bochs_device *bochs = dev->dev_private;
+       struct drm_connector *connector = &bochs->connector;
+
+       drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
+                          DRM_MODE_CONNECTOR_VIRTUAL);
+       drm_connector_helper_add(connector,
+                                &bochs_connector_connector_helper_funcs);
+}
+
+
+/* Set up modesetting: mode-config limits, then the one crtc / encoder /
+ * connector pipeline.
+ * NOTE(review): the (void *) cast drops const on bochs_mode_funcs —
+ * assigning it directly should work if mode_config.funcs is const. */
+int bochs_kms_init(struct bochs_device *bochs)
+{
+       drm_mode_config_init(bochs->dev);
+       bochs->mode_config_initialized = true;
+
+       bochs->dev->mode_config.max_width = 8192;
+       bochs->dev->mode_config.max_height = 8192;
+
+       bochs->dev->mode_config.fb_base = bochs->fb_base;
+       bochs->dev->mode_config.preferred_depth = 24;
+       bochs->dev->mode_config.prefer_shadow = 0;
+
+       bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+
+       bochs_crtc_init(bochs->dev);
+       bochs_encoder_init(bochs->dev);
+       bochs_connector_init(bochs->dev);
+       drm_mode_connector_attach_encoder(&bochs->connector,
+                                         &bochs->encoder);
+
+       return 0;
+}
+
+/* Undo bochs_kms_init(); guarded so the load-error unwind path may call
+ * it before kms was ever initialised. */
+void bochs_kms_fini(struct bochs_device *bochs)
+{
+       if (bochs->mode_config_initialized) {
+               drm_mode_config_cleanup(bochs->dev);
+               bochs->mode_config_initialized = false;
+       }
+}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
new file mode 100644 (file)
index 0000000..ce68587
--- /dev/null
@@ -0,0 +1,546 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "bochs.h"
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
+{
+       return container_of(bd, struct bochs_device, ttm.bdev);
+}
+
+static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+       return ttm_mem_global_init(ref->object);
+}
+
+static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+       ttm_mem_global_release(ref->object);
+}
+
+static int bochs_ttm_global_init(struct bochs_device *bochs)
+{
+       struct drm_global_reference *global_ref;
+       int r;
+
+       global_ref = &bochs->ttm.mem_global_ref;
+       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+       global_ref->size = sizeof(struct ttm_mem_global);
+       global_ref->init = &bochs_ttm_mem_global_init;
+       global_ref->release = &bochs_ttm_mem_global_release;
+       r = drm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM memory accounting "
+                         "subsystem.\n");
+               return r;
+       }
+
+       bochs->ttm.bo_global_ref.mem_glob =
+               bochs->ttm.mem_global_ref.object;
+       global_ref = &bochs->ttm.bo_global_ref.ref;
+       global_ref->global_type = DRM_GLOBAL_TTM_BO;
+       global_ref->size = sizeof(struct ttm_bo_global);
+       global_ref->init = &ttm_bo_global_init;
+       global_ref->release = &ttm_bo_global_release;
+       r = drm_global_item_ref(global_ref);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+               drm_global_item_unref(&bochs->ttm.mem_global_ref);
+               return r;
+       }
+
+       return 0;
+}
+
+static void bochs_ttm_global_release(struct bochs_device *bochs)
+{
+       if (bochs->ttm.mem_global_ref.release == NULL)
+               return;
+
+       drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
+       drm_global_item_unref(&bochs->ttm.mem_global_ref);
+       bochs->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+       struct bochs_bo *bo;
+
+       bo = container_of(tbo, struct bochs_bo, bo);
+       drm_gem_object_release(&bo->gem);
+       kfree(bo);
+}
+
+static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo)
+{
+       if (bo->destroy == &bochs_bo_ttm_destroy)
+               return true;
+       return false;
+}
+
+static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+                                 struct ttm_mem_type_manager *man)
+{
+       switch (type) {
+       case TTM_PL_SYSTEM:
+               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_MASK_CACHING;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case TTM_PL_VRAM:
+               man->func = &ttm_bo_manager_func;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED |
+                       TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_UNCACHED |
+                       TTM_PL_FLAG_WC;
+               man->default_caching = TTM_PL_FLAG_WC;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void
+bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+       struct bochs_bo *bochsbo = bochs_bo(bo);
+
+       if (!bochs_ttm_bo_is_bochs_bo(bo))
+               return;
+
+       bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
+       *pl = bochsbo->placement;
+}
+
+static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
+                                 struct file *filp)
+{
+       struct bochs_bo *bochsbo = bochs_bo(bo);
+
+       return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+}
+
+static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                                   struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct bochs_device *bochs = bochs_bdev(bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       mem->bus.is_iomem = false;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+               /* system memory */
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               mem->bus.base = bochs->fb_base;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+               break;
+       }
+       return 0;
+}
+
+static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
+                                 struct ttm_mem_reg *mem)
+{
+}
+
+static int bochs_bo_move(struct ttm_buffer_object *bo,
+                        bool evict, bool interruptible,
+                        bool no_wait_gpu,
+                        struct ttm_mem_reg *new_mem)
+{
+       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
+static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
+{
+       ttm_tt_fini(tt);
+       kfree(tt);
+}
+
+static struct ttm_backend_func bochs_tt_backend_func = {
+       .destroy = &bochs_ttm_backend_destroy,
+};
+
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
+                                         unsigned long size,
+                                         uint32_t page_flags,
+                                         struct page *dummy_read_page)
+{
+       struct ttm_tt *tt;
+
+       tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+       if (tt == NULL)
+               return NULL;
+       tt->func = &bochs_tt_backend_func;
+       if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+               kfree(tt);
+               return NULL;
+       }
+       return tt;
+}
+
+struct ttm_bo_driver bochs_bo_driver = {
+       .ttm_tt_create = bochs_ttm_tt_create,
+       .ttm_tt_populate = ttm_pool_populate,
+       .ttm_tt_unpopulate = ttm_pool_unpopulate,
+       .init_mem_type = bochs_bo_init_mem_type,
+       .evict_flags = bochs_bo_evict_flags,
+       .move = bochs_bo_move,
+       .verify_access = bochs_bo_verify_access,
+       .io_mem_reserve = &bochs_ttm_io_mem_reserve,
+       .io_mem_free = &bochs_ttm_io_mem_free,
+};
+
+int bochs_mm_init(struct bochs_device *bochs)
+{
+       struct ttm_bo_device *bdev = &bochs->ttm.bdev;
+       int ret;
+
+       ret = bochs_ttm_global_init(bochs);
+       if (ret)
+               return ret;
+
+       ret = ttm_bo_device_init(&bochs->ttm.bdev,
+                                bochs->ttm.bo_global_ref.ref.object,
+                                &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+                                true);
+       if (ret) {
+               DRM_ERROR("Error initialising bo driver; %d\n", ret);
+               return ret;
+       }
+
+       ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+                            bochs->fb_size >> PAGE_SHIFT);
+       if (ret) {
+               DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+               return ret;
+       }
+
+       bochs->ttm.initialized = true;
+       return 0;
+}
+
+void bochs_mm_fini(struct bochs_device *bochs)
+{
+       if (!bochs->ttm.initialized)
+               return;
+
+       ttm_bo_device_release(&bochs->ttm.bdev);
+       bochs_ttm_global_release(bochs);
+       bochs->ttm.initialized = false;
+}
+
+static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
+{
+       u32 c = 0;
+       bo->placement.fpfn = 0;
+       bo->placement.lpfn = 0;
+       bo->placement.placement = bo->placements;
+       bo->placement.busy_placement = bo->placements;
+       if (domain & TTM_PL_FLAG_VRAM) {
+               bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+                       | TTM_PL_FLAG_VRAM;
+       }
+       if (domain & TTM_PL_FLAG_SYSTEM) {
+               bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       }
+       if (!c) {
+               bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       }
+       bo->placement.num_placement = c;
+       bo->placement.num_busy_placement = c;
+}
+
+static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
+{
+       return bo->bo.offset;
+}
+
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+       int i, ret;
+
+       if (bo->pin_count) {
+               bo->pin_count++;
+               if (gpu_addr)
+                       *gpu_addr = bochs_bo_gpu_offset(bo);
+               return 0;
+       }
+
+       bochs_ttm_placement(bo, pl_flag);
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       if (ret)
+               return ret;
+
+       bo->pin_count = 1;
+       if (gpu_addr)
+               *gpu_addr = bochs_bo_gpu_offset(bo);
+       return 0;
+}
+
+int bochs_bo_unpin(struct bochs_bo *bo)
+{
+       int i, ret;
+
+       if (!bo->pin_count) {
+               DRM_ERROR("unpin bad %p\n", bo);
+               return 0;
+       }
+       bo->pin_count--;
+
+       if (bo->pin_count)
+               return 0;
+
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *file_priv;
+       struct bochs_device *bochs;
+
+       if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+               return drm_mmap(filp, vma);
+
+       file_priv = filp->private_data;
+       bochs = file_priv->minor->dev->dev_private;
+       return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int bochs_bo_create(struct drm_device *dev, int size, int align,
+                          uint32_t flags, struct bochs_bo **pbochsbo)
+{
+       struct bochs_device *bochs = dev->dev_private;
+       struct bochs_bo *bochsbo;
+       size_t acc_size;
+       int ret;
+
+       bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
+       if (!bochsbo)
+               return -ENOMEM;
+
+       ret = drm_gem_object_init(dev, &bochsbo->gem, size);
+       if (ret) {
+               kfree(bochsbo);
+               return ret;
+       }
+
+       bochsbo->bo.bdev = &bochs->ttm.bdev;
+       bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+       bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+       acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
+                                      sizeof(struct bochs_bo));
+
+       ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
+                         ttm_bo_type_device, &bochsbo->placement,
+                         align >> PAGE_SHIFT, false, NULL, acc_size,
+                         NULL, bochs_bo_ttm_destroy);
+       if (ret)
+               return ret;
+
+       *pbochsbo = bochsbo;
+       return 0;
+}
+
+int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
+                    struct drm_gem_object **obj)
+{
+       struct bochs_bo *bochsbo;
+       int ret;
+
+       *obj = NULL;
+
+       size = ALIGN(size, PAGE_SIZE);
+       if (size == 0)
+               return -EINVAL;
+
+       ret = bochs_bo_create(dev, size, 0, 0, &bochsbo);
+       if (ret) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("failed to allocate GEM object\n");
+               return ret;
+       }
+       *obj = &bochsbo->gem;
+       return 0;
+}
+
+int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
+                     struct drm_mode_create_dumb *args)
+{
+       struct drm_gem_object *gobj;
+       u32 handle;
+       int ret;
+
+       args->pitch = args->width * ((args->bpp + 7) / 8);
+       args->size = args->pitch * args->height;
+
+       ret = bochs_gem_create(dev, args->size, false,
+                              &gobj);
+       if (ret)
+               return ret;
+
+       ret = drm_gem_handle_create(file, gobj, &handle);
+       drm_gem_object_unreference_unlocked(gobj);
+       if (ret)
+               return ret;
+
+       args->handle = handle;
+       return 0;
+}
+
+static void bochs_bo_unref(struct bochs_bo **bo)
+{
+       struct ttm_buffer_object *tbo;
+
+       if ((*bo) == NULL)
+               return;
+
+       tbo = &((*bo)->bo);
+       ttm_bo_unref(&tbo);
+       if (tbo == NULL)
+               *bo = NULL;
+
+}
+
+void bochs_gem_free_object(struct drm_gem_object *obj)
+{
+       struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
+
+       if (!bochs_bo)
+               return;
+       bochs_bo_unref(&bochs_bo);
+}
+
+int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
+                          uint32_t handle, uint64_t *offset)
+{
+       struct drm_gem_object *obj;
+       int ret;
+       struct bochs_bo *bo;
+
+       mutex_lock(&dev->struct_mutex);
+       obj = drm_gem_object_lookup(dev, file, handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto out_unlock;
+       }
+
+       bo = gem_to_bochs_bo(obj);
+       *offset = bochs_bo_mmap_offset(bo);
+
+       drm_gem_object_unreference(obj);
+       ret = 0;
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
+       if (bochs_fb->obj)
+               drm_gem_object_unreference_unlocked(bochs_fb->obj);
+       drm_framebuffer_cleanup(fb);
+       kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs bochs_fb_funcs = {
+       .destroy = bochs_user_framebuffer_destroy,
+};
+
+int bochs_framebuffer_init(struct drm_device *dev,
+                          struct bochs_framebuffer *gfb,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
+                          struct drm_gem_object *obj)
+{
+       int ret;
+
+       drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+       gfb->obj = obj;
+       ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
+       if (ret) {
+               DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+               return ret;
+       }
+       return 0;
+}
+
+static struct drm_framebuffer *
+bochs_user_framebuffer_create(struct drm_device *dev,
+                             struct drm_file *filp,
+                             struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct drm_gem_object *obj;
+       struct bochs_framebuffer *bochs_fb;
+       int ret;
+
+       DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
+              mode_cmd->width, mode_cmd->height,
+              (mode_cmd->pixel_format)       & 0xff,
+              (mode_cmd->pixel_format >> 8)  & 0xff,
+              (mode_cmd->pixel_format >> 16) & 0xff,
+              (mode_cmd->pixel_format >> 24) & 0xff);
+
+       if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+               return ERR_PTR(-ENOENT);
+
+       obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+       if (obj == NULL)
+               return ERR_PTR(-ENOENT);
+
+       bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
+       if (!bochs_fb) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
+               kfree(bochs_fb);
+               return ERR_PTR(ret);
+       }
+       return &bochs_fb->base;
+}
+
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+       .fb_create = bochs_user_framebuffer_create,
+};
index b6aded73838bca0fb11c5a936db40a8db1e1e180..117d3eca5e3782e5db3571a968d8e92111c1da4c 100644 (file)
@@ -222,7 +222,7 @@ void cirrus_fbdev_fini(struct cirrus_device *cdev);
 void cirrus_driver_irq_preinstall(struct drm_device *dev);
 int cirrus_driver_irq_postinstall(struct drm_device *dev);
 void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
 
                                /* cirrus_kms.c */
 int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
index b27e95666fabf6fb28d56b07cd62fd3363776a0b..2fd4a92162cb8880b3c0834ee294184b57b7edf7 100644 (file)
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
         * then the BO is being moved and we should
         * store up the damage until later.
         */
-       if (!in_interrupt())
+       if (!drm_can_sleep())
                ret = cirrus_bo_reserve(bo, true);
        if (ret) {
                if (ret != -EBUSY)
@@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
        info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
        info->apertures->ranges[0].size = cdev->mc.vram_size;
 
+       info->fix.smem_start = cdev->dev->mode_config.fb_base;
+       info->fix.smem_len = cdev->mc.vram_size;
+
        info->screen_base = sysram;
        info->screen_size = size;
 
index 78e76f24343d17bad9bc81e5474f900fbd090b33..4b0170cf53fd9225f07f731766cb8fe71fb40e91 100644 (file)
@@ -255,7 +255,7 @@ int cirrus_dumb_create(struct drm_file *file,
        return 0;
 }
 
-void cirrus_bo_unref(struct cirrus_bo **bo)
+static void cirrus_bo_unref(struct cirrus_bo **bo)
 {
        struct ttm_buffer_object *tbo;
 
index adabc3daaa5b4f644969944a28de07921ab1895d..530f78f84deed250a2f96350254657b49a0166c9 100644 (file)
@@ -102,7 +102,7 @@ static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
        return true;
 }
 
-void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
        struct cirrus_device *cdev = crtc->dev->dev_private;
        u32 addr;
@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
                sr07 |= 0x11;
                break;
        case 16:
-               sr07 |= 0xc1;
-               hdr = 0xc0;
+               sr07 |= 0x17;
+               hdr = 0xc1;
                break;
        case 24:
                sr07 |= 0x15;
@@ -453,7 +453,7 @@ static void cirrus_encoder_commit(struct drm_encoder *encoder)
 {
 }
 
-void cirrus_encoder_destroy(struct drm_encoder *encoder)
+static void cirrus_encoder_destroy(struct drm_encoder *encoder)
 {
        struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
        drm_encoder_cleanup(encoder);
@@ -492,7 +492,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
 }
 
 
-int cirrus_vga_get_modes(struct drm_connector *connector)
+static int cirrus_vga_get_modes(struct drm_connector *connector)
 {
        int count;
 
@@ -509,7 +509,7 @@ static int cirrus_vga_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
                                                  *connector)
 {
        int enc_id = connector->encoder_ids[0];
index 75becdeac07d710e1322a64096df65850bf31fd4..8b37c25ff9bd0074810fc942fcee38fab3cbab4a 100644 (file)
@@ -80,7 +80,7 @@ static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
        return 0;
 }
 
-void
+static void
 cirrus_ttm_global_release(struct cirrus_device *cirrus)
 {
        if (cirrus->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
        kfree(bo);
 }
 
-bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
 {
        if (bo->destroy == &cirrus_bo_ttm_destroy)
                return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
 };
 
 
-struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
 {
@@ -375,26 +375,6 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
        return 0;
 }
 
-int cirrus_bo_unpin(struct cirrus_bo *bo)
-{
-       int i, ret;
-       if (!bo->pin_count) {
-               DRM_ERROR("unpin bad %p\n", bo);
-               return 0;
-       }
-       bo->pin_count--;
-       if (bo->pin_count)
-               return 0;
-
-       for (i = 0; i < bo->placement.num_placement ; i++)
-               bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 {
        int i, ret;
index e301d653d97e42f4537efdb50806cdc682930505..dde205cef384c08db92d114c16138c607ddc1da0 100644 (file)
@@ -53,7 +53,7 @@
  */
 int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
 {
-       DRM_AGP_KERN *kern;
+       struct agp_kern_info *kern;
 
        if (!dev->agp || !dev->agp->acquired)
                return -EINVAL;
@@ -198,17 +198,15 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
 int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 {
        struct drm_agp_mem *entry;
-       DRM_AGP_MEM *memory;
+       struct agp_memory *memory;
        unsigned long pages;
        u32 type;
 
        if (!dev->agp || !dev->agp->acquired)
                return -EINVAL;
-       if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+       if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
                return -ENOMEM;
 
-       memset(entry, 0, sizeof(*entry));
-
        pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
        type = (u32) request->type;
        if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
@@ -393,14 +391,16 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
  * Gets the drm_agp_t structure which is made available by the agpgart module
  * via the inter_module_* functions. Creates and initializes a drm_agp_head
  * structure.
+ *
+ * Note that final cleanup of the kmalloced structure is directly done in
+ * drm_pci_agp_destroy.
  */
 struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 {
        struct drm_agp_head *head = NULL;
 
-       if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+       if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
                return NULL;
-       memset((void *)head, 0, sizeof(*head));
        head->bridge = agp_find_bridge(dev->pdev);
        if (!head->bridge) {
                if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
@@ -439,7 +439,7 @@ void drm_agp_clear(struct drm_device *dev)
 {
        struct drm_agp_mem *entry, *tempe;
 
-       if (!drm_core_has_AGP(dev) || !dev->agp)
+       if (!dev->agp)
                return;
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
@@ -459,21 +459,6 @@ void drm_agp_clear(struct drm_device *dev)
        dev->agp->enabled = 0;
 }
 
-/**
- * drm_agp_destroy - Destroy AGP head
- * @dev: DRM device
- *
- * Destroy resources that were previously allocated via drm_agp_initp. Caller
- * must ensure to clean up all AGP resources before calling this. See
- * drm_agp_clear().
- *
- * Call this to destroy AGP heads allocated via drm_agp_init().
- */
-void drm_agp_destroy(struct drm_agp_head *agp)
-{
-       kfree(agp);
-}
-
 /**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.
@@ -481,14 +466,14 @@ void drm_agp_destroy(struct drm_agp_head *agp)
  * No reference is held on the pages during this time -- it is up to the
  * caller to handle that.
  */
-DRM_AGP_MEM *
+struct agp_memory *
 drm_agp_bind_pages(struct drm_device *dev,
                   struct page **pages,
                   unsigned long num_pages,
                   uint32_t gtt_offset,
                   u32 type)
 {
-       DRM_AGP_MEM *mem;
+       struct agp_memory *mem;
        int ret, i;
 
        DRM_DEBUG("\n");
index 39a718340319ea13ce3f813ea30b82d60c0b9264..0406110f83edd2ac3f607fe575c8e7b6c491e88a 100644 (file)
@@ -114,7 +114,7 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf,
 
        for (idx = 0; idx < nr_pages; ++idx) {
 
-               if (DRM_COPY_FROM_USER(buf->data[idx],
+               if (copy_from_user(buf->data[idx],
                        user_data + idx * PAGE_SIZE,
                        min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
                        DRM_ERROR("Failed to copy user data (%p) to drm buffer"
index 471e051d295e383b61ebb2a2a19c0e0d67d8827a..edec31fe3fed865aa2669e7c8e17aec912a82058 100644 (file)
@@ -261,7 +261,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                struct drm_agp_mem *entry;
                int valid = 0;
 
-               if (!drm_core_has_AGP(dev)) {
+               if (!dev->agp) {
                        kfree(map);
                        return -EINVAL;
                }
@@ -303,9 +303,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 
                break;
        }
-       case _DRM_GEM:
-               DRM_ERROR("tried to addmap GEM object\n");
-               break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        kfree(map);
@@ -483,9 +480,6 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
-       case _DRM_GEM:
-               DRM_ERROR("tried to rmmap GEM object\n");
-               break;
        }
        kfree(map);
 
@@ -1396,7 +1390,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        spin_unlock(&dev->count_lock);
 
        if (request->count >= dma->buf_count) {
-               if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+               if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))) {
                        struct drm_local_map *map = dev->agp_buffer_map;
index d6cf77c472e710cf246193dca874bd3b8bc72b9e..3b7d32da16046ffcdc238ba41b1b03a3ec152435 100644 (file)
@@ -674,6 +674,29 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
+/**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+       unsigned int index = 0;
+       struct drm_crtc *tmp;
+
+       list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+               if (tmp == crtc)
+                       return index;
+
+               index++;
+       }
+
+       BUG();
+}
+EXPORT_SYMBOL(drm_crtc_index);
+
 /**
  * drm_mode_probed_add - add a mode to a connector's probed mode list
  * @connector: connector the new mode
@@ -2767,10 +2790,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        }
 
        if (fb->funcs->dirty) {
-               drm_modeset_lock_all(dev);
                ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
                                       clips, num_clips);
-               drm_modeset_unlock_all(dev);
        } else {
                ret = -ENOSYS;
        }
index 01361aba033b4a39888a41bc15c4a7f5e4052152..ea92b827e787e3543a745b119822085618c16988 100644 (file)
@@ -324,35 +324,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-                               struct drm_crtc *crtc)
-{
-       struct drm_device *dev;
-       struct drm_crtc *tmp;
-       int crtc_mask = 1;
-
-       WARN(!crtc, "checking null crtc?\n");
-
-       dev = crtc->dev;
-
-       list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-               if (tmp == crtc)
-                       break;
-               crtc_mask <<= 1;
-       }
-
-       if (encoder->possible_crtcs & crtc_mask)
-               return true;
-       return false;
-}
-
 /*
  * Check the CRTC we're going to map each output to vs. its current
  * CRTC.  If they don't match, we have to disable the output and the CRTC
@@ -536,7 +507,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
         * are later needed by vblank and swap-completion
         * timestamping. They are derived from true hwmode.
         */
-       drm_calc_timestamping_constants(crtc);
+       drm_calc_timestamping_constants(crtc, &crtc->hwmode);
 
        /* FIXME: add subpixel order */
 done:
index d9137e49c4e81594a992297a1dd10181006faf06..345be03c23db27203d4ce69bd74586a77e8f6ab1 100644 (file)
@@ -315,9 +315,6 @@ long drm_ioctl(struct file *filp,
        if (drm_device_is_unplugged(dev))
                return -ENODEV;
 
-       atomic_inc(&dev->ioctl_count);
-       ++file_priv->ioctl_count;
-
        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
@@ -410,7 +407,6 @@ long drm_ioctl(struct file *filp,
 
        if (kdata != stack_kdata)
                kfree(kdata);
-       atomic_dec(&dev->ioctl_count);
        if (retcode)
                DRM_DEBUG("ret = %d\n", retcode);
        return retcode;
index 8835dcddfac3ab7c3ee1a4ba72360443e730aa74..b924306b84775f39592887a632f76553297c5a57 100644 (file)
@@ -605,347 +605,347 @@ static const struct drm_display_mode edid_cea_modes[] = {
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 2 - 720x480@60Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 3 - 720x480@60Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 4 - 1280x720@60Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 5 - 1920x1080i@60Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 6 - 1440x480i@60Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 7 - 1440x480i@60Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 8 - 1440x240@60Hz */
        { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 9 - 1440x240@60Hz */
        { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
                   1602, 1716, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 10 - 2880x480i@60Hz */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 11 - 2880x480i@60Hz */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 12 - 2880x240@60Hz */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 13 - 2880x240@60Hz */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 14 - 1440x480@60Hz */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 15 - 1440x480@60Hz */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 16 - 1920x1080@60Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 17 - 720x576@50Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 18 - 720x576@50Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 19 - 1280x720@50Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 20 - 1920x1080i@50Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 21 - 1440x576i@50Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 22 - 1440x576i@50Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 23 - 1440x288@50Hz */
        { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 24 - 1440x288@50Hz */
        { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
                   1590, 1728, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 25 - 2880x576i@50Hz */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 26 - 2880x576i@50Hz */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 27 - 2880x288@50Hz */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 28 - 2880x288@50Hz */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 29 - 1440x576@50Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 30 - 1440x576@50Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 31 - 1920x1080@50Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 32 - 1920x1080@24Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
                   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 24, },
+         .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 33 - 1920x1080@25Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 25, },
+         .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 34 - 1920x1080@30Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 30, },
+         .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 35 - 2880x480@60Hz */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 36 - 2880x480@60Hz */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 60, },
+         .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 37 - 2880x576@50Hz */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 38 - 2880x576@50Hz */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 39 - 1920x1080i@50Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
                   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 50, },
+         .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 40 - 1920x1080i@100Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 100, },
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 41 - 1280x720@100Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 100, },
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 42 - 720x576@100Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 100, },
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 43 - 720x576@100Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 100, },
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 44 - 1440x576i@100Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 100, },
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 45 - 1440x576i@100Hz */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 100, },
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 46 - 1920x1080i@120Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
-         .vrefresh = 120, },
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 47 - 1280x720@120Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 120, },
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 48 - 720x480@120Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 120, },
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 49 - 720x480@120Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 120, },
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 50 - 1440x480i@120Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 120, },
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 51 - 1440x480i@120Hz */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 120, },
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 52 - 720x576@200Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 200, },
+         .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 53 - 720x576@200Hz */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 200, },
+         .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 54 - 1440x576i@200Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 200, },
+         .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 55 - 1440x576i@200Hz */
        { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
                   1590, 1728, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 200, },
+         .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 56 - 720x480@240Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 240, },
+         .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 57 - 720x480@240Hz */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-         .vrefresh = 240, },
+         .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 58 - 1440x480i@240 */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 240, },
+         .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
        /* 59 - 1440x480i@240 */
        { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
                   1602, 1716, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
-         .vrefresh = 240, },
+         .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 60 - 1280x720@24Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 24, },
+         .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 61 - 1280x720@25Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
                   3740, 3960, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 25, },
+         .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 62 - 1280x720@30Hz */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-         .vrefresh = 30, },
+         .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 63 - 1920x1080@120Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-        .vrefresh = 120, },
+        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 64 - 1920x1080@100Hz */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-        .vrefresh = 100, },
+        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
 };
 
 /*
@@ -2562,25 +2562,40 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
        return modes;
 }
 
-static int
-do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+static struct drm_display_mode *
+drm_display_mode_from_vic_index(struct drm_connector *connector,
+                               const u8 *video_db, u8 video_len,
+                               u8 video_index)
 {
        struct drm_device *dev = connector->dev;
-       const u8 *mode;
+       struct drm_display_mode *newmode;
        u8 cea_mode;
-       int modes = 0;
 
-       for (mode = db; mode < db + len; mode++) {
-               cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
-               if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
-                       struct drm_display_mode *newmode;
-                       newmode = drm_mode_duplicate(dev,
-                                                    &edid_cea_modes[cea_mode]);
-                       if (newmode) {
-                               newmode->vrefresh = 0;
-                               drm_mode_probed_add(connector, newmode);
-                               modes++;
-                       }
+       if (video_db == NULL || video_index >= video_len)
+               return NULL;
+
+       /* CEA modes are numbered 1..127 */
+       cea_mode = (video_db[video_index] & 127) - 1;
+       if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+               return NULL;
+
+       newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+       newmode->vrefresh = 0;
+
+       return newmode;
+}
+
+static int
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+       int i, modes = 0;
+
+       for (i = 0; i < len; i++) {
+               struct drm_display_mode *mode;
+               mode = drm_display_mode_from_vic_index(connector, db, len, i);
+               if (mode) {
+                       drm_mode_probed_add(connector, mode);
+                       modes++;
                }
        }
 
@@ -2674,21 +2689,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
 static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
                               const u8 *video_db, u8 video_len, u8 video_index)
 {
-       struct drm_device *dev = connector->dev;
        struct drm_display_mode *newmode;
        int modes = 0;
-       u8 cea_mode;
-
-       if (video_db == NULL || video_index >= video_len)
-               return 0;
-
-       /* CEA modes are numbered 1..127 */
-       cea_mode = (video_db[video_index] & 127) - 1;
-       if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
-               return 0;
 
        if (structure & (1 << 0)) {
-               newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+               newmode = drm_display_mode_from_vic_index(connector, video_db,
+                                                         video_len,
+                                                         video_index);
                if (newmode) {
                        newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
                        drm_mode_probed_add(connector, newmode);
@@ -2696,7 +2703,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
                }
        }
        if (structure & (1 << 6)) {
-               newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+               newmode = drm_display_mode_from_vic_index(connector, video_db,
+                                                         video_len,
+                                                         video_index);
                if (newmode) {
                        newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
                        drm_mode_probed_add(connector, newmode);
@@ -2704,7 +2713,9 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
                }
        }
        if (structure & (1 << 8)) {
-               newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+               newmode = drm_display_mode_from_vic_index(connector, video_db,
+                                                         video_len,
+                                                         video_index);
                if (newmode) {
                        newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
                        drm_mode_probed_add(connector, newmode);
@@ -2728,7 +2739,7 @@ static int
 do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
                   const u8 *video_db, u8 video_len)
 {
-       int modes = 0, offset = 0, i, multi_present = 0;
+       int modes = 0, offset = 0, i, multi_present = 0, multi_len;
        u8 vic_len, hdmi_3d_len = 0;
        u16 mask;
        u16 structure_all;
@@ -2774,32 +2785,84 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
        }
        offset += 1 + vic_len;
 
-       if (!(multi_present == 1 || multi_present == 2))
-               goto out;
+       if (multi_present == 1)
+               multi_len = 2;
+       else if (multi_present == 2)
+               multi_len = 4;
+       else
+               multi_len = 0;
 
-       if ((multi_present == 1 && len < (9 + offset)) ||
-           (multi_present == 2 && len < (11 + offset)))
+       if (len < (8 + offset + hdmi_3d_len - 1))
                goto out;
 
-       if ((multi_present == 1 && hdmi_3d_len < 2) ||
-           (multi_present == 2 && hdmi_3d_len < 4))
+       if (hdmi_3d_len < multi_len)
                goto out;
 
-       /* 3D_Structure_ALL */
-       structure_all = (db[8 + offset] << 8) | db[9 + offset];
+       if (multi_present == 1 || multi_present == 2) {
+               /* 3D_Structure_ALL */
+               structure_all = (db[8 + offset] << 8) | db[9 + offset];
 
-       /* check if 3D_MASK is present */
-       if (multi_present == 2)
-               mask = (db[10 + offset] << 8) | db[11 + offset];
-       else
-               mask = 0xffff;
-
-       for (i = 0; i < 16; i++) {
-               if (mask & (1 << i))
-                       modes += add_3d_struct_modes(connector,
-                                                    structure_all,
-                                                    video_db,
-                                                    video_len, i);
+               /* check if 3D_MASK is present */
+               if (multi_present == 2)
+                       mask = (db[10 + offset] << 8) | db[11 + offset];
+               else
+                       mask = 0xffff;
+
+               for (i = 0; i < 16; i++) {
+                       if (mask & (1 << i))
+                               modes += add_3d_struct_modes(connector,
+                                               structure_all,
+                                               video_db,
+                                               video_len, i);
+               }
+       }
+
+       offset += multi_len;
+
+       for (i = 0; i < (hdmi_3d_len - multi_len); i++) {
+               int vic_index;
+               struct drm_display_mode *newmode = NULL;
+               unsigned int newflag = 0;
+               bool detail_present;
+
+               detail_present = ((db[8 + offset + i] & 0x0f) > 7);
+
+               if (detail_present && (i + 1 == hdmi_3d_len - multi_len))
+                       break;
+
+               /* 2D_VIC_order_X */
+               vic_index = db[8 + offset + i] >> 4;
+
+               /* 3D_Structure_X */
+               switch (db[8 + offset + i] & 0x0f) {
+               case 0:
+                       newflag = DRM_MODE_FLAG_3D_FRAME_PACKING;
+                       break;
+               case 6:
+                       newflag = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+                       break;
+               case 8:
+                       /* 3D_Detail_X */
+                       if ((db[9 + offset + i] >> 4) == 1)
+                               newflag = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+                       break;
+               }
+
+               if (newflag != 0) {
+                       newmode = drm_display_mode_from_vic_index(connector,
+                                                                 video_db,
+                                                                 video_len,
+                                                                 vic_index);
+
+                       if (newmode) {
+                               newmode->flags |= newflag;
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+
+               if (detail_present)
+                       i++;
        }
 
 out:
index 9081172ef0573a0eb1b17ac8e3681b0c559486cc..1b4c7a5442c5de22210925c2cea8317790340b04 100644 (file)
@@ -141,7 +141,7 @@ static int edid_size(const u8 *edid, int data_size)
        return (edid[0x7e] + 1) * EDID_LENGTH;
 }
 
-static u8 *edid_load(struct drm_connector *connector, const char *name,
+static void *edid_load(struct drm_connector *connector, const char *name,
                        const char *connector_name)
 {
        const struct firmware *fw = NULL;
@@ -263,7 +263,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
        if (*last == '\n')
                *last = '\0';
 
-       edid = (struct edid *) edid_load(connector, edidname, connector_name);
+       edid = edid_load(connector, edidname, connector_name);
        if (IS_ERR_OR_NULL(edid))
                return 0;
 
index 0a19401aff803bcf2a1ff4dddcbd1fab42a0a187..98a03639b413d834d08c5c73c0ef08cec3d37c55 100644 (file)
@@ -359,6 +359,11 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
        struct drm_crtc *crtc;
        int bound = 0, crtcs_bound = 0;
 
+       /* Sometimes user space wants everything disabled, so don't steal the
+        * display if there's a master. */
+       if (dev->primary->master)
+               return false;
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (crtc->fb)
                        crtcs_bound++;
@@ -368,6 +373,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 
        if (bound < crtcs_bound)
                return false;
+
        return true;
 }
 
index c5b929c3f77ae617d14fa87b387557bbf3cd3f7b..7f2af9aca03895b97c75af76968093a61930b501 100644 (file)
@@ -232,7 +232,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
                goto out_put_pid;
        }
 
-       priv->ioctl_count = 0;
        /* for compatibility root is always authenticated */
        priv->always_authenticated = capable(CAP_SYS_ADMIN);
        priv->authenticated = priv->always_authenticated;
@@ -392,9 +391,6 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
-       atomic_set(&dev->ioctl_count, 0);
-       atomic_set(&dev->vma_count, 0);
-
        dev->sigdata.lock = NULL;
 
        dev->context_flag = 0;
@@ -578,12 +574,7 @@ int drm_release(struct inode *inode, struct file *filp)
         */
 
        if (!--dev->open_count) {
-               if (atomic_read(&dev->ioctl_count)) {
-                       DRM_ERROR("Device busy: %d\n",
-                                 atomic_read(&dev->ioctl_count));
-                       retcode = -EBUSY;
-               } else
-                       retcode = drm_lastclose(dev);
+               retcode = drm_lastclose(dev);
                if (drm_device_is_unplugged(dev))
                        drm_put_dev(dev);
        }
index 4761adedad2abe5f03ae586354436f3bf1b32cbb..5bbad873c798a8e2168f50c33828f27828c378ed 100644 (file)
 int
 drm_gem_init(struct drm_device *dev)
 {
-       struct drm_gem_mm *mm;
+       struct drm_vma_offset_manager *vma_offset_manager;
 
        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
 
-       mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
-       if (!mm) {
+       vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+       if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }
 
-       dev->mm_private = mm;
-       drm_vma_offset_manager_init(&mm->vma_manager,
+       dev->vma_offset_manager = vma_offset_manager;
+       drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);
 
@@ -113,11 +113,10 @@ drm_gem_init(struct drm_device *dev)
 void
 drm_gem_destroy(struct drm_device *dev)
 {
-       struct drm_gem_mm *mm = dev->mm_private;
 
-       drm_vma_offset_manager_destroy(&mm->vma_manager);
-       kfree(mm);
-       dev->mm_private = NULL;
+       drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+       kfree(dev->vma_offset_manager);
+       dev->vma_offset_manager = NULL;
 }
 
 /**
@@ -129,11 +128,12 @@ int drm_gem_object_init(struct drm_device *dev,
 {
        struct file *filp;
 
+       drm_gem_private_object_init(dev, obj, size);
+
        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);
 
-       drm_gem_private_object_init(dev, obj, size);
        obj->filp = filp;
 
        return 0;
@@ -175,11 +175,6 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
        mutex_unlock(&filp->prime.lock);
 }
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-       BUG();
-}
-
 /**
  * Called after the last handle to the object has been closed
  *
@@ -195,13 +190,6 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
-               /*
-                * The object name held a reference to this object, drop
-                * that now.
-               *
-               * This cannot be the last reference, since the handle holds one too.
-                */
-               kref_put(&obj->refcount, drm_gem_object_ref_bug);
        }
 }
 
@@ -374,9 +362,8 @@ void
 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_gem_mm *mm = dev->mm_private;
 
-       drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
+       drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
@@ -398,9 +385,8 @@ int
 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_gem_mm *mm = dev->mm_private;
 
-       return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+       return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
@@ -602,9 +588,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                        goto err;
 
                obj->name = ret;
-
-               /* Allocate a reference for the name table.  */
-               drm_gem_object_reference(obj);
        }
 
        args->name = (uint64_t) obj->name;
@@ -833,7 +816,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
-       struct drm_gem_mm *mm = dev->mm_private;
        struct drm_gem_object *obj;
        struct drm_vma_offset_node *node;
        int ret = 0;
@@ -843,7 +825,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
        mutex_lock(&dev->struct_mutex);
 
-       node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+       node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+                                          vma->vm_pgoff,
                                           vma_pages(vma));
        if (!node) {
                mutex_unlock(&dev->struct_mutex);
index 7d5a152eeb0288f05e5770a9699b92e253f689e4..7473035dd28b781ed8f382d0e8b6cd3743df772e 100644 (file)
@@ -186,14 +186,14 @@ int drm_clients_info(struct seq_file *m, void *data)
        struct drm_file *priv;
 
        mutex_lock(&dev->struct_mutex);
-       seq_printf(m, "a dev    pid    uid      magic     ioctls\n\n");
+       seq_printf(m, "a dev    pid    uid      magic\n\n");
        list_for_each_entry(priv, &dev->filelist, lhead) {
-               seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+               seq_printf(m, "%c %3d %5d %5d %10u\n",
                           priv->authenticated ? 'y' : 'n',
                           priv->minor->index,
                           pid_vnr(priv->pid),
                           from_kuid_munged(seq_user_ns(m), priv->uid),
-                          priv->magic, priv->ioctl_count);
+                          priv->magic);
        }
        mutex_unlock(&dev->struct_mutex);
        return 0;
@@ -234,14 +234,18 @@ int drm_vma_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct drm_vma_entry *pt;
        struct vm_area_struct *vma;
+       unsigned long vma_count = 0;
 #if defined(__i386__)
        unsigned int pgprot;
 #endif
 
        mutex_lock(&dev->struct_mutex);
-       seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
-                  atomic_read(&dev->vma_count),
-                  high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
+       list_for_each_entry(pt, &dev->vmalist, head)
+               vma_count++;
+
+       seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
+                  vma_count, high_memory,
+                  (void *)(unsigned long)virt_to_phys(high_memory));
 
        list_for_each_entry(pt, &dev->vmalist, head) {
                vma = pt->vma;
index 64c34d5876ffc7c8378b3df4a389db203dd1c576..c2676b5908d9f6edbcf4dd8d9fb126cfb93cd5ab 100644 (file)
@@ -368,7 +368,7 @@ int drm_irq_uninstall(struct drm_device *dev)
        if (dev->num_crtcs) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                for (i = 0; i < dev->num_crtcs; i++) {
-                       DRM_WAKEUP(&dev->vblank[i].queue);
+                       wake_up(&dev->vblank[i].queue);
                        dev->vblank[i].enabled = false;
                        dev->vblank[i].last =
                                dev->driver->get_vblank_counter(dev, i);
@@ -436,45 +436,41 @@ int drm_control(struct drm_device *dev, void *data,
 }
 
 /**
- * drm_calc_timestamping_constants - Calculate and
- * store various constants which are later needed by
- * vblank and swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos().
- * They are derived from crtc's true scanout timing,
- * so they take things like panel scaling or other
- * adjustments into account.
+ * drm_calc_timestamping_constants - Calculate vblank timestamp constants
  *
  * @crtc drm_crtc whose timestamp constants should be updated.
+ * @mode display mode containing the scanout timings
  *
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+ * derived from crtc's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
  */
-void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+                                    const struct drm_display_mode *mode)
 {
-       s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
-       u64 dotclock;
-
-       /* Dot clock in Hz: */
-       dotclock = (u64) crtc->hwmode.clock * 1000;
-
-       /* Fields of interlaced scanout modes are only half a frame duration.
-        * Double the dotclock to get half the frame-/line-/pixelduration.
-        */
-       if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
-               dotclock *= 2;
+       int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+       int dotclock = mode->crtc_clock;
 
        /* Valid dotclock? */
        if (dotclock > 0) {
-               int frame_size;
-               /* Convert scanline length in pixels and video dot clock to
-                * line duration, frame duration and pixel duration in
-                * nanoseconds:
+               int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
+
+               /*
+                * Convert scanline length in pixels and video
+                * dot clock to line duration, frame duration
+                * and pixel duration in nanoseconds:
                 */
-               pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
-               linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
-                                             1000000000), dotclock);
-               frame_size = crtc->hwmode.crtc_htotal *
-                               crtc->hwmode.crtc_vtotal;
-               framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
-                                             dotclock);
+               pixeldur_ns = 1000000 / dotclock;
+               linedur_ns  = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
+               framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
+
+               /*
+                * Fields of interlaced scanout modes are only half a frame duration.
+                */
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       framedur_ns /= 2;
        } else
                DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
                          crtc->base.id);
@@ -484,11 +480,11 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
        crtc->framedur_ns = framedur_ns;
 
        DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
-                 crtc->base.id, crtc->hwmode.crtc_htotal,
-                 crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+                 crtc->base.id, mode->crtc_htotal,
+                 mode->crtc_vtotal, mode->crtc_vdisplay);
        DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
-                 crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
-                 (int) linedur_ns, (int) pixeldur_ns);
+                 crtc->base.id, dotclock, framedur_ns,
+                 linedur_ns, pixeldur_ns);
 }
 EXPORT_SYMBOL(drm_calc_timestamping_constants);
 
@@ -521,6 +517,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  *         0 = Default.
  *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
  * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ * @mode: mode which defines the scanout timings
  *
  * Returns negative value on error, failure or if not supported in current
  * video mode:
@@ -540,14 +537,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                                          int *max_error,
                                          struct timeval *vblank_time,
                                          unsigned flags,
-                                         struct drm_crtc *refcrtc)
+                                         const struct drm_crtc *refcrtc,
+                                         const struct drm_display_mode *mode)
 {
        ktime_t stime, etime, mono_time_offset;
        struct timeval tv_etime;
-       struct drm_display_mode *mode;
-       int vbl_status, vtotal, vdisplay;
+       int vbl_status;
        int vpos, hpos, i;
-       s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+       int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
        bool invbl;
 
        if (crtc < 0 || crtc >= dev->num_crtcs) {
@@ -561,10 +558,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                return -EIO;
        }
 
-       mode = &refcrtc->hwmode;
-       vtotal = mode->crtc_vtotal;
-       vdisplay = mode->crtc_vdisplay;
-
        /* Durations of frames, lines, pixels in nanoseconds. */
        framedur_ns = refcrtc->framedur_ns;
        linedur_ns  = refcrtc->linedur_ns;
@@ -573,7 +566,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
        /* If mode timing undefined, just return as no-op:
         * Happens during initial modesetting of a crtc.
         */
-       if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+       if (framedur_ns == 0) {
                DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
                return -EAGAIN;
        }
@@ -590,7 +583,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                 * Get vertical and horizontal scanout position vpos, hpos,
                 * and bounding timestamps stime, etime, pre/post query.
                 */
-               vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+               vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
                                                               &hpos, &stime, &etime);
 
                /*
@@ -611,18 +604,18 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
                /* Accept result with <  max_error nsecs timing uncertainty. */
-               if (duration_ns <= (s64) *max_error)
+               if (duration_ns <= *max_error)
                        break;
        }
 
        /* Noisy system timing? */
        if (i == DRM_TIMESTAMP_MAXRETRIES) {
                DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
-                         crtc, (int) duration_ns/1000, *max_error/1000, i);
+                         crtc, duration_ns/1000, *max_error/1000, i);
        }
 
        /* Return upper bound of timestamp precision error. */
-       *max_error = (int) duration_ns;
+       *max_error = duration_ns;
 
        /* Check if in vblank area:
         * vpos is >=0 in video scanout area, but negative
@@ -635,25 +628,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
         * since start of scanout at first display scanline. delta_ns
         * can be negative if start of scanout hasn't happened yet.
         */
-       delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
-
-       /* Is vpos outside nominal vblank area, but less than
-        * 1/100 of a frame height away from start of vblank?
-        * If so, assume this isn't a massively delayed vblank
-        * interrupt, but a vblank interrupt that fired a few
-        * microseconds before true start of vblank. Compensate
-        * by adding a full frame duration to the final timestamp.
-        * Happens, e.g., on ATI R500, R600.
-        *
-        * We only do this if DRM_CALLED_FROM_VBLIRQ.
-        */
-       if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
-           ((vdisplay - vpos) < vtotal / 100)) {
-               delta_ns = delta_ns - framedur_ns;
-
-               /* Signal this correction as "applied". */
-               vbl_status |= 0x8;
-       }
+       delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
 
        if (!drm_timestamp_monotonic)
                etime = ktime_sub(etime, mono_time_offset);
@@ -673,7 +648,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                  crtc, (int)vbl_status, hpos, vpos,
                  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
                  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
-                 (int)duration_ns/1000, i);
+                 duration_ns/1000, i);
 
        vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
        if (invbl)
@@ -960,7 +935,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
        if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
            (drm_vblank_offdelay > 0))
                mod_timer(&dev->vblank_disable_timer,
-                         jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
+                         jiffies + ((drm_vblank_offdelay * HZ)/1000));
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
@@ -980,7 +955,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        vblank_disable_and_save(dev, crtc);
-       DRM_WAKEUP(&dev->vblank[crtc].queue);
+       wake_up(&dev->vblank[crtc].queue);
 
        /* Send any queued vblank events, lest the natives grow disquiet */
        seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1244,7 +1219,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
        DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
                  vblwait->request.sequence, crtc);
        dev->vblank[crtc].last_wait = vblwait->request.sequence;
-       DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
+       DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
                    (((drm_vblank_count(dev, crtc) -
                       vblwait->request.sequence) <= (1 << 23)) ||
                     !dev->irq_enabled));
@@ -1363,7 +1338,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
                          crtc, (int) diff_ns);
        }
 
-       DRM_WAKEUP(&dev->vblank[crtc].queue);
+       wake_up(&dev->vblank[crtc].queue);
        drm_handle_vblank_events(dev, crtc);
 
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
index 64e44fad8ae84282d43f449d21772c4346acab68..00c67c0f238127d1d36c8ba17e6caf184412c446 100644 (file)
@@ -82,19 +82,19 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 }
 
 /** Wrapper around agp_free_memory() */
-void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(struct agp_memory * handle, int pages)
 {
        agp_free_memory(handle);
 }
 
 /** Wrapper around agp_bind_memory() */
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory * handle, unsigned int start)
 {
        return agp_bind_memory(handle, start);
 }
 
 /** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(DRM_AGP_MEM * handle)
+int drm_unbind_agp(struct agp_memory * handle)
 {
        return agp_unbind_memory(handle);
 }
@@ -110,8 +110,7 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
 
 void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
-       if (drm_core_has_AGP(dev) &&
-           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+       if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
                map->handle = agp_remap(map->offset, map->size, dev);
        else
                map->handle = ioremap(map->offset, map->size);
@@ -120,8 +119,7 @@ EXPORT_SYMBOL(drm_core_ioremap);
 
 void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
 {
-       if (drm_core_has_AGP(dev) &&
-           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+       if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
                map->handle = agp_remap(map->offset, map->size, dev);
        else
                map->handle = ioremap_wc(map->offset, map->size);
@@ -133,8 +131,7 @@ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
        if (!map->handle || !map->size)
                return;
 
-       if (drm_core_has_AGP(dev) &&
-           dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+       if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
                vunmap(map->handle);
        else
                iounmap(map->handle);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
new file mode 100644 (file)
index 0000000..b155ee2
--- /dev/null
@@ -0,0 +1,315 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <video/mipi_display.h>
+
+static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
+{
+       return of_driver_match_device(dev, drv);
+}
+
+static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
+       .runtime_suspend = pm_generic_runtime_suspend,
+       .runtime_resume = pm_generic_runtime_resume,
+       .suspend = pm_generic_suspend,
+       .resume = pm_generic_resume,
+       .freeze = pm_generic_freeze,
+       .thaw = pm_generic_thaw,
+       .poweroff = pm_generic_poweroff,
+       .restore = pm_generic_restore,
+};
+
+static struct bus_type mipi_dsi_bus_type = {
+       .name = "mipi-dsi",
+       .match = mipi_dsi_device_match,
+       .pm = &mipi_dsi_device_pm_ops,
+};
+
+static void mipi_dsi_dev_release(struct device *dev)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+       of_node_put(dev->of_node);
+       kfree(dsi);
+}
+
+static const struct device_type mipi_dsi_device_type = {
+       .release = mipi_dsi_dev_release,
+};
+
+static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+{
+       struct mipi_dsi_device *dsi;
+
+       dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return ERR_PTR(-ENOMEM);
+
+       dsi->host = host;
+       dsi->dev.bus = &mipi_dsi_bus_type;
+       dsi->dev.parent = host->dev;
+       dsi->dev.type = &mipi_dsi_device_type;
+
+       device_initialize(&dsi->dev);
+
+       return dsi;
+}
+
+static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
+{
+       struct mipi_dsi_host *host = dsi->host;
+
+       dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev),  dsi->channel);
+
+       return device_add(&dsi->dev);
+}
+
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+       struct mipi_dsi_device *dsi;
+       struct device *dev = host->dev;
+       int ret;
+       u32 reg;
+
+       ret = of_property_read_u32(node, "reg", &reg);
+       if (ret) {
+               dev_err(dev, "device node %s has no valid reg property: %d\n",
+                       node->full_name, ret);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (reg > 3) {
+               dev_err(dev, "device node %s has invalid reg property: %u\n",
+                       node->full_name, reg);
+               return ERR_PTR(-EINVAL);
+       }
+
+       dsi = mipi_dsi_device_alloc(host);
+       if (IS_ERR(dsi)) {
+               dev_err(dev, "failed to allocate DSI device %s: %ld\n",
+                       node->full_name, PTR_ERR(dsi));
+               return dsi;
+       }
+
+       dsi->dev.of_node = of_node_get(node);
+       dsi->channel = reg;
+
+       ret = mipi_dsi_device_add(dsi);
+       if (ret) {
+               dev_err(dev, "failed to add DSI device %s: %d\n",
+                       node->full_name, ret);
+               kfree(dsi);
+               return ERR_PTR(ret);
+       }
+
+       return dsi;
+}
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host)
+{
+       struct device_node *node;
+
+       for_each_available_child_of_node(host->dev->of_node, node)
+               of_mipi_dsi_device_add(host, node);
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_host_register);
+
+static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+       device_unregister(&dsi->dev);
+
+       return 0;
+}
+
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+       device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+}
+EXPORT_SYMBOL(mipi_dsi_host_unregister);
+
+/**
+ * mipi_dsi_attach - attach a DSI device to its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+{
+       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+       if (!ops || !ops->attach)
+               return -ENOSYS;
+
+       return ops->attach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_attach);
+
+/**
+ * mipi_dsi_detach - detach a DSI device from its DSI host
+ * @dsi: DSI peripheral
+ */
+int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+{
+       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
+       if (!ops || !ops->detach)
+               return -ENOSYS;
+
+       return ops->detach(dsi->host, dsi);
+}
+EXPORT_SYMBOL(mipi_dsi_detach);
+
+/**
+ * mipi_dsi_dcs_write - send DCS write command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @data: pointer to the command followed by parameters
+ * @len: length of @data
+ */
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+                      const void *data, size_t len)
+{
+       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+       struct mipi_dsi_msg msg = {
+               .channel = channel,
+               .tx_buf = data,
+               .tx_len = len
+       };
+
+       if (!ops || !ops->transfer)
+               return -ENOSYS;
+
+       switch (len) {
+       case 0:
+               return -EINVAL;
+       case 1:
+               msg.type = MIPI_DSI_DCS_SHORT_WRITE;
+               break;
+       case 2:
+               msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
+               break;
+       default:
+               msg.type = MIPI_DSI_DCS_LONG_WRITE;
+               break;
+       }
+
+       return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_write);
+
+/**
+ * mipi_dsi_dcs_read - send DCS read request command
+ * @dsi: DSI device
+ * @channel: virtual channel
+ * @cmd: DCS read command
+ * @data: pointer to read buffer
+ * @len: length of @data
+ *
+ * Function returns number of read bytes or error code.
+ */
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+                         u8 cmd, void *data, size_t len)
+{
+       const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+       struct mipi_dsi_msg msg = {
+               .channel = channel,
+               .type = MIPI_DSI_DCS_READ,
+               .tx_buf = &cmd,
+               .tx_len = 1,
+               .rx_buf = data,
+               .rx_len = len
+       };
+
+       if (!ops || !ops->transfer)
+               return -ENOSYS;
+
+       return ops->transfer(dsi->host, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read);
+
+static int mipi_dsi_drv_probe(struct device *dev)
+{
+       struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+       return drv->probe(dsi);
+}
+
+static int mipi_dsi_drv_remove(struct device *dev)
+{
+       struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+       return drv->remove(dsi);
+}
+
+/**
+ * mipi_dsi_driver_register - register a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
+{
+       drv->driver.bus = &mipi_dsi_bus_type;
+       if (drv->probe)
+               drv->driver.probe = mipi_dsi_drv_probe;
+       if (drv->remove)
+               drv->driver.remove = mipi_dsi_drv_remove;
+
+       return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_register);
+
+/**
+ * mipi_dsi_driver_unregister - unregister a driver for DSI devices
+ * @drv: DSI driver structure
+ */
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
+{
+       driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mipi_dsi_driver_unregister);
+
+static int __init mipi_dsi_bus_init(void)
+{
+       return bus_register(&mipi_dsi_bus_type);
+}
+postcore_initcall(mipi_dsi_bus_init);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI DSI Bus");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
new file mode 100644 (file)
index 0000000..2ef988e
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+static DEFINE_MUTEX(panel_lock);
+static LIST_HEAD(panel_list);
+
+void drm_panel_init(struct drm_panel *panel)
+{
+       INIT_LIST_HEAD(&panel->list);
+}
+EXPORT_SYMBOL(drm_panel_init);
+
+int drm_panel_add(struct drm_panel *panel)
+{
+       mutex_lock(&panel_lock);
+       list_add_tail(&panel->list, &panel_list);
+       mutex_unlock(&panel_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_add);
+
+void drm_panel_remove(struct drm_panel *panel)
+{
+       mutex_lock(&panel_lock);
+       list_del_init(&panel->list);
+       mutex_unlock(&panel_lock);
+}
+EXPORT_SYMBOL(drm_panel_remove);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
+{
+       if (panel->connector)
+               return -EBUSY;
+
+       panel->connector = connector;
+       panel->drm = connector->dev;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_attach);
+
+int drm_panel_detach(struct drm_panel *panel)
+{
+       panel->connector = NULL;
+       panel->drm = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+       struct drm_panel *panel;
+
+       mutex_lock(&panel_lock);
+
+       list_for_each_entry(panel, &panel_list, list) {
+               if (panel->dev->of_node == np) {
+                       mutex_unlock(&panel_lock);
+                       return panel;
+               }
+       }
+
+       mutex_unlock(&panel_lock);
+       return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_panel);
+#endif
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM panel infrastructure");
+MODULE_LICENSE("GPL and additional rights");
index 02679793c9e2e73d3d175136bbfb638a7cd0784f..5736aaa7e86cb069d30fd458b5842fd42defae17 100644 (file)
@@ -262,16 +262,11 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
        return 0;
 }
 
-static int drm_pci_agp_init(struct drm_device *dev)
+static void drm_pci_agp_init(struct drm_device *dev)
 {
-       if (drm_core_has_AGP(dev)) {
+       if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
                if (drm_pci_device_is_agp(dev))
                        dev->agp = drm_agp_init(dev);
-               if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
-                   && (dev->agp == NULL)) {
-                       DRM_ERROR("Cannot initialize the agpgart module.\n");
-                       return -EINVAL;
-               }
                if (dev->agp) {
                        dev->agp->agp_mtrr = arch_phys_wc_add(
                                dev->agp->agp_info.aper_base,
@@ -279,15 +274,14 @@ static int drm_pci_agp_init(struct drm_device *dev)
                                1024 * 1024);
                }
        }
-       return 0;
 }
 
-static void drm_pci_agp_destroy(struct drm_device *dev)
+void drm_pci_agp_destroy(struct drm_device *dev)
 {
-       if (drm_core_has_AGP(dev) && dev->agp) {
+       if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
                drm_agp_clear(dev);
-               drm_agp_destroy(dev->agp);
+               kfree(dev->agp);
                dev->agp = NULL;
        }
 }
@@ -299,8 +293,6 @@ static struct drm_bus drm_pci_bus = {
        .set_busid = drm_pci_set_busid,
        .set_unique = drm_pci_set_unique,
        .irq_by_busid = drm_pci_irq_by_busid,
-       .agp_init = drm_pci_agp_init,
-       .agp_destroy = drm_pci_agp_destroy,
 };
 
 /**
@@ -338,17 +330,25 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                pci_set_drvdata(pdev, dev);
 
+       drm_pci_agp_init(dev);
+
        ret = drm_dev_register(dev, ent->driver_data);
        if (ret)
-               goto err_pci;
+               goto err_agp;
 
        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, pci_name(pdev), dev->primary->index);
 
+       /* No locking needed since shadow-attach is single-threaded since it may
+        * only be called from the per-driver module init hook. */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+
        return 0;
 
-err_pci:
+err_agp:
+       drm_pci_agp_destroy(dev);
        pci_disable_device(pdev);
 err_free:
        drm_dev_free(dev);
@@ -375,7 +375,6 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
 
        DRM_DEBUG("\n");
 
-       INIT_LIST_HEAD(&driver->device_list);
        driver->kdriver.pci = pdriver;
        driver->bus = &drm_pci_bus;
 
@@ -383,6 +382,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
                return pci_register_driver(pdriver);
 
        /* If not using KMS, fall back to stealth mode manual scanning. */
+       INIT_LIST_HEAD(&driver->legacy_dev_list);
        for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
                pid = &pdriver->id_table[i];
 
@@ -452,6 +452,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
        return -1;
 }
 
+void drm_pci_agp_destroy(struct drm_device *dev) {}
 #endif
 
 EXPORT_SYMBOL(drm_pci_init);
@@ -465,8 +466,11 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
        if (driver->driver_features & DRIVER_MODESET) {
                pci_unregister_driver(pdriver);
        } else {
-               list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+               list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+                                        legacy_dev_list) {
                        drm_put_dev(dev);
+                       list_del(&dev->legacy_dev_list);
+               }
        }
        DRM_INFO("Module unloaded\n");
 }
index fc24fee8ec833b6bdb2c26ada2b45e947622b941..21fc82006b78c3423c801b54ef5acd40d4f79805 100644 (file)
@@ -147,18 +147,6 @@ int drm_platform_init(struct drm_driver *driver, struct platform_device *platfor
 
        driver->kdriver.platform_device = platform_device;
        driver->bus = &drm_platform_bus;
-       INIT_LIST_HEAD(&driver->device_list);
        return drm_get_platform_dev(platform_device, driver);
 }
 EXPORT_SYMBOL(drm_platform_init);
-
-void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
-{
-       struct drm_device *dev, *tmp;
-       DRM_DEBUG("\n");
-
-       list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
-               drm_put_dev(dev);
-       DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_platform_exit);
index 66dd3a001cf1b5ee43ec75f190b03146e7bbe46d..98a33c580ca1aeceed513f2af38b6742cee30f85 100644 (file)
@@ -99,13 +99,19 @@ void drm_ut_debug_printk(unsigned int request_level,
                         const char *function_name,
                         const char *format, ...)
 {
+       struct va_format vaf;
        va_list args;
 
        if (drm_debug & request_level) {
-               if (function_name)
-                       printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
                va_start(args, format);
-               vprintk(format, args);
+               vaf.fmt = format;
+               vaf.va = &args;
+
+               if (function_name)
+                       printk(KERN_DEBUG "[%s:%s], %pV", prefix,
+                              function_name, &vaf);
+               else
+                       printk(KERN_DEBUG "%pV", &vaf);
                va_end(args);
        }
 }
@@ -521,16 +527,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 
        mutex_lock(&drm_global_mutex);
 
-       if (dev->driver->bus->agp_init) {
-               ret = dev->driver->bus->agp_init(dev);
-               if (ret)
-                       goto out_unlock;
-       }
-
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
                if (ret)
-                       goto err_agp;
+                       goto out_unlock;
        }
 
        if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
@@ -557,8 +557,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
                        goto err_unload;
        }
 
-       list_add_tail(&dev->driver_item, &dev->driver->device_list);
-
        ret = 0;
        goto out_unlock;
 
@@ -571,9 +569,6 @@ err_render_node:
        drm_unplug_minor(dev->render);
 err_control_node:
        drm_unplug_minor(dev->control);
-err_agp:
-       if (dev->driver->bus->agp_destroy)
-               dev->driver->bus->agp_destroy(dev);
 out_unlock:
        mutex_unlock(&drm_global_mutex);
        return ret;
@@ -597,8 +592,8 @@ void drm_dev_unregister(struct drm_device *dev)
        if (dev->driver->unload)
                dev->driver->unload(dev);
 
-       if (dev->driver->bus->agp_destroy)
-               dev->driver->bus->agp_destroy(dev);
+       if (dev->agp)
+               drm_pci_agp_destroy(dev);
 
        drm_vblank_cleanup(dev);
 
@@ -608,7 +603,5 @@ void drm_dev_unregister(struct drm_device *dev)
        drm_unplug_minor(dev->control);
        drm_unplug_minor(dev->render);
        drm_unplug_minor(dev->primary);
-
-       list_del(&dev->driver_item);
 }
 EXPORT_SYMBOL(drm_dev_unregister);
index b179b70e7853b4bb4eb69c9a97a57295ae26a084..0f8cb1ae76074d2bf21d75d678585222019f2cdd 100644 (file)
@@ -1,4 +1,5 @@
 #include <drm/drmP.h>
+#include <drm/drm_usb.h>
 #include <linux/usb.h>
 #include <linux/module.h>
 
@@ -63,7 +64,6 @@ int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
        int res;
        DRM_DEBUG("\n");
 
-       INIT_LIST_HEAD(&driver->device_list);
        driver->kdriver.usb = udriver;
        driver->bus = &drm_usb_bus;
 
index 93e95d7efd575fc7bf084e431ae560793e733728..24e045c4f53140270ebbb28f9a6be21209b704a7 100644 (file)
@@ -101,7 +101,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /*
         * Find the right map
         */
-       if (!drm_core_has_AGP(dev))
+       if (!dev->agp)
                goto vm_fault_error;
 
        if (!dev->agp || !dev->agp->cant_use_aperture)
@@ -220,7 +220,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 
        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
-       atomic_dec(&dev->vma_count);
 
        map = vma->vm_private_data;
 
@@ -266,9 +265,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
-                       case _DRM_GEM:
-                               DRM_ERROR("tried to rmmap GEM object\n");
-                               break;
                        }
                        kfree(map);
                }
@@ -408,7 +404,6 @@ void drm_vm_open_locked(struct drm_device *dev,
 
        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
-       atomic_inc(&dev->vma_count);
 
        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
@@ -436,7 +431,6 @@ void drm_vm_close_locked(struct drm_device *dev,
 
        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
-       atomic_dec(&dev->vma_count);
 
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
@@ -595,7 +589,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
        switch (map->type) {
 #if !defined(__arm__)
        case _DRM_AGP:
-               if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+               if (dev->agp && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address from the CPU, so for
                         * memory of type DRM_AGP, we'll deal with sorting out the real physical
index 22b8f5eced80f3407b0fa9fc365c1d9e349d7e54..9d096a0c5f8d5f6bf0583d37f5efe433085504e4 100644 (file)
@@ -14,6 +14,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
+#include <linux/anon_inodes.h>
+
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -119,6 +121,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
        drm_vblank_offdelay = VBLANK_OFF_DELAY;
 
+       platform_set_drvdata(dev->platformdev, dev);
+
        return 0;
 
 err_drm_device:
@@ -150,9 +154,14 @@ static int exynos_drm_unload(struct drm_device *dev)
        return 0;
 }
 
+static const struct file_operations exynos_drm_gem_fops = {
+       .mmap = exynos_drm_gem_mmap_buffer,
+};
+
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_exynos_file_private *file_priv;
+       struct file *anon_filp;
        int ret;
 
        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -167,6 +176,16 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
                file->driver_priv = NULL;
        }
 
+       anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
+                                       NULL, 0);
+       if (IS_ERR(anon_filp)) {
+               kfree(file_priv);
+               return PTR_ERR(anon_filp);
+       }
+
+       anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
+       file_priv->anon_filp = anon_filp;
+
        return ret;
 }
 
@@ -179,6 +198,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct exynos_drm_private *private = dev->dev_private;
+       struct drm_exynos_file_private *file_priv;
        struct drm_pending_vblank_event *v, *vt;
        struct drm_pending_event *e, *et;
        unsigned long flags;
@@ -204,6 +224,9 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
+       file_priv = file->driver_priv;
+       if (file_priv->anon_filp)
+               fput(file_priv->anon_filp);
 
        kfree(file->driver_priv);
        file->driver_priv = NULL;
@@ -305,7 +328,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 
 static int exynos_drm_platform_remove(struct platform_device *pdev)
 {
-       drm_platform_exit(&exynos_drm_driver, pdev);
+       drm_put_dev(platform_get_drvdata(pdev));
 
        return 0;
 }
index eaa19668bf00328657bdd370c304f27cf66ece5e..0eaf5a27e120e586129074446e816b902d7316f3 100644 (file)
@@ -226,6 +226,7 @@ struct exynos_drm_ipp_private {
 struct drm_exynos_file_private {
        struct exynos_drm_g2d_private   *g2d_priv;
        struct exynos_drm_ipp_private   *ipp_priv;
+       struct file                     *anon_filp;
 };
 
 /*
index a61878bf5dcd091cf31ed64a8374d230b580dc1e..a20440ce32e6c39ee450d926f668f5638df0cca8 100644 (file)
@@ -347,7 +347,7 @@ static void fimd_wait_for_vblank(struct device *dev)
         */
        if (!wait_event_timeout(ctx->wait_vsync_queue,
                                !atomic_read(&ctx->wait_vsync_event),
-                               DRM_HZ/20))
+                               HZ/20))
                DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
@@ -706,7 +706,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
        /* set wait vsync event to zero and wake up queue. */
        if (atomic_read(&ctx->wait_vsync_event)) {
                atomic_set(&ctx->wait_vsync_event, 0);
-               DRM_WAKEUP(&ctx->wait_vsync_queue);
+               wake_up(&ctx->wait_vsync_queue);
        }
 out:
        return IRQ_HANDLED;
@@ -954,7 +954,7 @@ static int fimd_probe(struct platform_device *pdev)
        }
 
        ctx->driver_data = drm_fimd_get_driver_data(pdev);
-       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       init_waitqueue_head(&ctx->wait_vsync_queue);
        atomic_set(&ctx->wait_vsync_event, 0);
 
        subdrv = &ctx->subdrv;
index be59d50d8b16c836fbd1a5c88ed2bea87a7b769f..42d2904d88c7e32d90cd6d2dd56f97a3c917dc84 100644 (file)
@@ -338,46 +338,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                        &args->offset);
 }
 
-static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
-                                                       struct file *filp)
-{
-       struct drm_file *file_priv;
-
-       /* find current process's drm_file from filelist. */
-       list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
-               if (file_priv->filp == filp)
-                       return file_priv;
-
-       WARN_ON(1);
-
-       return ERR_PTR(-EFAULT);
-}
-
-static int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
-       struct drm_file *file_priv;
        unsigned long vm_size;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;
 
-       /* restore it to driver's fops. */
-       filp->f_op = fops_get(drm_dev->driver->fops);
-
-       file_priv = exynos_drm_find_drm_file(drm_dev, filp);
-       if (IS_ERR(file_priv))
-               return PTR_ERR(file_priv);
-
-       /* restore it to drm_file. */
-       filp->private_data = file_priv;
-
        update_vm_cache_attr(exynos_gem_obj, vma);
 
        vm_size = vma->vm_end - vma->vm_start;
@@ -411,15 +387,13 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
        return 0;
 }
 
-static const struct file_operations exynos_drm_gem_fops = {
-       .mmap = exynos_drm_gem_mmap_buffer,
-};
-
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
 {
+       struct drm_exynos_file_private *exynos_file_priv;
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
+       struct file *anon_filp;
        unsigned long addr;
 
        if (!(dev->driver->driver_features & DRIVER_GEM)) {
@@ -427,47 +401,25 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                return -ENODEV;
        }
 
+       mutex_lock(&dev->struct_mutex);
+
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
-       /*
-        * We have to use gem object and its fops for specific mmaper,
-        * but vm_mmap() can deliver only filp. So we have to change
-        * filp->f_op and filp->private_data temporarily, then restore
-        * again. So it is important to keep lock until restoration the
-        * settings to prevent others from misuse of filp->f_op or
-        * filp->private_data.
-        */
-       mutex_lock(&dev->struct_mutex);
-
-       /*
-        * Set specific mmper's fops. And it will be restored by
-        * exynos_drm_gem_mmap_buffer to dev->driver->fops.
-        * This is used to call specific mapper temporarily.
-        */
-       file_priv->filp->f_op = &exynos_drm_gem_fops;
-
-       /*
-        * Set gem object to private_data so that specific mmaper
-        * can get the gem object. And it will be restored by
-        * exynos_drm_gem_mmap_buffer to drm_file.
-        */
-       file_priv->filp->private_data = obj;
+       exynos_file_priv = file_priv->driver_priv;
+       anon_filp = exynos_file_priv->anon_filp;
+       anon_filp->private_data = obj;
 
-       addr = vm_mmap(file_priv->filp, 0, args->size,
-                       PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+       addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
+                       MAP_SHARED, 0);
 
        drm_gem_object_unreference(obj);
 
        if (IS_ERR_VALUE(addr)) {
-               /* check filp->f_op, filp->private_data are restored */
-               if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
-                       file_priv->filp->f_op = fops_get(dev->driver->fops);
-                       file_priv->filp->private_data = file_priv;
-               }
                mutex_unlock(&dev->struct_mutex);
                return (int)addr;
        }
index b8c818ba2ff450e104fe545106a544ed92341632..1592c0ba7de86b596f2000d5bb3f780fa17ca33e 100644 (file)
@@ -122,6 +122,9 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 
+int exynos_drm_gem_mmap_buffer(struct file *filp,
+                                     struct vm_area_struct *vma);
+
 /* map user space allocated by malloc to pages. */
 int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
index 63bc5f92fbb320e6287fa106a8357c7eac503435..2dfa48c76f54644d1c7efd6fb6e52bedacc02e70 100644 (file)
@@ -868,7 +868,7 @@ static void mixer_wait_for_vblank(void *ctx)
         */
        if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
                                !atomic_read(&mixer_ctx->wait_vsync_event),
-                               DRM_HZ/20))
+                               HZ/20))
                DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
@@ -1019,7 +1019,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
                /* set wait vsync event to zero and wake up queue. */
                if (atomic_read(&ctx->wait_vsync_event)) {
                        atomic_set(&ctx->wait_vsync_event, 0);
-                       DRM_WAKEUP(&ctx->wait_vsync_queue);
+                       wake_up(&ctx->wait_vsync_queue);
                }
        }
 
@@ -1209,7 +1209,7 @@ static int mixer_probe(struct platform_device *pdev)
        drm_hdmi_ctx->ctx = (void *)ctx;
        ctx->vp_enabled = drv->is_vp_enabled;
        ctx->mxr_ver = drv->version;
-       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       init_waitqueue_head(&ctx->wait_vsync_queue);
        atomic_set(&ctx->wait_vsync_event, 0);
 
        platform_set_drvdata(pdev, drm_hdmi_ctx);
index d5ef1a5793c8832e9a251fa817f7e19c246c06e6..de6f62a6ceb7a2d85ab67ba2eb64e8f688c05320 100644 (file)
@@ -326,7 +326,7 @@ int psbfb_sync(struct fb_info *info)
        struct psb_framebuffer *psbfb = &fbdev->pfb;
        struct drm_device *dev = psbfb->base.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
-       unsigned long _end = jiffies + DRM_HZ;
+       unsigned long _end = jiffies + HZ;
        int busy = 0;
        unsigned long flags;
 
index f88a1815d87c4e795492ead6ef1c4c07a4d31b89..0490ce36b53fd9237e754de7877f51f3aaffee97 100644 (file)
@@ -483,7 +483,7 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
 
        if (send_bytes > 16)
                return -1;
-       msg[0] = AUX_NATIVE_WRITE << 4;
+       msg[0] = DP_AUX_NATIVE_WRITE << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
        msg[3] = send_bytes - 1;
@@ -493,9 +493,10 @@ cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
                ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
                if (ret < 0)
                        return ret;
-               if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+               ack >>= 4;
+               if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
                        break;
-               else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+               else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
                        udelay(100);
                else
                        return -EIO;
@@ -523,7 +524,7 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
        uint8_t ack;
        int ret;
 
-       msg[0] = AUX_NATIVE_READ << 4;
+       msg[0] = DP_AUX_NATIVE_READ << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
        msg[3] = recv_bytes - 1;
@@ -538,12 +539,12 @@ cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
                        return -EPROTO;
                if (ret < 0)
                        return ret;
-               ack = reply[0];
-               if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+               ack = reply[0] >> 4;
+               if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
                        memcpy(recv, reply + 1, ret - 1);
                        return ret - 1;
                }
-               else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+               else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
                        udelay(100);
                else
                        return -EIO;
@@ -569,12 +570,12 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
-               msg[0] = AUX_I2C_READ << 4;
+               msg[0] = DP_AUX_I2C_READ << 4;
        else
-               msg[0] = AUX_I2C_WRITE << 4;
+               msg[0] = DP_AUX_I2C_WRITE << 4;
 
        if (!(mode & MODE_I2C_STOP))
-               msg[0] |= AUX_I2C_MOT << 4;
+               msg[0] |= DP_AUX_I2C_MOT << 4;
 
        msg[1] = address >> 8;
        msg[2] = address;
@@ -606,16 +607,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        return ret;
                }
 
-               switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
-               case AUX_NATIVE_REPLY_ACK:
+               switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+               case DP_AUX_NATIVE_REPLY_ACK:
                        /* I2C-over-AUX Reply field is only valid
                         * when paired with AUX ACK.
                         */
                        break;
-               case AUX_NATIVE_REPLY_NACK:
+               case DP_AUX_NATIVE_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_ch native nack\n");
                        return -EREMOTEIO;
-               case AUX_NATIVE_REPLY_DEFER:
+               case DP_AUX_NATIVE_REPLY_DEFER:
                        udelay(100);
                        continue;
                default:
@@ -624,16 +625,16 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        return -EREMOTEIO;
                }
 
-               switch (reply[0] & AUX_I2C_REPLY_MASK) {
-               case AUX_I2C_REPLY_ACK:
+               switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+               case DP_AUX_I2C_REPLY_ACK:
                        if (mode == MODE_I2C_READ) {
                                *read_byte = reply[1];
                        }
                        return reply_bytes - 1;
-               case AUX_I2C_REPLY_NACK:
+               case DP_AUX_I2C_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_i2c nack\n");
                        return -EREMOTEIO;
-               case AUX_I2C_REPLY_DEFER:
+               case DP_AUX_I2C_REPLY_DEFER:
                        DRM_DEBUG_KMS("aux_i2c defer\n");
                        udelay(100);
                        break;
@@ -677,7 +678,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
        return ret;
 }
 
-void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+static void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
        struct drm_display_mode *adjusted_mode)
 {
        adjusted_mode->hdisplay = fixed_mode->hdisplay;
index 24e8af3d22bfd34583d8b826b3fd75f7023002c7..386de2c9dc8649f449580a062d4d91c62fd3a733 100644 (file)
@@ -349,6 +349,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
        /* If we didn't get a handle then turn the cursor off */
        if (!handle) {
                temp = CURSOR_MODE_DISABLE;
+               mutex_lock(&dev->struct_mutex);
 
                if (gma_power_begin(dev, false)) {
                        REG_WRITE(control, temp);
@@ -365,6 +366,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
                        gma_crtc->cursor_obj = NULL;
                }
 
+               mutex_unlock(&dev->struct_mutex);
                return 0;
        }
 
@@ -374,9 +376,12 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
+       mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, handle);
-       if (!obj)
-               return -ENOENT;
+       if (!obj) {
+               ret = -ENOENT;
+               goto unlock;
+       }
 
        if (obj->size < width * height * 4) {
                dev_dbg(dev->dev, "Buffer is too small\n");
@@ -440,10 +445,13 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
        }
 
        gma_crtc->cursor_obj = obj;
+unlock:
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 
 unref_cursor:
        drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
index b59e6588c34327b9162c0346de826729633b20cc..5ad6a03e477eae63aa8cc635981887febca71ac1 100644 (file)
@@ -212,8 +212,8 @@ enum {
 #define PSB_HIGH_REG_OFFS 0x0600
 
 #define PSB_NUM_VBLANKS 2
-#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
-#define PSB_LID_DELAY (DRM_HZ / 10)
+#define PSB_WATCHDOG_DELAY (HZ * 2)
+#define PSB_LID_DELAY (HZ / 10)
 
 #define MDFLD_PNW_B0 0x04
 #define MDFLD_PNW_C0 0x08
@@ -232,7 +232,7 @@ enum {
 #define MDFLD_DSR_RR           45
 #define MDFLD_DPU_ENABLE       (1 << 31)
 #define MDFLD_DSR_FULLSCREEN   (1 << 30)
-#define MDFLD_DSR_DELAY                (DRM_HZ / MDFLD_DSR_RR)
+#define MDFLD_DSR_DELAY                (HZ / MDFLD_DSR_RR)
 
 #define PSB_PWR_STATE_ON               1
 #define PSB_PWR_STATE_OFF              2
@@ -769,7 +769,7 @@ extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
  *psb_irq.c
  */
 
-extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t psb_irq_handler(int irq, void *arg);
 extern int psb_irq_enable_dpst(struct drm_device *dev);
 extern int psb_irq_disable_dpst(struct drm_device *dev);
 extern void psb_irq_preinstall(struct drm_device *dev);
index bde27fdb41bf9d9a7ab79f0e3545e5ae0f9c96a5..dc2c8eb030faeaf29f4bd7d32c87c803ab846e30 100644 (file)
@@ -250,11 +250,6 @@ extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
 extern int intelfb_probe(struct drm_device *dev);
 extern int intelfb_remove(struct drm_device *dev,
                          struct drm_framebuffer *fb);
-extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
-                                                       *dev, struct
-                                                       drm_mode_fb_cmd
-                                                       *mode_cmd,
-                                                       void *mm_private);
 extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode);
index ba4830342d3450593bbc1f49a5a6fded4419c510..f883f9e4c5240ee923cbaabb3c57b88472983431 100644 (file)
@@ -200,7 +200,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
                mid_pipe_event_handler(dev, 1);
 }
 
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t psb_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
        struct drm_psb_private *dev_priv = dev->dev_private;
@@ -253,7 +253,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
 
        PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
        (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
-       DRM_READMEMORYBARRIER();
+       rmb();
 
        if (!handled)
                return IRQ_NONE;
@@ -450,21 +450,6 @@ int psb_irq_disable_dpst(struct drm_device *dev)
        return 0;
 }
 
-#ifdef PSB_FIXME
-static int psb_vblank_do_wait(struct drm_device *dev,
-                             unsigned int *sequence, atomic_t *counter)
-{
-       unsigned int cur_vblank;
-       int ret = 0;
-       DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
-                   (((cur_vblank = atomic_read(counter))
-                     - *sequence) <= (1 << 23)));
-       *sequence = cur_vblank;
-
-       return ret;
-}
-#endif
-
 /*
  * It is used to enable VBLANK interrupt
  */
index debb7f190c062a725e43e38c567dc815823b38f0..d0b45ffa112600b65beb932f8fb1b4e5802453af 100644 (file)
@@ -32,7 +32,7 @@ void sysirq_uninit(struct drm_device *dev);
 void psb_irq_preinstall(struct drm_device *dev);
 int  psb_irq_postinstall(struct drm_device *dev);
 void psb_irq_uninstall(struct drm_device *dev);
-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t psb_irq_handler(int irq, void *arg);
 
 int psb_irq_enable_dpst(struct drm_device *dev);
 int psb_irq_disable_dpst(struct drm_device *dev);
index 249fdff305c63b50fd72c2f62a499eb2a0d3e576..aeace37415aac8ae62ab2aa53a3b927ae648fb6c 100644 (file)
@@ -1193,6 +1193,10 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
 
 int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
+       /* Our userspace depends upon the agp mapping support. */
+       if (!dev->agp)
+               return -EINVAL;
+
        pci_set_master(dev->pdev);
 
        return 0;
index d8180d22ceddb38965e3c6f59367d75a8421dff2..441ccf8f5bdc3a7d2ebad9b4724eff2044d2c34b 100644 (file)
@@ -57,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+           DRIVER_USE_AGP |
            DRIVER_HAVE_DMA,
        .dev_priv_size = sizeof(drm_i810_buf_priv_t),
        .load = i810_driver_load,
index 6199d0b5b958fb2610e83810e117ea89c5057e6e..73ed59eff139a88948c7766dcc0c999398f59254 100644 (file)
@@ -1,8 +1,10 @@
 config DRM_I915
        tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
        depends on DRM
-       depends on AGP
-       depends on AGP_INTEL
+       depends on X86 && PCI
+       depends on (AGP || AGP=n)
+       select INTEL_GTT
+       select AGP_INTEL if AGP
        # we need shmfs for the swappable backing store, and in particular
        # the shmem_readpage() which depends upon tmpfs
        select SHMEM
@@ -35,15 +37,14 @@ config DRM_I915
 config DRM_I915_KMS
        bool "Enable modesetting on intel by default"
        depends on DRM_I915
+       default y
        help
-         Choose this option if you want kernel modesetting enabled by default,
-         and you have a new enough userspace to support this. Running old
-         userspaces with this enabled will cause pain.  Note that this causes
-         the driver to bind to PCI devices, which precludes loading things
-         like intelfb.
+         Choose this option if you want kernel modesetting enabled by default.
+
+         If in doubt, say "Y".
 
 config DRM_I915_FBDEV
-       bool "Enable legacy fbdev support for the modesettting intel driver"
+       bool "Enable legacy fbdev support for the modesetting intel driver"
        depends on DRM_I915
        select DRM_KMS_FB_HELPER
        select FB_CFB_FILLRECT
@@ -55,9 +56,12 @@ config DRM_I915_FBDEV
          support. Note that this support also provide the linux console
          support on top of the intel modesetting driver.
 
+         If in doubt, say "Y".
+
 config DRM_I915_PRELIMINARY_HW_SUPPORT
        bool "Enable preliminary support for prerelease Intel hardware by default"
        depends on DRM_I915
+       default n
        help
          Choose this option if you have prerelease Intel hardware and want the
          i915 driver to support it by default.  You can enable such support at
@@ -65,3 +69,15 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
          option changes the default for that module option.
 
          If in doubt, say "N".
+
+config DRM_I915_UMS
+       bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
+       depends on DRM_I915
+       default n
+       help
+         Choose this option if you still need userspace modesetting.
+
+         Userspace modesetting is deprecated for quite some time now, so
+         enable this only if you have ancient versions of the DDX drivers.
+
+         If in doubt, say "N".
index d4ae48b04cf2b127630e309192cce4c05509cbf4..9fd44f5f3b3b40ff5f1acf497b613a36e153cac2 100644 (file)
@@ -4,7 +4,6 @@
 
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
-         i915_debugfs.o \
          i915_gpu_error.o \
           i915_suspend.o \
          i915_gem.o \
@@ -54,6 +53,8 @@ i915-$(CONFIG_ACPI)   += intel_acpi.o intel_opregion.o
 
 i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
 
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
index c4a255be697921caade4bf3dd064217e82230763..954acb2c7021d8eb9a90fe1e49ea5bc806f1f344 100644 (file)
@@ -87,49 +87,6 @@ struct ns2501_priv {
  * when switching the resolution.
  */
 
-static void enable_dvo(struct intel_dvo_device *dvo)
-{
-       struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-       struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_gmbus *bus = container_of(adapter,
-                                              struct intel_gmbus,
-                                              adapter);
-       struct drm_i915_private *dev_priv = bus->dev_priv;
-
-       DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
-
-       ns->dvoc = I915_READ(DVO_C);
-       ns->pll_a = I915_READ(_DPLL_A);
-       ns->srcdim = I915_READ(DVOC_SRCDIM);
-       ns->fw_blc = I915_READ(FW_BLC);
-
-       I915_WRITE(DVOC, 0x10004084);
-       I915_WRITE(_DPLL_A, 0xd0820000);
-       I915_WRITE(DVOC_SRCDIM, 0x400300);      // 1024x768
-       I915_WRITE(FW_BLC, 0x1080304);
-
-       I915_WRITE(DVOC, 0x90004084);
-}
-
-/*
- * Restore the I915 registers modified by the above
- * trigger function.
- */
-static void restore_dvo(struct intel_dvo_device *dvo)
-{
-       struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_gmbus *bus = container_of(adapter,
-                                              struct intel_gmbus,
-                                              adapter);
-       struct drm_i915_private *dev_priv = bus->dev_priv;
-       struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-
-       I915_WRITE(DVOC, ns->dvoc);
-       I915_WRITE(_DPLL_A, ns->pll_a);
-       I915_WRITE(DVOC_SRCDIM, ns->srcdim);
-       I915_WRITE(FW_BLC, ns->fw_blc);
-}
-
 /*
 ** Read a register from the ns2501.
 ** Returns true if successful, false otherwise.
@@ -300,7 +257,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
                            struct drm_display_mode *adjusted_mode)
 {
        bool ok;
-       bool restore = false;
+       int retries = 10;
        struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
        DRM_DEBUG_KMS
@@ -476,20 +433,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
                        ns->reg_8_shadow |= NS2501_8_BPAS;
                }
                ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
-
-               if (!ok) {
-                       if (restore)
-                               restore_dvo(dvo);
-                       enable_dvo(dvo);
-                       restore = true;
-               }
-       } while (!ok);
-       /*
-        * Restore the old i915 registers before
-        * forcing the ns2501 on.
-        */
-       if (restore)
-               restore_dvo(dvo);
+       } while (!ok && retries--);
 }
 
 /* set the NS2501 power state */
@@ -510,7 +454,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
 static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 {
        bool ok;
-       bool restore = false;
+       int retries = 10;
        struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
        unsigned char ch;
 
@@ -537,16 +481,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
                        ok &=
                            ns2501_writeb(dvo, 0x35,
                                          enable ? 0xff : 0x00);
-                       if (!ok) {
-                               if (restore)
-                                       restore_dvo(dvo);
-                               enable_dvo(dvo);
-                               restore = true;
-                       }
-               } while (!ok);
-
-               if (restore)
-                       restore_dvo(dvo);
+               } while (!ok && retries--);
        }
 }
 
index 6ed45a984230c6db3bb8cc4b8fbdc2eec9a661d1..b2b46c52294c6d4d9c2a20890fafaa4b122e7fa3 100644 (file)
@@ -40,8 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#if defined(CONFIG_DEBUG_FS)
-
 enum {
        ACTIVE_LIST,
        INACTIVE_LIST,
@@ -406,16 +404,26 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        seq_putc(m, '\n');
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
+               struct task_struct *task;
 
                memset(&stats, 0, sizeof(stats));
                idr_for_each(&file->object_idr, per_file_stats, &stats);
+               /*
+                * Although we have a valid reference on file->pid, that does
+                * not guarantee that the task_struct who called get_pid() is
+                * still alive (e.g. get_pid(current) => fork() => exit()).
+                * Therefore, we need to protect this ->comm access using RCU.
+                */
+               rcu_read_lock();
+               task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
-                          get_pid_task(file->pid, PIDTYPE_PID)->comm,
+                          task ? task->comm : "<unknown>",
                           stats.count,
                           stats.total,
                           stats.active,
                           stats.inactive,
                           stats.unbound);
+               rcu_read_unlock();
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -564,10 +572,12 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        for_each_ring(ring, dev_priv, i)
                i915_ring_seqno_info(m, ring);
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -585,6 +595,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        if (INTEL_INFO(dev)->gen >= 8) {
                int i;
@@ -711,6 +722,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                }
                i915_ring_seqno_info(m, ring);
        }
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -904,9 +916,11 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        crstanddelay = I915_READ16(CRSTANDVID);
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -919,7 +933,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
+       int ret = 0;
+
+       intel_runtime_pm_get(dev_priv);
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
@@ -945,9 +961,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                /* RPSTAT1 is in the GT power well */
                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
-                       return ret;
+                       goto out;
 
-               gen6_gt_force_wake_get(dev_priv);
+               gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
                reqf = I915_READ(GEN6_RPNSWREQ);
                reqf &= ~GEN6_TURBO_DISABLE;
@@ -970,7 +986,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                cagf *= GT_FREQUENCY_MULTIPLIER;
 
-               gen6_gt_force_wake_put(dev_priv);
+               gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1018,23 +1034,24 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-               val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
+               val = valleyview_rps_max_freq(dev_priv);
                seq_printf(m, "max GPU freq: %d MHz\n",
-                          vlv_gpu_freq(dev_priv->mem_freq, val));
+                          vlv_gpu_freq(dev_priv, val));
 
-               val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
+               val = valleyview_rps_min_freq(dev_priv);
                seq_printf(m, "min GPU freq: %d MHz\n",
-                          vlv_gpu_freq(dev_priv->mem_freq, val));
+                          vlv_gpu_freq(dev_priv, val));
 
                seq_printf(m, "current GPU freq: %d MHz\n",
-                          vlv_gpu_freq(dev_priv->mem_freq,
-                                       (freq_sts >> 8) & 0xff));
+                          vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
                seq_puts(m, "no P-state info available\n");
        }
 
-       return 0;
+out:
+       intel_runtime_pm_put(dev_priv);
+       return ret;
 }
 
 static int i915_delayfreq_table(struct seq_file *m, void *unused)
@@ -1048,6 +1065,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -1055,6 +1073,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }
 
+       intel_runtime_pm_put(dev_priv);
+
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -1076,12 +1096,14 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -1099,11 +1121,13 @@ static int ironlake_drpc_info(struct seq_file *m)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -1154,6 +1178,50 @@ static int ironlake_drpc_info(struct seq_file *m)
        return 0;
 }
 
+static int vlv_drpc_info(struct seq_file *m)
+{
+
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 rpmodectl1, rcctl1;
+       unsigned fw_rendercount = 0, fw_mediacount = 0;
+
+       rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+       rcctl1 = I915_READ(GEN6_RC_CONTROL);
+
+       seq_printf(m, "Video Turbo Mode: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+       seq_printf(m, "Turbo enabled: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
+       seq_printf(m, "HW control enabled: %s\n",
+                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
+       seq_printf(m, "SW control enabled: %s\n",
+                  yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+                         GEN6_RP_MEDIA_SW_MODE));
+       seq_printf(m, "RC6 Enabled: %s\n",
+                  yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+                                       GEN6_RC_CTL_EI_MODE(1))));
+       seq_printf(m, "Render Power Well: %s\n",
+                       (I915_READ(VLV_GTLC_PW_STATUS) &
+                               VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+       seq_printf(m, "Media Power Well: %s\n",
+                       (I915_READ(VLV_GTLC_PW_STATUS) &
+                               VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+       spin_lock_irq(&dev_priv->uncore.lock);
+       fw_rendercount = dev_priv->uncore.fw_rendercount;
+       fw_mediacount = dev_priv->uncore.fw_mediacount;
+       spin_unlock_irq(&dev_priv->uncore.lock);
+
+       seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
+       seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
+
+
+       return 0;
+}
+
+
 static int gen6_drpc_info(struct seq_file *m)
 {
 
@@ -1167,6 +1235,7 @@ static int gen6_drpc_info(struct seq_file *m)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        spin_lock_irq(&dev_priv->uncore.lock);
        forcewake_count = dev_priv->uncore.forcewake_count;
@@ -1192,6 +1261,8 @@ static int gen6_drpc_info(struct seq_file *m)
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
+       intel_runtime_pm_put(dev_priv);
+
        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
        seq_printf(m, "HW control enabled: %s\n",
@@ -1256,7 +1327,9 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
 
-       if (IS_GEN6(dev) || IS_GEN7(dev))
+       if (IS_VALLEYVIEW(dev))
+               return vlv_drpc_info(m);
+       else if (IS_GEN6(dev) || IS_GEN7(dev))
                return gen6_drpc_info(m);
        else
                return ironlake_drpc_info(m);
@@ -1268,7 +1341,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (!I915_HAS_FBC(dev)) {
+       if (!HAS_FBC(dev)) {
                seq_puts(m, "FBC unsupported on this chipset\n");
                return 0;
        }
@@ -1330,7 +1403,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
                return 0;
        }
 
-       if (I915_READ(IPS_CTL) & IPS_ENABLE)
+       if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
                seq_puts(m, "enabled\n");
        else
                seq_puts(m, "disabled\n");
@@ -1406,6 +1479,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
@@ -1422,6 +1496,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                           ((ia_freq >> 8) & 0xff) * 100);
        }
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
@@ -1437,8 +1512,10 @@ static int i915_gfxec(struct seq_file *m, void *unused)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+       intel_runtime_pm_put(dev_priv);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -1565,13 +1642,21 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned forcewake_count;
+       unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
 
        spin_lock_irq(&dev_priv->uncore.lock);
-       forcewake_count = dev_priv->uncore.forcewake_count;
+       if (IS_VALLEYVIEW(dev)) {
+               fw_rendercount = dev_priv->uncore.fw_rendercount;
+               fw_mediacount = dev_priv->uncore.fw_mediacount;
+       } else
+               forcewake_count = dev_priv->uncore.forcewake_count;
        spin_unlock_irq(&dev_priv->uncore.lock);
 
-       seq_printf(m, "forcewake count = %u\n", forcewake_count);
+       if (IS_VALLEYVIEW(dev)) {
+               seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
+               seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
+       } else
+               seq_printf(m, "forcewake count = %u\n", forcewake_count);
 
        return 0;
 }
@@ -1610,6 +1695,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1641,6 +1727,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           I915_READ(DISP_ARB_CTL));
        }
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -1701,16 +1788,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        int ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        if (INTEL_INFO(dev)->gen >= 8)
                gen8_ppgtt_info(m, dev);
        else if (INTEL_INFO(dev)->gen >= 6)
                gen6_ppgtt_info(m, dev);
 
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -1735,28 +1825,28 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 
        seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
-       seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
-       seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
+       seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
+       seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
 
-       seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
-       seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
+       seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
+       seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
 
-       seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
-       seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
+       seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
+       seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
 
-       seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
-       seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
+       seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
+       seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
 
        seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
+                  vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
 
        mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1784,6 +1874,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        u32 psrperf = 0;
        bool enabled = false;
 
+       intel_runtime_pm_get(dev_priv);
+
        seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
        seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
@@ -1796,6 +1888,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
                        EDP_PSR_PERF_CNT_MASK;
        seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
+       intel_runtime_pm_put(dev_priv);
        return 0;
 }
 
@@ -1845,6 +1938,76 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
        return 0;
 }
 
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+       switch (domain) {
+       case POWER_DOMAIN_PIPE_A:
+               return "PIPE_A";
+       case POWER_DOMAIN_PIPE_B:
+               return "PIPE_B";
+       case POWER_DOMAIN_PIPE_C:
+               return "PIPE_C";
+       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+               return "PIPE_A_PANEL_FITTER";
+       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+               return "PIPE_B_PANEL_FITTER";
+       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+               return "PIPE_C_PANEL_FITTER";
+       case POWER_DOMAIN_TRANSCODER_A:
+               return "TRANSCODER_A";
+       case POWER_DOMAIN_TRANSCODER_B:
+               return "TRANSCODER_B";
+       case POWER_DOMAIN_TRANSCODER_C:
+               return "TRANSCODER_C";
+       case POWER_DOMAIN_TRANSCODER_EDP:
+               return "TRANSCODER_EDP";
+       case POWER_DOMAIN_VGA:
+               return "VGA";
+       case POWER_DOMAIN_AUDIO:
+               return "AUDIO";
+       case POWER_DOMAIN_INIT:
+               return "INIT";
+       default:
+               WARN_ON(1);
+               return "?";
+       }
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       int i;
+
+       mutex_lock(&power_domains->lock);
+
+       seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+       for (i = 0; i < power_domains->power_well_count; i++) {
+               struct i915_power_well *power_well;
+               enum intel_display_power_domain power_domain;
+
+               power_well = &power_domains->power_wells[i];
+               seq_printf(m, "%-25s %d\n", power_well->name,
+                          power_well->count);
+
+               for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+                    power_domain++) {
+                       if (!(BIT(power_domain) & power_well->domains))
+                               continue;
+
+                       seq_printf(m, "  %-23s %d\n",
+                                power_domain_str(power_domain),
+                                power_domains->domain_use_count[power_domain]);
+               }
+       }
+
+       mutex_unlock(&power_domains->lock);
+
+       return 0;
+}
+
 struct pipe_crc_info {
        const char *name;
        struct drm_device *dev;
@@ -1857,6 +2020,9 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
        struct drm_i915_private *dev_priv = info->dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
+       if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+               return -ENODEV;
+
        spin_lock_irq(&pipe_crc->lock);
 
        if (pipe_crc->opened) {
@@ -2005,8 +2171,8 @@ static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
        info->dev = dev;
        ent = debugfs_create_file(info->name, S_IRUGO, root, info,
                                  &i915_pipe_crc_fops);
-       if (IS_ERR(ent))
-               return PTR_ERR(ent);
+       if (!ent)
+               return -ENOMEM;
 
        return drm_add_fake_info_node(minor, ent, info);
 }
@@ -2347,7 +2513,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-       u32 val;
+       u32 val = 0; /* shut up gcc */
        int ret;
 
        if (pipe_crc->source == source)
@@ -2742,7 +2908,7 @@ i915_drop_caches_set(void *data, u64 val)
        struct i915_vma *vma, *x;
        int ret;
 
-       DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+       DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
 
        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
@@ -2810,8 +2976,7 @@ i915_max_freq_get(void *data, u64 *val)
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv->mem_freq,
-                                   dev_priv->rps.max_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
        else
                *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2841,9 +3006,9 @@ i915_max_freq_set(void *data, u64 val)
         * Turbo will still be enabled, but won't go above the set value.
         */
        if (IS_VALLEYVIEW(dev)) {
-               val = vlv_freq_opcode(dev_priv->mem_freq, val);
+               val = vlv_freq_opcode(dev_priv, val);
                dev_priv->rps.max_delay = val;
-               gen6_set_rps(dev, val);
+               valleyview_set_rps(dev, val);
        } else {
                do_div(val, GT_FREQUENCY_MULTIPLIER);
                dev_priv->rps.max_delay = val;
@@ -2876,8 +3041,7 @@ i915_min_freq_get(void *data, u64 *val)
                return ret;
 
        if (IS_VALLEYVIEW(dev))
-               *val = vlv_gpu_freq(dev_priv->mem_freq,
-                                   dev_priv->rps.min_delay);
+               *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
        else
                *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2907,7 +3071,7 @@ i915_min_freq_set(void *data, u64 val)
         * Turbo will still be enabled, but won't go below the set value.
         */
        if (IS_VALLEYVIEW(dev)) {
-               val = vlv_freq_opcode(dev_priv->mem_freq, val);
+               val = vlv_freq_opcode(dev_priv, val);
                dev_priv->rps.min_delay = val;
                valleyview_set_rps(dev, val);
        } else {
@@ -2938,8 +3102,11 @@ i915_cache_sharing_get(void *data, u64 *val)
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
 
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->dev->struct_mutex);
 
        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -2960,6 +3127,7 @@ i915_cache_sharing_set(void *data, u64 val)
        if (val > 3)
                return -EINVAL;
 
+       intel_runtime_pm_get(dev_priv);
        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
 
        /* Update the cache sharing policy here as well */
@@ -2968,6 +3136,7 @@ i915_cache_sharing_set(void *data, u64 val)
        snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
+       intel_runtime_pm_put(dev_priv);
        return 0;
 }
 
@@ -2983,7 +3152,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
-       gen6_gt_force_wake_get(dev_priv);
+       intel_runtime_pm_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
        return 0;
 }
@@ -2996,7 +3166,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+       intel_runtime_pm_put(dev_priv);
 
        return 0;
 }
@@ -3016,8 +3187,8 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
                                  S_IRUSR,
                                  root, dev,
                                  &i915_forcewake_fops);
-       if (IS_ERR(ent))
-               return PTR_ERR(ent);
+       if (!ent)
+               return -ENOMEM;
 
        return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
 }
@@ -3034,8 +3205,8 @@ static int i915_debugfs_create(struct dentry *root,
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  fops);
-       if (IS_ERR(ent))
-               return PTR_ERR(ent);
+       if (!ent)
+               return -ENOMEM;
 
        return drm_add_fake_info_node(minor, ent, fops);
 }
@@ -3079,6 +3250,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
        {"i915_pc8_status", i915_pc8_status, 0},
+       {"i915_power_domain_info", i915_power_domain_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -3102,10 +3274,10 @@ static const struct i915_debugfs_files {
 void intel_display_crc_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
+       enum pipe pipe;
 
-       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
-               struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+       for_each_pipe(pipe) {
+               struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 
                pipe_crc->opened = false;
                spin_lock_init(&pipe_crc->lock);
@@ -3164,5 +3336,3 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
                drm_debugfs_remove_files(info_list, 1, minor);
        }
 }
-
-#endif /* CONFIG_DEBUG_FS */
index 5c648425c1e053616801b3e1e545791a5fe111c0..15a74f979b4bf8c773b6bde25c65f0badf6afb8e 100644 (file)
@@ -42,6 +42,8 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <acpi/video.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 
 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
@@ -791,7 +793,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
        if (ring->irq_get(ring)) {
-               DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+               DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
@@ -828,7 +830,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);
 
-       if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+       if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }
@@ -1016,8 +1018,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
-               DRM_ERROR("DRM_COPY_TO_USER failed\n");
+       if (copy_to_user(param->value, &value, sizeof(int))) {
+               DRM_ERROR("copy_to_user failed\n");
                return -EFAULT;
        }
 
@@ -1411,7 +1413,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
        master->driver_priv = NULL;
 }
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#if IS_ENABLED(CONFIG_FB)
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
        struct apertures_struct *ap;
@@ -1484,6 +1486,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                return -ENODEV;
        }
 
+       /* UMS needs agp support. */
+       if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
+               return -EINVAL;
+
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;
@@ -1494,7 +1500,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
-       spin_lock_init(&dev_priv->backlight.lock);
+       spin_lock_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        mutex_init(&dev_priv->dpio_lock);
@@ -1639,8 +1645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        goto out_gem_unload;
        }
 
-       if (HAS_POWER_WELL(dev))
-               intel_power_domains_init(dev);
+       intel_power_domains_init(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev);
@@ -1664,11 +1669,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);
 
+       intel_init_runtime_pm(dev_priv);
+
        return 0;
 
 out_power_well:
-       if (HAS_POWER_WELL(dev))
-               intel_power_domains_remove(dev);
+       intel_power_domains_remove(dev);
        drm_vblank_cleanup(dev);
 out_gem_unload:
        if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1679,6 +1685,7 @@ out_gem_unload:
 
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
+       pm_qos_remove_request(&dev_priv->pm_qos);
        destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1704,25 +1711,27 @@ int i915_driver_unload(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       ret = i915_gem_suspend(dev);
+       if (ret) {
+               DRM_ERROR("failed to idle hardware: %d\n", ret);
+               return ret;
+       }
+
+       intel_fini_runtime_pm(dev_priv);
+
        intel_gpu_ips_teardown();
 
-       if (HAS_POWER_WELL(dev)) {
-               /* The i915.ko module is still not prepared to be loaded when
-                * the power well is not enabled, so just enable it in case
-                * we're going to unload/reload. */
-               intel_display_set_init_power(dev, true);
-               intel_power_domains_remove(dev);
-       }
+       /* The i915.ko module is still not prepared to be loaded when
+        * the power well is not enabled, so just enable it in case
+        * we're going to unload/reload. */
+       intel_display_set_init_power(dev, true);
+       intel_power_domains_remove(dev);
 
        i915_teardown_sysfs(dev);
 
        if (dev_priv->mm.inactive_shrinker.scan_objects)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-       ret = i915_gem_suspend(dev);
-       if (ret)
-               DRM_ERROR("failed to idle hardware: %d\n", ret);
-
        io_mapping_free(dev_priv->gtt.mappable);
        arch_phys_wc_del(dev_priv->gtt.mtrr);
 
@@ -1777,7 +1786,6 @@ int i915_driver_unload(struct drm_device *dev)
 
        list_del(&dev_priv->gtt.base.global_link);
        WARN_ON(!list_empty(&dev_priv->vm_list));
-       drm_mm_takedown(&dev_priv->gtt.base.mm);
 
        drm_vblank_cleanup(dev);
 
@@ -1910,6 +1918,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index 5b7b7e06cb3a09caec1cd7d5ab90401389df7a40..04f1f02c4019f57873caeb0c70fea14e0816ddeb 100644 (file)
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(powersave,
                "Enable powersavings, fbc, downclocking, etc. (default: true)");
 
 int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0600);
+module_param_named(semaphores, i915_semaphores, int, 0400);
 MODULE_PARM_DESC(semaphores,
                "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable_hangcheck,
                "(default: true)");
 
 int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
 MODULE_PARM_DESC(i915_enable_ppgtt,
                "Enable PPGTT (default: true)");
 
@@ -155,7 +155,6 @@ MODULE_PARM_DESC(prefault_disable,
                "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
 
 static struct drm_driver driver;
-extern int intel_agp_enabled;
 
 static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@@ -173,6 +172,7 @@ static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .has_fbc = 1,
        .ring_mask = RENDER_RING,
 };
 
@@ -192,6 +192,7 @@ static const struct intel_device_info intel_i915gm_info = {
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
+       .has_fbc = 1,
        .ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
@@ -204,6 +205,7 @@ static const struct intel_device_info intel_i945gm_info = {
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
+       .has_fbc = 1,
        .ring_mask = RENDER_RING,
 };
 
@@ -265,6 +267,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
 static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
 };
@@ -280,6 +283,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 #define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1
 
@@ -292,7 +296,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
-       .has_fbc = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
@@ -307,6 +310,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
+       .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
 };
 
@@ -315,6 +319,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
+       .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
 };
 
@@ -332,12 +337,10 @@ static const struct intel_device_info intel_haswell_m_info = {
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
-       .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
-       .is_preliminary = 1,
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -346,7 +349,6 @@ static const struct intel_device_info intel_broadwell_d_info = {
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
-       .is_preliminary = 1,
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -476,12 +478,12 @@ check_next:
 bool i915_semaphore_is_enabled(struct drm_device *dev)
 {
        if (INTEL_INFO(dev)->gen < 6)
-               return 0;
+               return false;
 
        /* Until we get further testing... */
        if (IS_GEN8(dev)) {
                WARN_ON(!i915_preliminary_hw_support);
-               return 0;
+               return false;
        }
 
        if (i915_semaphores >= 0)
@@ -493,7 +495,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
                return false;
 #endif
 
-       return 1;
+       return true;
 }
 
 static int i915_drm_freeze(struct drm_device *dev)
@@ -501,6 +503,8 @@ static int i915_drm_freeze(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
 
+       intel_runtime_pm_get(dev_priv);
+
        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
@@ -688,6 +692,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);
+
+       intel_runtime_pm_put(dev_priv);
        return error;
 }
 
@@ -762,14 +768,14 @@ int i915_reset(struct drm_device *dev)
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
-                       DRM_ERROR("Reset not implemented, but ignoring "
-                                 "error for simulated gpu hangs\n");
+                       DRM_INFO("Reset not implemented, but ignoring "
+                                "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }
 
        if (ret) {
-               DRM_ERROR("Failed to reset chip.\n");
+               DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
@@ -790,12 +796,9 @@ int i915_reset(struct drm_device *dev)
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
-               bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
                dev_priv->ums.mm_suspended = 0;
 
                ret = i915_gem_init_hw(dev);
-               if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
-                       DRM_ERROR("HW contexts didn't survive reset\n");
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -831,17 +834,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
 
-       /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
-        * implementation for gen3 (and only gen3) that used legacy drm maps
-        * (gasp!) to share buffers between X and the client. Hence we need to
-        * keep around the fake agp stuff for gen3, even when kms is enabled. */
-       if (intel_info->gen != 3) {
-               driver.driver_features &=
-                       ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
-       } else if (!intel_agp_enabled) {
-               DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
-               return -ENODEV;
-       }
+       driver.driver_features &= ~(DRIVER_USE_AGP);
 
        return drm_get_pci_dev(pdev, ent, &driver);
 }
@@ -915,6 +908,49 @@ static int i915_pm_poweroff(struct device *dev)
        return i915_drm_freeze(drm_dev);
 }
 
+static int i915_runtime_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(!HAS_RUNTIME_PM(dev));
+
+       DRM_DEBUG_KMS("Suspending device\n");
+
+       i915_gem_release_all_mmaps(dev_priv);
+
+       del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+       dev_priv->pm.suspended = true;
+
+       /*
+        * current versions of firmware which depend on this opregion
+        * notification have repurposed the D1 definition to mean
+        * "runtime suspended" vs. what you would normally expect (D3)
+        * to distinguish it from notifications that might be sent
+        * via the suspend path.
+        */
+       intel_opregion_notify_adapter(dev, PCI_D1);
+
+       return 0;
+}
+
+static int i915_runtime_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(!HAS_RUNTIME_PM(dev));
+
+       DRM_DEBUG_KMS("Resuming device\n");
+
+       intel_opregion_notify_adapter(dev, PCI_D0);
+       dev_priv->pm.suspended = false;
+
+       return 0;
+}
+
 static const struct dev_pm_ops i915_pm_ops = {
        .suspend = i915_pm_suspend,
        .resume = i915_pm_resume,
@@ -922,6 +958,8 @@ static const struct dev_pm_ops i915_pm_ops = {
        .thaw = i915_pm_thaw,
        .poweroff = i915_pm_poweroff,
        .restore = i915_pm_resume,
+       .runtime_suspend = i915_runtime_suspend,
+       .runtime_resume = i915_runtime_resume,
 };
 
 static const struct vm_operations_struct i915_gem_vm_ops = {
@@ -949,7 +987,7 @@ static struct drm_driver driver = {
         * deal with them for Intel hardware.
         */
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+           DRIVER_USE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER,
        .load = i915_driver_load,
@@ -1024,14 +1062,24 @@ static int __init i915_init(void)
                driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-       if (!(driver.driver_features & DRIVER_MODESET))
+       if (!(driver.driver_features & DRIVER_MODESET)) {
                driver.get_vblank_timestamp = NULL;
+#ifndef CONFIG_DRM_I915_UMS
+               /* Silently fail loading to not upset userspace. */
+               return 0;
+#endif
+       }
 
        return drm_pci_init(&driver, &i915_pci_driver);
 }
 
 static void __exit i915_exit(void)
 {
+#ifndef CONFIG_DRM_I915_UMS
+       if (!(driver.driver_features & DRIVER_MODESET))
+               return; /* Never loaded a driver. */
+#endif
+
        drm_pci_exit(&driver, &i915_pci_driver);
 }
 
index 1caa5e34fbe3d55825cd5b4340fbeb6575de4fa7..4a2bf8e3f739bff7b2b9f6e018100ec98e8b9c0e 100644 (file)
@@ -89,6 +89,18 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
+#define I915_NUM_PHYS_VLV 1
+
+enum dpio_channel {
+       DPIO_CH0,
+       DPIO_CH1
+};
+
+enum dpio_phy {
+       DPIO_PHY0,
+       DPIO_PHY1
+};
+
 enum intel_display_power_domain {
        POWER_DOMAIN_PIPE_A,
        POWER_DOMAIN_PIPE_B,
@@ -101,6 +113,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
        POWER_DOMAIN_VGA,
+       POWER_DOMAIN_AUDIO,
        POWER_DOMAIN_INIT,
 
        POWER_DOMAIN_NUM,
@@ -310,13 +323,14 @@ struct drm_i915_error_state {
        u32 instps[I915_NUM_RINGS];
        u32 extra_instdone[I915_NUM_INSTDONE_REG];
        u32 seqno[I915_NUM_RINGS];
-       u64 bbaddr;
+       u64 bbaddr[I915_NUM_RINGS];
        u32 fault_reg[I915_NUM_RINGS];
        u32 done_reg;
        u32 faddr[I915_NUM_RINGS];
        u64 fence[I915_MAX_NUM_FENCES];
        struct timeval time;
        struct drm_i915_error_ring {
+               bool valid;
                struct drm_i915_error_object {
                        int page_count;
                        u32 gtt_offset;
@@ -351,6 +365,7 @@ struct drm_i915_error_state {
        enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
+struct intel_connector;
 struct intel_crtc_config;
 struct intel_crtc;
 struct intel_limit;
@@ -358,7 +373,7 @@ struct dpll;
 
 struct drm_i915_display_funcs {
        bool (*fbc_enabled)(struct drm_device *dev);
-       void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+       void (*enable_fbc)(struct drm_crtc *crtc);
        void (*disable_fbc)(struct drm_device *dev);
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
@@ -413,11 +428,20 @@ struct drm_i915_display_funcs {
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
+
+       int (*setup_backlight)(struct intel_connector *connector);
+       uint32_t (*get_backlight)(struct intel_connector *connector);
+       void (*set_backlight)(struct intel_connector *connector,
+                             uint32_t level);
+       void (*disable_backlight)(struct intel_connector *connector);
+       void (*enable_backlight)(struct intel_connector *connector);
 };
 
 struct intel_uncore_funcs {
-       void (*force_wake_get)(struct drm_i915_private *dev_priv);
-       void (*force_wake_put)(struct drm_i915_private *dev_priv);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv,
+                                                       int fw_engine);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv,
+                                                       int fw_engine);
 
        uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -442,6 +466,9 @@ struct intel_uncore {
        unsigned fifo_count;
        unsigned forcewake_count;
 
+       unsigned fw_rendercount;
+       unsigned fw_mediacount;
+
        struct delayed_work force_wake_work;
 };
 
@@ -669,7 +696,6 @@ struct i915_fbc {
                struct delayed_work work;
                struct drm_crtc *crtc;
                struct drm_framebuffer *fb;
-               int interval;
        } *fbc_work;
 
        enum no_fbc_reason {
@@ -708,7 +734,6 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -761,8 +786,6 @@ struct i915_suspend_saved_registers {
        u32 saveBLC_PWM_CTL;
        u32 saveBLC_PWM_CTL2;
        u32 saveBLC_HIST_CTL_B;
-       u32 saveBLC_PWM_CTL_B;
-       u32 saveBLC_PWM_CTL2_B;
        u32 saveBLC_CPU_PWM_CTL;
        u32 saveBLC_CPU_PWM_CTL2;
        u32 saveFPB0;
@@ -932,21 +955,29 @@ struct intel_ilk_power_mgmt {
 
 /* Power well structure for haswell */
 struct i915_power_well {
+       const char *name;
+       bool always_on;
        /* power well enable/disable usage count */
        int count;
+       unsigned long domains;
+       void *data;
+       void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+                   bool enable);
+       bool (*is_enabled)(struct drm_device *dev,
+                          struct i915_power_well *power_well);
 };
 
-#define I915_MAX_POWER_WELLS 1
-
 struct i915_power_domains {
        /*
         * Power wells needed for initialization at driver init and suspend
         * time are on. They are kept on until after the first modeset.
         */
        bool init_power_on;
+       int power_well_count;
 
        struct mutex lock;
-       struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+       int domain_use_count[POWER_DOMAIN_NUM];
+       struct i915_power_well *power_wells;
 };
 
 struct i915_dri1_state {
@@ -1077,34 +1108,30 @@ struct i915_gpu_error {
        unsigned long missed_irq_rings;
 
        /**
-        * State variable and reset counter controlling the reset flow
+        * State variable controlling the reset flow and count
         *
-        * Upper bits are for the reset counter.  This counter is used by the
-        * wait_seqno code to race-free noticed that a reset event happened and
-        * that it needs to restart the entire ioctl (since most likely the
-        * seqno it waited for won't ever signal anytime soon).
+        * This is a counter which gets incremented when reset is triggered,
+        * and again when reset has been handled. So odd values (lowest bit set)
+        * means that reset is in progress and even values that
+        * (reset_counter >> 1):th reset was successfully completed.
+        *
+        * If reset is not completed succesfully, the I915_WEDGE bit is
+        * set meaning that hardware is terminally sour and there is no
+        * recovery. All waiters on the reset_queue will be woken when
+        * that happens.
+        *
+        * This counter is used by the wait_seqno code to notice that reset
+        * event happened and it needs to restart the entire ioctl (since most
+        * likely the seqno it waited for won't ever signal anytime soon).
         *
         * This is important for lock-free wait paths, where no contended lock
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
-        *
-        * Lowest bit controls the reset state machine: Set means a reset is in
-        * progress. This state will (presuming we don't have any bugs) decay
-        * into either unset (successful reset) or the special WEDGED value (hw
-        * terminally sour). All waiters on the reset_queue will be woken when
-        * that happens.
         */
        atomic_t reset_counter;
 
-       /**
-        * Special values/flags for reset_counter
-        *
-        * Note that the code relies on
-        *      I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
-        * being true.
-        */
 #define I915_RESET_IN_PROGRESS_FLAG    1
-#define I915_WEDGED                    0xffffffff
+#define I915_WEDGED                    (1 << 31)
 
        /**
         * Waitqueue to signal when the reset has completed. Used by clients
@@ -1158,6 +1185,11 @@ struct intel_vbt_data {
        int edp_bpp;
        struct edp_power_seq edp_pps;
 
+       struct {
+               u16 pwm_freq_hz;
+               bool active_low_pwm;
+       } backlight;
+
        /* MIPI DSI */
        struct {
                u16 panel_id;
@@ -1184,7 +1216,7 @@ struct intel_wm_level {
        uint32_t fbc_val;
 };
 
-struct hsw_wm_values {
+struct ilk_wm_values {
        uint32_t wm_pipe[3];
        uint32_t wm_lp[3];
        uint32_t wm_lp_spr[3];
@@ -1262,6 +1294,10 @@ struct i915_package_c8 {
        } regsave;
 };
 
+struct i915_runtime_pm {
+       bool suspended;
+};
+
 enum intel_pipe_crc_source {
        INTEL_PIPE_CRC_SOURCE_NONE,
        INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1366,15 +1402,9 @@ typedef struct drm_i915_private {
 
        /* overlay */
        struct intel_overlay *overlay;
-       unsigned int sprite_scaling_enabled;
 
-       /* backlight */
-       struct {
-               int level;
-               bool enabled;
-               spinlock_t lock; /* bl registers and the above bl fields */
-               struct backlight_device *device;
-       } backlight;
+       /* backlight registers and fields in struct intel_panel */
+       spinlock_t backlight_lock;
 
        /* LVDS info */
        bool no_aux_handshake;
@@ -1426,6 +1456,7 @@ typedef struct drm_i915_private {
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        struct intel_ddi_plls ddi_plls;
+       int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
        /* Reclocking support */
        bool render_reclock_avail;
@@ -1470,7 +1501,6 @@ typedef struct drm_i915_private {
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
 
-       bool hw_contexts_disabled;
        uint32_t hw_context_size;
        struct list_head context_list;
 
@@ -1492,11 +1522,13 @@ typedef struct drm_i915_private {
                uint16_t cur_latency[5];
 
                /* current hardware state */
-               struct hsw_wm_values hw;
+               struct ilk_wm_values hw;
        } wm;
 
        struct i915_package_c8 pc8;
 
+       struct i915_runtime_pm pm;
+
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
@@ -1813,15 +1845,15 @@ struct drm_i915_file_private {
 
 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
 #define HAS_IPS(dev)           (IS_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev)    (IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)           (IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_PC8(dev)           (IS_HASWELL(dev)) /* XXX HSW:ULX */
+#define HAS_RUNTIME_PM(dev)    (IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
@@ -1911,7 +1943,6 @@ extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
 
@@ -1987,6 +2018,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
@@ -2063,12 +2095,17 @@ int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
        return unlikely(atomic_read(&error->reset_counter)
-                       & I915_RESET_IN_PROGRESS_FLAG);
+                       & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-       return atomic_read(&error->reset_counter) == I915_WEDGED;
+       return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+       return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
 }
 
 void i915_gem_reset(struct drm_device *dev);
@@ -2180,7 +2217,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 }
 
 /* i915_gem_context.c */
-void i915_gem_context_init(struct drm_device *dev);
+int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
@@ -2399,6 +2436,8 @@ extern int intel_enable_rc6(const struct drm_device *dev);
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file);
 
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -2414,8 +2453,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2430,6 +2469,8 @@ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@ -2438,9 +2479,30 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                     enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+       (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+       ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+       ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+       ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+       (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+       ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+       ((reg) >= 0x30000 && (reg) < 0x40000))
+
+#define FORCEWAKE_RENDER       (1 << 0)
+#define FORCEWAKE_MEDIA                (1 << 1)
+#define FORCEWAKE_ALL          (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
 
-int vlv_gpu_freq(int ddr_freq, int val);
-int vlv_freq_opcode(int ddr_freq, int val);
 
 #define I915_READ8(reg)                dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)  dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
index 76d3d1ab73c6965063eba62527594dce82dc41d4..00c8361547253ecccf8ba17d6c06d730c688ca3b 100644 (file)
@@ -1015,9 +1015,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        struct drm_i915_file_private *file_priv)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       const bool irq_test_in_progress =
+               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        struct timespec before, now;
        DEFINE_WAIT(wait);
-       long timeout_jiffies;
+       unsigned long timeout_expire;
        int ret;
 
        WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,7 +1027,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+       timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
        if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
@@ -1035,8 +1037,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                                         msecs_to_jiffies(100));
        }
 
-       if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
-           WARN_ON(!ring->irq_get(ring)))
+       if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
 
        /* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1045,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        getrawmonotonic(&before);
        for (;;) {
                struct timer_list timer;
-               unsigned long expire;
 
                prepare_to_wait(&ring->irq_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1070,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        break;
                }
 
-               if (timeout_jiffies <= 0) {
+               if (timeout && time_after_eq(jiffies, timeout_expire)) {
                        ret = -ETIME;
                        break;
                }
 
                timer.function = NULL;
                if (timeout || missed_irq(dev_priv, ring)) {
+                       unsigned long expire;
+
                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+                       expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }
 
                io_schedule();
 
-               if (timeout)
-                       timeout_jiffies = expire - jiffies;
-
                if (timer.function) {
                        del_singleshot_timer_sync(&timer);
                        destroy_timer_on_stack(&timer);
@@ -1095,7 +1094,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        getrawmonotonic(&now);
        trace_i915_gem_request_wait_end(ring, seqno);
 
-       ring->irq_put(ring);
+       if (!irq_test_in_progress)
+               ring->irq_put(ring);
 
        finish_wait(&ring->irq_queue, &wait);
 
@@ -1380,6 +1380,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
+       intel_runtime_pm_get(dev_priv);
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;
@@ -1427,8 +1429,10 @@ out:
                /* If this -EIO is due to a gpu hang, give the reset code a
                 * chance to clean up the mess. Otherwise return the proper
                 * SIGBUS. */
-               if (i915_terminally_wedged(&dev_priv->gpu_error))
-                       return VM_FAULT_SIGBUS;
+               if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+                       ret = VM_FAULT_SIGBUS;
+                       break;
+               }
        case -EAGAIN:
                /*
                 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1447,38 @@ out:
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
-               return VM_FAULT_NOPAGE;
+               ret = VM_FAULT_NOPAGE;
+               break;
        case -ENOMEM:
-               return VM_FAULT_OOM;
+               ret = VM_FAULT_OOM;
+               break;
        case -ENOSPC:
-               return VM_FAULT_SIGBUS;
+               ret = VM_FAULT_SIGBUS;
+               break;
        default:
                WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-               return VM_FAULT_SIGBUS;
+               ret = VM_FAULT_SIGBUS;
+               break;
        }
+
+       intel_runtime_pm_put(dev_priv);
+       return ret;
+}
+
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct i915_vma *vma;
+
+       /*
+        * Only the global gtt is relevant for gtt memory mappings, so restrict
+        * list traversal to objects bound into the global address space. Note
+        * that the active list should be empty, but better safe than sorry.
+        */
+       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
+       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
 }
 
 /**
@@ -2303,7 +2330,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 
        if (ring->hangcheck.action != HANGCHECK_WAIT &&
            i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
+               DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
                          offset,
@@ -2361,16 +2388,6 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                        struct intel_ring_buffer *ring)
 {
-       while (!list_empty(&ring->request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&ring->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
-
-               i915_gem_free_request(request);
-       }
-
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;
 
@@ -2380,6 +2397,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
                i915_gem_object_move_to_inactive(obj);
        }
+
+       /*
+        * We must free the requests after all the corresponding objects have
+        * been moved off active lists. Which is the same order as the normal
+        * retire_requests function does. This is important if object hold
+        * implicit references on things like e.g. ppgtt address spaces through
+        * the request.
+        */
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+
+               i915_gem_free_request(request);
+       }
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2760,7 +2794,6 @@ int i915_vma_unbind(struct i915_vma *vma)
                obj->has_aliasing_ppgtt_mapping = 0;
        }
        i915_gem_gtt_finish_object(obj);
-       i915_gem_object_unpin_pages(obj);
 
        list_del(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
@@ -2768,7 +2801,6 @@ int i915_vma_unbind(struct i915_vma *vma)
                obj->map_and_fenceable = true;
 
        drm_mm_remove_node(&vma->node);
-
        i915_gem_vma_destroy(vma);
 
        /* Since the unbound list is global, only move to that list if
@@ -2776,6 +2808,12 @@ int i915_vma_unbind(struct i915_vma *vma)
        if (list_empty(&obj->vma_list))
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
+       /* And finally now the object is completely decoupled from this vma,
+        * we can drop its hold on the backing storage and allow it to be
+        * reaped by the shrinker.
+        */
+       i915_gem_object_unpin_pages(obj);
+
        return 0;
 }
 
@@ -3068,7 +3106,7 @@ i915_find_fence_reg(struct drm_device *dev)
        }
 
        if (avail == NULL)
-               return NULL;
+               goto deadlock;
 
        /* None available, try to steal one or wait for a user to finish */
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3078,7 +3116,12 @@ i915_find_fence_reg(struct drm_device *dev)
                return reg;
        }
 
-       return NULL;
+deadlock:
+       /* Wait for completion of pending flips which consume fences */
+       if (intel_has_pending_fb_unpin(dev))
+               return ERR_PTR(-EAGAIN);
+
+       return ERR_PTR(-EDEADLK);
 }
 
 /**
@@ -3123,8 +3166,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                }
        } else if (enable) {
                reg = i915_find_fence_reg(dev);
-               if (reg == NULL)
-                       return -EDEADLK;
+               if (IS_ERR(reg))
+                       return PTR_ERR(reg);
 
                if (reg->obj) {
                        struct drm_i915_gem_object *old = reg->obj;
@@ -4179,6 +4222,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma, *next;
 
+       intel_runtime_pm_get(dev_priv);
+
        trace_i915_gem_object_destroy(obj);
 
        if (obj->phys_obj)
@@ -4223,6 +4268,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        kfree(obj->bit_17);
        i915_gem_object_free(obj);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
@@ -4479,7 +4526,13 @@ i915_gem_init_hw(struct drm_device *dev)
         * XXX: There was some w/a described somewhere suggesting loading
         * contexts before PPGTT.
         */
-       i915_gem_context_init(dev);
+       ret = i915_gem_context_init(dev);
+       if (ret) {
+               i915_gem_cleanup_ringbuffer(dev);
+               DRM_ERROR("Context initialization failed %d\n", ret);
+               return ret;
+       }
+
        if (dev_priv->mm.aliasing_ppgtt) {
                ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
                if (ret) {
index b0f42b9ca037ed472e1a0dd4cd663df6ffd70f06..e08acaba540269736777a7f7758a88d97f2aa4b6 100644 (file)
@@ -247,36 +247,34 @@ err_destroy:
        return ret;
 }
 
-void i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
 
-       if (!HAS_HW_CONTEXTS(dev)) {
-               dev_priv->hw_contexts_disabled = true;
-               DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
-               return;
-       }
+       if (!HAS_HW_CONTEXTS(dev))
+               return 0;
 
        /* If called from reset, or thaw... we've been here already */
-       if (dev_priv->hw_contexts_disabled ||
-           dev_priv->ring[RCS].default_context)
-               return;
+       if (dev_priv->ring[RCS].default_context)
+               return 0;
 
        dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
        if (dev_priv->hw_context_size > (1<<20)) {
-               dev_priv->hw_contexts_disabled = true;
                DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-               return;
+               return -E2BIG;
        }
 
-       if (create_default_context(dev_priv)) {
-               dev_priv->hw_contexts_disabled = true;
-               DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
-               return;
+       ret = create_default_context(dev_priv);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
+                                ret);
+               return ret;
        }
 
        DRM_DEBUG_DRIVER("HW context support initialized\n");
+       return 0;
 }
 
 void i915_gem_context_fini(struct drm_device *dev)
@@ -284,7 +282,7 @@ void i915_gem_context_fini(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
-       if (dev_priv->hw_contexts_disabled)
+       if (!HAS_HW_CONTEXTS(dev))
                return;
 
        /* The only known way to stop the gpu from accessing the hw context is
@@ -327,16 +325,16 @@ i915_gem_context_get_hang_stats(struct drm_device *dev,
                                struct drm_file *file,
                                u32 id)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_hw_context *ctx;
 
        if (id == DEFAULT_CONTEXT_ID)
                return &file_priv->hang_stats;
 
-       ctx = NULL;
-       if (!dev_priv->hw_contexts_disabled)
-               ctx = i915_gem_context_get(file->driver_priv, id);
+       if (!HAS_HW_CONTEXTS(dev))
+               return ERR_PTR(-ENOENT);
+
+       ctx = i915_gem_context_get(file->driver_priv, id);
        if (ctx == NULL)
                return ERR_PTR(-ENOENT);
 
@@ -502,8 +500,6 @@ static int do_switch(struct i915_hw_context *to)
  * @ring: ring for which we'll execute the context switch
  * @file_priv: file_priv associated with the context, may be NULL
  * @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -517,7 +513,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct i915_hw_context *to;
 
-       if (dev_priv->hw_contexts_disabled)
+       if (!HAS_HW_CONTEXTS(ring->dev))
                return 0;
 
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
@@ -542,7 +538,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_hw_context *ctx;
@@ -551,7 +546,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       if (dev_priv->hw_contexts_disabled)
+       if (!HAS_HW_CONTEXTS(dev))
                return -ENODEV;
 
        ret = i915_mutex_lock_interruptible(dev);
index 8f3adc7d0dc823bd5e7848f013bda20d1d133cbc..2ca280f9ee53e3f6f6422fcc3605fa86b2f52afa 100644 (file)
  */
 
 #include <drm/drmP.h>
-#include "i915_drv.h"
 #include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
 #include "i915_trace.h"
 
 static bool
@@ -53,6 +55,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        int ret = 0;
+       int pass = 0;
 
        trace_i915_gem_evict(dev, min_size, alignment, mappable);
 
@@ -119,14 +122,24 @@ none:
        /* Can we unpin some objects such as idle hw contents,
         * or pending flips?
         */
-       ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
-       if (ret)
-               return ret;
+       if (nonblocking)
+               return -ENOSPC;
 
        /* Only idle the GPU and repeat the search once */
-       i915_gem_retire_requests(dev);
-       nonblocking = true;
-       goto search_again;
+       if (pass++ == 0) {
+               ret = i915_gpu_idle(dev);
+               if (ret)
+                       return ret;
+
+               i915_gem_retire_requests(dev);
+               goto search_again;
+       }
+
+       /* If we still have pending pageflip completions, drop
+        * back to userspace to give our workqueues time to
+        * acquire our locks and unpin the old scanouts.
+        */
+       return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
 
 found:
        /* drm_mm doesn't allow any other other operations while
index a3ba9a8cd68794bbfd163c9236c91c7be9d15965..d269ecf46e264cbaaef6c9e1db6ddcfb57f1424d 100644 (file)
@@ -46,7 +46,7 @@ struct eb_vmas {
 };
 
 static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
 {
        struct eb_vmas *eb = NULL;
 
@@ -252,7 +252,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        uint32_t page_offset = offset_in_page(reloc->offset);
        char *vaddr;
-       int ret = -EINVAL;
+       int ret;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
@@ -287,7 +287,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;
-       int ret = -EINVAL;
+       int ret;
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
@@ -335,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        struct drm_i915_gem_object *target_i915_obj;
        struct i915_vma *target_vma;
        uint32_t target_offset;
-       int ret = -EINVAL;
+       int ret;
 
        /* we've already hold a reference to all valid objects */
        target_vma = eb_get_vma(eb, reloc->target_handle);
@@ -344,7 +344,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;
 
-       target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+       target_offset = target_vma->node.start;
 
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
@@ -365,7 +365,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
-               return ret;
+               return -EINVAL;
        }
        if (unlikely((reloc->write_domain | reloc->read_domains)
                     & ~I915_GEM_GPU_DOMAINS)) {
@@ -376,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
-               return ret;
+               return -EINVAL;
        }
 
        target_obj->pending_read_domains |= reloc->read_domains;
@@ -396,14 +396,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
-               return ret;
+               return -EINVAL;
        }
        if (unlikely(reloc->offset & 3)) {
                DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
-               return ret;
+               return -EINVAL;
        }
 
        /* We can't wait for rendering with pagefaults disabled */
@@ -491,8 +491,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_vmas *eb,
-                            struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 {
        struct i915_vma *vma;
        int ret = 0;
@@ -901,6 +900,24 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
        return 0;
 }
 
+static int
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+                         const u32 ctx_id)
+{
+       struct i915_ctx_hang_stats *hs;
+
+       hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+       if (IS_ERR(hs))
+               return PTR_ERR(hs);
+
+       if (hs->banned) {
+               DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
@@ -980,8 +997,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
-       struct i915_ctx_hang_stats *hs;
-       u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+       const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start, exec_len;
        u32 mask, flags;
        int ret, mode, i;
@@ -1108,6 +1124,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;
@@ -1118,7 +1136,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       eb = eb_create(args, vm);
+       ret = i915_gem_validate_context(dev, file, ctx_id);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               goto pre_mutex_err;
+       }
+
+       eb = eb_create(args);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
@@ -1141,7 +1165,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
-               ret = i915_gem_execbuffer_relocate(eb, vm);
+               ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1170,17 +1194,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto err;
 
-       hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
-       if (IS_ERR(hs)) {
-               ret = PTR_ERR(hs);
-               goto err;
-       }
-
-       if (hs->banned) {
-               ret = -EIO;
-               goto err;
-       }
-
        ret = i915_switch_context(ring, file, ctx_id);
        if (ret)
                goto err;
@@ -1242,6 +1255,10 @@ err:
 
 pre_mutex_err:
        kfree(cliprects);
+
+       /* intel_gpu_busy should also get a ref, so it will free when the device
+        * is really idle. */
+       intel_runtime_pm_put(dev_priv);
        return ret;
 }
 
index 3540569948dbd62db3a2fd5980e141879162115d..40a2b36b276baa774028b56ae60b6ae6c59e919d 100644 (file)
@@ -240,10 +240,16 @@ static int gen8_ppgtt_enable(struct drm_device *dev)
                for_each_ring(ring, dev_priv, j) {
                        ret = gen8_write_pdp(ring, i, addr);
                        if (ret)
-                               return ret;
+                               goto err_out;
                }
        }
        return 0;
+
+err_out:
+       for_each_ring(ring, dev_priv, j)
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+       return ret;
 }
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -293,23 +299,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
        unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
        struct sg_page_iter sg_iter;
 
-       pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+       pt_vaddr = NULL;
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-               dma_addr_t page_addr;
+               if (pt_vaddr == NULL)
+                       pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
 
-               page_addr = sg_dma_address(sg_iter.sg) +
-                               (sg_iter.sg_pgoffset << PAGE_SHIFT);
-               pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
-                                                   true);
+               pt_vaddr[act_pte] =
+                       gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+                                       cache_level, true);
                if (++act_pte == GEN8_PTES_PER_PAGE) {
                        kunmap_atomic(pt_vaddr);
+                       pt_vaddr = NULL;
                        act_pt++;
-                       pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
                        act_pte = 0;
-
                }
        }
-       kunmap_atomic(pt_vaddr);
+       if (pt_vaddr)
+               kunmap_atomic(pt_vaddr);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -318,6 +324,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
                container_of(vm, struct i915_hw_ppgtt, base);
        int i, j;
 
+       drm_mm_takedown(&vm->mm);
+
        for (i = 0; i < ppgtt->num_pd_pages ; i++) {
                if (ppgtt->pd_dma_addr[i]) {
                        pci_unmap_page(ppgtt->base.dev->pdev,
@@ -381,6 +389,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
        ppgtt->base.clear_range = gen8_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
        BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 
@@ -573,21 +583,23 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        struct sg_page_iter sg_iter;
 
-       pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+       pt_vaddr = NULL;
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-               dma_addr_t page_addr;
+               if (pt_vaddr == NULL)
+                       pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
-               page_addr = sg_page_iter_dma_address(&sg_iter);
-               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
+               pt_vaddr[act_pte] =
+                       vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
+                                      cache_level, true);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
+                       pt_vaddr = NULL;
                        act_pt++;
-                       pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
                        act_pte = 0;
-
                }
        }
-       kunmap_atomic(pt_vaddr);
+       if (pt_vaddr)
+               kunmap_atomic(pt_vaddr);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -632,6 +644,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen6_ppgtt_cleanup;
        ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
@@ -1124,7 +1138,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
                if (ret)
                        DRM_DEBUG_KMS("Reservation failed\n");
                obj->has_global_gtt_mapping = 1;
-               list_add(&vma->vma_link, &obj->vma_list);
        }
 
        dev_priv->gtt.base.start = start;
@@ -1400,6 +1413,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 {
 
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+       drm_mm_takedown(&vm->mm);
        iounmap(gtt->gsm);
        teardown_scratch_page(vm->dev);
 }
@@ -1425,6 +1440,9 @@ static int i915_gmch_probe(struct drm_device *dev,
        dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
        dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
+       if (unlikely(dev_priv->gtt.do_idle_maps))
+               DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+
        return 0;
 }
 
index d284d892ed9491e8f2a618e22576673e06887f74..1a24e84f231578ae772d40ad75adda4c356b8d68 100644 (file)
@@ -250,7 +250,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
        }
 
        sg = st->sgl;
-       sg->offset = offset;
+       sg->offset = 0;
        sg->length = size;
 
        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
@@ -420,6 +420,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+       i915_gem_object_pin_pages(obj);
 
        return obj;
 
index 79dcb8f896c6f34363e6bd62e90f1773bfcf8f0a..d7fd2fd2f0a5e1ba6ed25f9a4dce0c20dc03b9e3 100644 (file)
@@ -239,6 +239,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  unsigned ring)
 {
        BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+       if (!error->ring[ring].valid)
+               return;
+
        err_printf(m, "%s command stream:\n", ring_str(ring));
        err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
        err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
@@ -247,12 +250,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
        err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
        err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
        err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-       if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (INTEL_INFO(dev)->gen >= 4) {
+               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
                err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
-       if (INTEL_INFO(dev)->gen >= 4)
                err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+       }
        err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
        err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
        if (INTEL_INFO(dev)->gen >= 6) {
@@ -294,7 +296,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        struct drm_device *dev = error_priv->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error = error_priv->error;
-       struct intel_ring_buffer *ring;
        int i, j, page, offset, elt;
 
        if (!error) {
@@ -329,7 +330,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        if (INTEL_INFO(dev)->gen == 7)
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-       for_each_ring(ring, dev_priv, i)
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++)
                i915_ring_error_state(m, dev, error, i);
 
        if (error->active_bo)
@@ -386,8 +387,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        }
                }
 
-               obj = error->ring[i].ctx;
-               if (obj) {
+               if ((obj = error->ring[i].ctx)) {
                        err_printf(m, "%s --- HW Context = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
@@ -668,7 +668,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                        return NULL;
 
                obj = ring->scratch.obj;
-               if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+               if (obj != NULL &&
+                   acthd >= i915_gem_obj_ggtt_offset(obj) &&
                    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
                        return i915_error_object_create(dev_priv, obj);
        }
@@ -725,8 +726,9 @@ static void i915_record_ring_state(struct drm_device *dev,
                error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
                error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
                error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-               if (ring->id == RCS)
-                       error->bbaddr = I915_READ64(BB_ADDR);
+               error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+               if (INTEL_INFO(dev)->gen >= 8)
+                       error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
                error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
        } else {
                error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
@@ -775,11 +777,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
        struct drm_i915_gem_request *request;
        int i, count;
 
-       for_each_ring(ring, dev_priv, i) {
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+               if (ring->dev == NULL)
+                       continue;
+
+               error->ring[i].valid = true;
+
                i915_record_ring_state(dev, error, ring);
 
                error->ring[i].batchbuffer =
index f13d5edc39d56c9bdfe3091a9218872ef59be071..17d8fcb1b6f7ac113b4c0c035088979b1c8083b4 100644 (file)
@@ -62,7 +62,7 @@ static const u32 hpd_mask_i915[] = {
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
 };
 
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -600,7 +600,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
-       return ((high1 << 8) | low) + (pixel >= vbl_start);
+       return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 }
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -621,36 +621,15 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
 
-static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t status;
-       int reg;
 
-       if (IS_VALLEYVIEW(dev)) {
-               status = pipe == PIPE_A ?
-                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
-                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-               reg = VLV_ISR;
-       } else if (IS_GEN2(dev)) {
-               status = pipe == PIPE_A ?
-                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
-                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-               reg = ISR;
-       } else if (INTEL_INFO(dev)->gen < 5) {
-               status = pipe == PIPE_A ?
-                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
-                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-               reg = ISR;
-       } else if (INTEL_INFO(dev)->gen < 7) {
+       if (INTEL_INFO(dev)->gen < 7) {
                status = pipe == PIPE_A ?
                        DE_PIPEA_VBLANK :
                        DE_PIPEB_VBLANK;
-
-               reg = DEISR;
        } else {
                switch (pipe) {
                default:
@@ -664,18 +643,14 @@ static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
                        status = DE_PIPEC_VBLANK_IVB;
                        break;
                }
-
-               reg = DEISR;
        }
 
-       if (IS_GEN2(dev))
-               return __raw_i915_read16(dev_priv, reg) & status;
-       else
-               return __raw_i915_read32(dev_priv, reg) & status;
+       return __raw_i915_read32(dev_priv, DEISR) & status;
 }
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-                            int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+                                   unsigned int flags, int *vpos, int *hpos,
+                                   ktime_t *stime, ktime_t *etime)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -698,6 +673,12 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;
 
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vbl_start = DIV_ROUND_UP(vbl_start, 2);
+               vbl_end /= 2;
+               vtotal /= 2;
+       }
+
        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
 
        /*
@@ -722,17 +703,42 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                else
                        position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-               /*
-                * The scanline counter increments at the leading edge
-                * of hsync, ie. it completely misses the active portion
-                * of the line. Fix up the counter at both edges of vblank
-                * to get a more accurate picture whether we're in vblank
-                * or not.
-                */
-               in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
-               if ((in_vbl && position == vbl_start - 1) ||
-                   (!in_vbl && position == vbl_end - 1))
-                       position = (position + 1) % vtotal;
+               if (HAS_PCH_SPLIT(dev)) {
+                       /*
+                        * The scanline counter increments at the leading edge
+                        * of hsync, ie. it completely misses the active portion
+                        * of the line. Fix up the counter at both edges of vblank
+                        * to get a more accurate picture whether we're in vblank
+                        * or not.
+                        */
+                       in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+                       if ((in_vbl && position == vbl_start - 1) ||
+                           (!in_vbl && position == vbl_end - 1))
+                               position = (position + 1) % vtotal;
+               } else {
+                       /*
+                        * ISR vblank status bits don't work the way we'd want
+                        * them to work on non-PCH platforms (for
+                        * ilk_pipe_in_vblank_locked()), and there doesn't
+                        * appear any other way to determine if we're currently
+                        * in vblank.
+                        *
+                        * Instead let's assume that we're already in vblank if
+                        * we got called from the vblank interrupt and the
+                        * scanline counter value indicates that we're on the
+                        * line just prior to vblank start. This should result
+                        * in the correct answer, unless the vblank interrupt
+                        * delivery really got delayed for almost exactly one
+                        * full frame/field.
+                        */
+                       if (flags & DRM_CALLED_FROM_VBLIRQ &&
+                           position == vbl_start - 1) {
+                               position = (position + 1) % vtotal;
+
+                               /* Signal this correction as "applied". */
+                               ret |= 0x8;
+                       }
+               }
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
@@ -809,7 +815,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
-                                                    crtc);
+                                                    crtc,
+                                                    &to_intel_crtc(crtc)->config.adjusted_mode);
 }
 
 static bool intel_hpd_irq_event(struct drm_device *dev,
@@ -1015,10 +1022,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
-       if (new_delay < (int)dev_priv->rps.min_delay)
-               new_delay = dev_priv->rps.min_delay;
-       if (new_delay > (int)dev_priv->rps.max_delay)
-               new_delay = dev_priv->rps.max_delay;
+       new_delay = clamp_t(int, new_delay,
+                           dev_priv->rps.min_delay, dev_priv->rps.max_delay);
        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
        if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1235,9 +1240,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
-               WARN(((hpd[i] & hotplug_trigger) &&
-                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
-                    "Received HPD interrupt although disabled\n");
+               WARN_ONCE(hpd[i] & hotplug_trigger &&
+                         dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
+                         "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+                         hotplug_trigger, i, hpd[i]);
 
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -1474,6 +1480,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
+                       if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+                               dp_aux_irq_handler(dev);
+
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }
@@ -1993,7 +2002,7 @@ static void i915_error_work_func(struct work_struct *work)
                        kobject_uevent_env(&dev->primary->kdev->kobj,
                                           KOBJ_CHANGE, reset_done_event);
                } else {
-                       atomic_set(&error->reset_counter, I915_WEDGED);
+                       atomic_set_mask(I915_WEDGED, &error->reset_counter);
                }
 
                /*
@@ -3140,10 +3149,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
  * Returns true when a page flip has completed.
  */
 static bool i8xx_handle_vblank(struct drm_device *dev,
-                              int pipe, u16 iir)
+                              int plane, int pipe, u32 iir)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+       u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
        if (!drm_handle_vblank(dev, pipe))
                return false;
@@ -3151,7 +3160,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                return false;
 
-       intel_prepare_page_flip(dev, pipe);
+       intel_prepare_page_flip(dev, plane);
 
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
@@ -3220,9 +3229,13 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                        notify_ring(dev, &dev_priv->ring[RCS]);
 
                for_each_pipe(pipe) {
+                       int plane = pipe;
+                       if (HAS_FBC(dev))
+                               plane = !plane;
+
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                           i8xx_handle_vblank(dev, pipe, iir))
-                               flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+                           i8xx_handle_vblank(dev, plane, pipe, iir))
+                               flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -3418,7 +3431,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                for_each_pipe(pipe) {
                        int plane = pipe;
-                       if (IS_MOBILE(dev))
+                       if (HAS_FBC(dev))
                                plane = !plane;
 
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3655,7 +3668,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                                  hotplug_status);
 
                        intel_hpd_irq_handler(dev, hotplug_trigger,
-                                             IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
+                                             IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
+
+                       if (IS_G4X(dev) &&
+                           (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
+                               dp_aux_irq_handler(dev);
 
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
@@ -3893,8 +3910,8 @@ void hsw_pc8_disable_interrupts(struct drm_device *dev)
        dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
        dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
 
-       ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
-       ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+       ironlake_disable_display_irq(dev_priv, 0xffffffff);
+       ibx_disable_display_interrupt(dev_priv, 0xffffffff);
        ilk_disable_gt_irq(dev_priv, 0xffffffff);
        snb_disable_pm_irq(dev_priv, 0xffffffff);
 
@@ -3908,34 +3925,26 @@ void hsw_pc8_restore_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
-       uint32_t val, expected;
+       uint32_t val;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
        val = I915_READ(DEIMR);
-       expected = ~DE_PCH_EVENT_IVB;
-       WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+       WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
 
-       val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
-       expected = ~SDE_HOTPLUG_MASK_CPT;
-       WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
-            val, expected);
+       val = I915_READ(SDEIMR);
+       WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
 
        val = I915_READ(GTIMR);
-       expected = 0xffffffff;
-       WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+       WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
 
        val = I915_READ(GEN6_PMIMR);
-       expected = 0xffffffff;
-       WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
-            expected);
+       WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
 
        dev_priv->pc8.irqs_disabled = false;
 
        ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
-       ibx_enable_display_interrupt(dev_priv,
-                                    ~dev_priv->pc8.regsave.sdeimr &
-                                    ~SDE_HOTPLUG_MASK_CPT);
+       ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
        ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
        snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
        I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
index ee2742122a02561f910b6d9d5383418794234eff..a48b7cad6f1135c29742f39cacf23e359f92faf4 100644 (file)
 #define   MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
 #define   MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
 #define   MI_INVALIDATE_ISP    (1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD         MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF          MI_INSTR(0x08, 0)
+#define   MI_ARB_ENABLE                        (1<<0)
+#define   MI_ARB_DISABLE               (0<<0)
 #define MI_BATCH_BUFFER_END    MI_INSTR(0x0a, 0)
 #define MI_SUSPEND_FLUSH       MI_INSTR(0x0b, 0)
 #define   MI_SUSPEND_FLUSH_EN  (1<<0)
-#define MI_REPORT_HEAD         MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP                MI_INSTR(0x11, 0)
 #define   MI_OVERLAY_CONTINUE  (0x0<<21)
 #define   MI_OVERLAY_ON                (0x1<<21)
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
 #define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
 #define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_ARB_ON_OFF          MI_INSTR(0x08, 0)
-#define   MI_ARB_ENABLE                        (1<<0)
-#define   MI_ARB_DISABLE               (0<<0)
-
+#define MI_SEMAPHORE_MBOX      MI_INSTR(0x16, 1) /* gen6+ */
+#define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define   MI_SEMAPHORE_UPDATE      (1<<21)
+#define   MI_SEMAPHORE_COMPARE     (1<<20)
+#define   MI_SEMAPHORE_REGISTER            (1<<18)
+#define   MI_SEMAPHORE_SYNC_VR     (0<<16) /* RCS  wait for VCS  (RVSYNC) */
+#define   MI_SEMAPHORE_SYNC_VER            (1<<16) /* RCS  wait for VECS (RVESYNC) */
+#define   MI_SEMAPHORE_SYNC_BR     (2<<16) /* RCS  wait for BCS  (RBSYNC) */
+#define   MI_SEMAPHORE_SYNC_BV     (0<<16) /* VCS  wait for BCS  (VBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEV            (1<<16) /* VCS  wait for VECS (VVESYNC) */
+#define   MI_SEMAPHORE_SYNC_RV     (2<<16) /* VCS  wait for RCS  (VRSYNC) */
+#define   MI_SEMAPHORE_SYNC_RB     (0<<16) /* BCS  wait for RCS  (BRSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEB            (1<<16) /* BCS  wait for VECS (BVESYNC) */
+#define   MI_SEMAPHORE_SYNC_VB     (2<<16) /* BCS  wait for VCS  (BVSYNC) */
+#define   MI_SEMAPHORE_SYNC_BVE            (0<<16) /* VECS wait for BCS  (VEBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VVE            (1<<16) /* VECS wait for VCS  (VEVSYNC) */
+#define   MI_SEMAPHORE_SYNC_RVE            (2<<16) /* VECS wait for RCS  (VERSYNC) */
+#define   MI_SEMAPHORE_SYNC_INVALID  (3<<16)
 #define MI_SET_CONTEXT         MI_INSTR(0x18, 0)
 #define   MI_MM_SPACE_GTT              (1<<8)
 #define   MI_MM_SPACE_PHYSICAL         (0<<8)
  */
 #define MI_LOAD_REGISTER_IMM(x)        MI_INSTR(0x22, 2*x-1)
 #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
-#define  MI_SRM_LRM_GLOBAL_GTT         (1<<22)
+#define   MI_SRM_LRM_GLOBAL_GTT                (1<<22)
 #define MI_FLUSH_DW            MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX      (1<<21)
 #define   MI_INVALIDATE_TLB            (1<<18)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE          (1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define   MI_BATCH_NON_SECURE_I965     (1<<8)
+#define   MI_BATCH_NON_SECURE_I965     (1<<8)
 #define   MI_BATCH_PPGTT_HSW           (1<<8)
-#define   MI_BATCH_NON_SECURE_HSW      (1<<13)
+#define   MI_BATCH_NON_SECURE_HSW      (1<<13)
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT             (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8     MI_INSTR(0x31, 1)
-#define MI_SEMAPHORE_MBOX      MI_INSTR(0x16, 1) /* gen6+ */
-#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
-#define  MI_SEMAPHORE_UPDATE       (1<<21)
-#define  MI_SEMAPHORE_COMPARE      (1<<20)
-#define  MI_SEMAPHORE_REGISTER     (1<<18)
-#define  MI_SEMAPHORE_SYNC_VR      (0<<16) /* RCS  wait for VCS  (RVSYNC) */
-#define  MI_SEMAPHORE_SYNC_VER     (1<<16) /* RCS  wait for VECS (RVESYNC) */
-#define  MI_SEMAPHORE_SYNC_BR      (2<<16) /* RCS  wait for BCS  (RBSYNC) */
-#define  MI_SEMAPHORE_SYNC_BV      (0<<16) /* VCS  wait for BCS  (VBSYNC) */
-#define  MI_SEMAPHORE_SYNC_VEV     (1<<16) /* VCS  wait for VECS (VVESYNC) */
-#define  MI_SEMAPHORE_SYNC_RV      (2<<16) /* VCS  wait for RCS  (VRSYNC) */
-#define  MI_SEMAPHORE_SYNC_RB      (0<<16) /* BCS  wait for RCS  (BRSYNC) */
-#define  MI_SEMAPHORE_SYNC_VEB     (1<<16) /* BCS  wait for VECS (BVESYNC) */
-#define  MI_SEMAPHORE_SYNC_VB      (2<<16) /* BCS  wait for VCS  (BVSYNC) */
-#define  MI_SEMAPHORE_SYNC_BVE     (0<<16) /* VECS wait for BCS  (VEBSYNC) */
-#define  MI_SEMAPHORE_SYNC_VVE     (1<<16) /* VECS wait for VCS  (VEVSYNC) */
-#define  MI_SEMAPHORE_SYNC_RVE     (2<<16) /* VECS wait for RCS  (VERSYNC) */
-#define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
+
 
 #define MI_PREDICATE_RESULT_2  (0x2214)
 #define  LOWER_SLICE_ENABLED   (1<<0)
 #define   IOSF_BYTE_ENABLES_SHIFT              4
 #define   IOSF_BAR_SHIFT                       1
 #define   IOSF_SB_BUSY                         (1<<0)
+#define   IOSF_PORT_BUNIT                      0x3
 #define   IOSF_PORT_PUNIT                      0x4
 #define   IOSF_PORT_NC                         0x11
 #define   IOSF_PORT_DPIO                       0x12
 #define   IOSF_PORT_CCK                                0x14
 #define   IOSF_PORT_CCU                                0xA9
 #define   IOSF_PORT_GPS_CORE                   0x48
+#define   IOSF_PORT_FLISDSI                    0x1B
 #define VLV_IOSF_DATA                          (VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR                          (VLV_DISPLAY_BASE + 0x2108)
 
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC                                0x11
+
 #define PUNIT_OPCODE_REG_READ                  6
 #define PUNIT_OPCODE_REG_WRITE                 7
 
+#define PUNIT_REG_DSPFREQ                      0x36
+#define   DSPFREQSTAT_SHIFT                    30
+#define   DSPFREQSTAT_MASK                     (0x3 << DSPFREQSTAT_SHIFT)
+#define   DSPFREQGUAR_SHIFT                    14
+#define   DSPFREQGUAR_MASK                     (0x3 << DSPFREQGUAR_SHIFT)
 #define PUNIT_REG_PWRGT_CTRL                   0x60
 #define PUNIT_REG_PWRGT_STATUS                 0x61
 #define          PUNIT_CLK_GATE                        1
 #define  DSI_PLL_N1_DIV_MASK                   (3 << 16)
 #define  DSI_PLL_M1_DIV_SHIFT                  0
 #define  DSI_PLL_M1_DIV_MASK                   (0x1ff << 0)
+#define CCK_DISPLAY_CLOCK_CONTROL              0x6b
 
 /*
  * DPIO - a special bus for various display related registers to hide behind
 #define  DPIO_SFR_BYPASS               (1<<1)
 #define  DPIO_CMNRST                   (1<<0)
 
-#define _DPIO_TX3_SWING_CTL4_A         0x690
-#define _DPIO_TX3_SWING_CTL4_B         0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
-                                       _DPIO_TX3_SWING_CTL4_B)
+#define DPIO_PHY(pipe)                 ((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy)                (dev_priv->dpio_phy_iosf_port[phy])
 
 /*
  * Per pipe/PLL DPIO regs
  */
-#define _DPIO_DIV_A                    0x800c
+#define _VLV_PLL_DW3_CH0               0x800c
 #define   DPIO_POST_DIV_SHIFT          (28) /* 3 bits */
 #define   DPIO_POST_DIV_DAC            0
 #define   DPIO_POST_DIV_HDMIDP         1 /* DAC 225-400M rate */
 #define   DPIO_ENABLE_CALIBRATION      (1<<11)
 #define   DPIO_M1DIV_SHIFT             (8) /* 3 bits */
 #define   DPIO_M2DIV_MASK              0xff
-#define _DPIO_DIV_B                    0x802c
-#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+#define _VLV_PLL_DW3_CH1               0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
 
-#define _DPIO_REFSFR_A                 0x8014
+#define _VLV_PLL_DW5_CH0               0x8014
 #define   DPIO_REFSEL_OVERRIDE         27
 #define   DPIO_PLL_MODESEL_SHIFT       24 /* 3 bits */
 #define   DPIO_BIAS_CURRENT_CTL_SHIFT  21 /* 3 bits, always 0x7 */
 #define   DPIO_PLL_REFCLK_SEL_MASK     3
 #define   DPIO_DRIVER_CTL_SHIFT                12 /* always set to 0x8 */
 #define   DPIO_CLK_BIAS_CTL_SHIFT      8 /* always set to 0x5 */
-#define _DPIO_REFSFR_B                 0x8034
-#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+#define _VLV_PLL_DW5_CH1               0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
 
-#define _DPIO_CORE_CLK_A               0x801c
-#define _DPIO_CORE_CLK_B               0x803c
-#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+#define _VLV_PLL_DW7_CH0               0x801c
+#define _VLV_PLL_DW7_CH1               0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
 
-#define _DPIO_IREF_CTL_A               0x8040
-#define _DPIO_IREF_CTL_B               0x8060
-#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+#define _VLV_PLL_DW8_CH0               0x8040
+#define _VLV_PLL_DW8_CH1               0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
 
-#define DPIO_IREF_BCAST                        0xc044
-#define _DPIO_IREF_A                   0x8044
-#define _DPIO_IREF_B                   0x8064
-#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+#define VLV_PLL_DW9_BCAST              0xc044
+#define _VLV_PLL_DW9_CH0               0x8044
+#define _VLV_PLL_DW9_CH1               0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
 
-#define _DPIO_PLL_CML_A                        0x804c
-#define _DPIO_PLL_CML_B                        0x806c
-#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+#define _VLV_PLL_DW10_CH0              0x8048
+#define _VLV_PLL_DW10_CH1              0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
 
-#define _DPIO_LPF_COEFF_A              0x8048
-#define _DPIO_LPF_COEFF_B              0x8068
-#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
+#define _VLV_PLL_DW11_CH0              0x804c
+#define _VLV_PLL_DW11_CH1              0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
 
-#define DPIO_CALIBRATION               0x80ac
+/* Spec for ref block start counts at DW10 */
+#define VLV_REF_DW13                   0x80ac
 
-#define DPIO_FASTCLK_DISABLE           0x8100
+#define VLV_CMN_DW0                    0x8100
 
 /*
  * Per DDI channel DPIO regs
  */
 
-#define _DPIO_PCS_TX_0                 0x8200
-#define _DPIO_PCS_TX_1                 0x8400
+#define _VLV_PCS_DW0_CH0               0x8200
+#define _VLV_PCS_DW0_CH1               0x8400
 #define   DPIO_PCS_TX_LANE2_RESET      (1<<16)
 #define   DPIO_PCS_TX_LANE1_RESET      (1<<7)
-#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
-#define _DPIO_PCS_CLK_0                        0x8204
-#define _DPIO_PCS_CLK_1                        0x8404
+#define _VLV_PCS_DW1_CH0               0x8204
+#define _VLV_PCS_DW1_CH1               0x8404
 #define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN        (1<<22)
 #define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
 #define   DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
 #define   DPIO_PCS_CLK_SOFT_RESET      (1<<5)
-#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
-
-#define _DPIO_PCS_CTL_OVR1_A           0x8224
-#define _DPIO_PCS_CTL_OVR1_B           0x8424
-#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
-                                      _DPIO_PCS_CTL_OVR1_B)
-
-#define _DPIO_PCS_STAGGER0_A           0x822c
-#define _DPIO_PCS_STAGGER0_B           0x842c
-#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
-                                     _DPIO_PCS_STAGGER0_B)
-
-#define _DPIO_PCS_STAGGER1_A           0x8230
-#define _DPIO_PCS_STAGGER1_B           0x8430
-#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
-                                     _DPIO_PCS_STAGGER1_B)
-
-#define _DPIO_PCS_CLOCKBUF0_A          0x8238
-#define _DPIO_PCS_CLOCKBUF0_B          0x8438
-#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
-                                      _DPIO_PCS_CLOCKBUF0_B)
-
-#define _DPIO_PCS_CLOCKBUF8_A          0x825c
-#define _DPIO_PCS_CLOCKBUF8_B          0x845c
-#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
-                                      _DPIO_PCS_CLOCKBUF8_B)
-
-#define _DPIO_TX_SWING_CTL2_A          0x8288
-#define _DPIO_TX_SWING_CTL2_B          0x8488
-#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
-                                      _DPIO_TX_SWING_CTL2_B)
-
-#define _DPIO_TX_SWING_CTL3_A          0x828c
-#define _DPIO_TX_SWING_CTL3_B          0x848c
-#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
-                                      _DPIO_TX_SWING_CTL3_B)
-
-#define _DPIO_TX_SWING_CTL4_A          0x8290
-#define _DPIO_TX_SWING_CTL4_B          0x8490
-#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
-                                      _DPIO_TX_SWING_CTL4_B)
-
-#define _DPIO_TX_OCALINIT_0            0x8294
-#define _DPIO_TX_OCALINIT_1            0x8494
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+
+#define _VLV_PCS_DW8_CH0               0x8220
+#define _VLV_PCS_DW8_CH1               0x8420
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
+
+#define _VLV_PCS01_DW8_CH0             0x0220
+#define _VLV_PCS23_DW8_CH0             0x0420
+#define _VLV_PCS01_DW8_CH1             0x2620
+#define _VLV_PCS23_DW8_CH1             0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
+
+#define _VLV_PCS_DW9_CH0               0x8224
+#define _VLV_PCS_DW9_CH1               0x8424
+#define        VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+
+#define _VLV_PCS_DW11_CH0              0x822c
+#define _VLV_PCS_DW11_CH1              0x842c
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+
+#define _VLV_PCS_DW12_CH0              0x8230
+#define _VLV_PCS_DW12_CH1              0x8430
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
+
+#define _VLV_PCS_DW14_CH0              0x8238
+#define _VLV_PCS_DW14_CH1              0x8438
+#define        VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
+
+#define _VLV_PCS_DW23_CH0              0x825c
+#define _VLV_PCS_DW23_CH1              0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
+
+#define _VLV_TX_DW2_CH0                        0x8288
+#define _VLV_TX_DW2_CH1                        0x8488
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
+
+#define _VLV_TX_DW3_CH0                        0x828c
+#define _VLV_TX_DW3_CH1                        0x848c
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0                        0x8290
+#define _VLV_TX_DW4_CH1                        0x8490
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0               0x690
+#define _VLV_TX3_DW4_CH1               0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0                        0x8294
+#define _VLV_TX_DW5_CH1                        0x8494
 #define   DPIO_TX_OCALINIT_EN          (1<<31)
-#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
-                                    _DPIO_TX_OCALINIT_1)
-
-#define _DPIO_TX_CTL_0                 0x82ac
-#define _DPIO_TX_CTL_1                 0x84ac
-#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
-
-#define _DPIO_TX_LANE_0                        0x82b8
-#define _DPIO_TX_LANE_1                        0x84b8
-#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
-
-#define _DPIO_DATA_CHANNEL1            0x8220
-#define _DPIO_DATA_CHANNEL2            0x8420
-#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
-
-#define _DPIO_PORT0_PCS0               0x0220
-#define _DPIO_PORT0_PCS1               0x0420
-#define _DPIO_PORT1_PCS2               0x2620
-#define _DPIO_PORT1_PCS3               0x2820
-#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
-#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
-#define DPIO_DATA_CHANNEL1              0x8220
-#define DPIO_DATA_CHANNEL2              0x8420
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
+
+#define _VLV_TX_DW11_CH0               0x82ac
+#define _VLV_TX_DW11_CH1               0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
+
+#define _VLV_TX_DW14_CH0               0x82b8
+#define _VLV_TX_DW14_CH1               0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
 
 /*
  * Fence registers
 #define HWSTAM         0x02098
 #define DMA_FADD_I8XX  0x020d0
 #define RING_BBSTATE(base)     ((base)+0x110)
+#define RING_BBADDR(base)      ((base)+0x140)
+#define RING_BBADDR_UDW(base)  ((base)+0x168) /* gen8+ */
 
 #define ERROR_GEN6     0x040a0
 #define GEN7_ERR_INT   0x44040
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
-#define BB_ADDR                0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL  0x02170 /* 915+ only */
 #define GFX_FLSH_CNTL_GEN6     0x101008
 #define   GFX_FLSH_CNTL_EN     (1<<0)
 
 #define GEN7_FF_THREAD_MODE            0x20a0
 #define   GEN7_FF_SCHED_MASK           0x0077070
+#define   GEN8_FF_DS_REF_CNT_FFME      (1 << 19)
 #define   GEN7_FF_TS_SCHED_HS1         (0x5<<16)
 #define   GEN7_FF_TS_SCHED_HS0         (0x3<<16)
 #define   GEN7_FF_TS_SCHED_LOAD_BALANCE        (0x1<<16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
 #define   FBC_CTL_C3_IDLE      (1<<13)
 #define   FBC_CTL_STRIDE_SHIFT (5)
-#define   FBC_CTL_FENCENO      (1<<0)
+#define   FBC_CTL_FENCENO_SHIFT        (0)
 #define FBC_COMMAND            0x0320c
 #define   FBC_CMD_COMPRESS     (1<<0)
 #define FBC_STATUS             0x03210
 #define   FBC_STAT_COMPRESSING (1<<31)
 #define   FBC_STAT_COMPRESSED  (1<<30)
 #define   FBC_STAT_MODIFIED    (1<<29)
-#define   FBC_STAT_CURRENT_LINE        (1<<0)
+#define   FBC_STAT_CURRENT_LINE_SHIFT  (0)
 #define FBC_CONTROL2           0x03214
 #define   FBC_CTL_FENCE_DBL    (0<<4)
 #define   FBC_CTL_IDLE_IMM     (0<<2)
  * Please check the detailed lore in the commit message for for experimental
  * evidence.
  */
-#define   PORTD_HOTPLUG_LIVE_STATUS               (1 << 29)
-#define   PORTC_HOTPLUG_LIVE_STATUS               (1 << 28)
-#define   PORTB_HOTPLUG_LIVE_STATUS               (1 << 27)
+#define   PORTD_HOTPLUG_LIVE_STATUS_G4X                (1 << 29)
+#define   PORTC_HOTPLUG_LIVE_STATUS_G4X                (1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_G4X                (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define   PORTD_HOTPLUG_LIVE_STATUS_VLV                (1 << 27)
+#define   PORTC_HOTPLUG_LIVE_STATUS_VLV                (1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_VLV                (1 << 29)
 #define   PORTD_HOTPLUG_INT_STATUS             (3 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS             (3 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS             (3 << 17)
 #define   CRT_HOTPLUG_MONITOR_COLOR            (3 << 8)
 #define   CRT_HOTPLUG_MONITOR_MONO             (2 << 8)
 #define   CRT_HOTPLUG_MONITOR_NONE             (0 << 8)
+#define   DP_AUX_CHANNEL_D_INT_STATUS_G4X      (1 << 6)
+#define   DP_AUX_CHANNEL_C_INT_STATUS_G4X      (1 << 5)
+#define   DP_AUX_CHANNEL_B_INT_STATUS_G4X      (1 << 4)
+#define   DP_AUX_CHANNEL_MASK_INT_STATUS_G4X   (7 << 4)
+
 /* SDVO is different across gen3/4 */
 #define   SDVOC_HOTPLUG_INT_STATUS_G4X         (1 << 3)
 #define   SDVOB_HOTPLUG_INT_STATUS_G4X         (1 << 2)
 /* the unit of memory self-refresh latency time is 0.5us */
 #define  ILK_SRLT_MASK         0x3f
 
-/* define the fifo size on Ironlake */
-#define ILK_DISPLAY_FIFO       128
-#define ILK_DISPLAY_MAXWM      64
-#define ILK_DISPLAY_DFTWM      8
-#define ILK_CURSOR_FIFO                32
-#define ILK_CURSOR_MAXWM       16
-#define ILK_CURSOR_DFTWM       8
-
-#define ILK_DISPLAY_SR_FIFO    512
-#define ILK_DISPLAY_MAX_SRWM   0x1ff
-#define ILK_DISPLAY_DFT_SRWM   0x3f
-#define ILK_CURSOR_SR_FIFO     64
-#define ILK_CURSOR_MAX_SRWM    0x3f
-#define ILK_CURSOR_DFT_SRWM    8
-
-#define ILK_FIFO_LINE_SIZE     64
-
-/* define the WM info on Sandybridge */
-#define SNB_DISPLAY_FIFO       128
-#define SNB_DISPLAY_MAXWM      0x7f    /* bit 16:22 */
-#define SNB_DISPLAY_DFTWM      8
-#define SNB_CURSOR_FIFO                32
-#define SNB_CURSOR_MAXWM       0x1f    /* bit 4:0 */
-#define SNB_CURSOR_DFTWM       8
-
-#define SNB_DISPLAY_SR_FIFO    512
-#define SNB_DISPLAY_MAX_SRWM   0x1ff   /* bit 16:8 */
-#define SNB_DISPLAY_DFT_SRWM   0x3f
-#define SNB_CURSOR_SR_FIFO     64
-#define SNB_CURSOR_MAX_SRWM    0x3f    /* bit 5:0 */
-#define SNB_CURSOR_DFT_SRWM    8
-
-#define SNB_FBC_MAX_SRWM       0xf     /* bit 23:20 */
-
-#define SNB_FIFO_LINE_SIZE     64
-
 
 /* the address where we get all kinds of latency value */
 #define SSKPD                  0x5d10
 #define DISP_BASEADDR_MASK     (0xfffff000)
 #define I915_LO_DISPBASE(val)  (val & ~DISP_BASEADDR_MASK)
 #define I915_HI_DISPBASE(val)  (val & DISP_BASEADDR_MASK)
-#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
-               (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
 
 /* VBIOS flags */
 #define SWF00                  (dev_priv->info->display_mmio_offset + 0x71410)
 
 #define _SPACNTR               (VLV_DISPLAY_BASE + 0x72180)
 #define   SP_ENABLE                    (1<<31)
-#define   SP_GEAMMA_ENABLE             (1<<30)
+#define   SP_GAMMA_ENABLE              (1<<30)
 #define   SP_PIXFORMAT_MASK            (0xf<<26)
 #define   SP_FORMAT_YUV422             (0<<26)
 #define   SP_FORMAT_BGR565             (5<<26)
 #define DISP_ARB_CTL   0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 #define  DISP_FBC_WM_DIS               (1<<15)
+#define DISP_ARB_CTL2  0x45004
+#define  DISP_DATA_PARTITION_5_6       (1<<6)
 #define GEN7_MSG_CTL   0x45010
 #define  WAIT_FOR_PCH_RESET_ACK                (1<<1)
 #define  WAIT_FOR_PCH_FLR_ACK          (1<<0)
 #define GEN7_L3SQCREG4                         0xb034
 #define  L3SQ_URB_READ_CAM_MATCH_DISABLE       (1<<27)
 
+/* GEN8 chicken */
+#define HDC_CHICKEN0                           0x7300
+#define  HDC_FORCE_NON_COHERENT                        (1<<4)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
 #define  FORCEWAKE_ACK                         0x130090
 #define  VLV_GTLC_WAKE_CTRL                    0x130090
 #define  VLV_GTLC_PW_STATUS                    0x130094
+#define VLV_GTLC_PW_RENDER_STATUS_MASK         0x80
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK          0x20
 #define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
 #define   FORCEWAKE_KERNEL                     0x1
 #define   FORCEWAKE_USER                       0x2
 #define    FORCEWAKE_MT_ENABLE                 (1<<5)
 
 #define  GTFIFODBG                             0x120000
-#define    GT_FIFO_CPU_ERROR_MASK              7
+#define    GT_FIFO_SBDROPERR                   (1<<6)
+#define    GT_FIFO_BLOBDROPERR                 (1<<5)
+#define    GT_FIFO_SB_READ_ABORTERR            (1<<4)
+#define    GT_FIFO_DROPERR                     (1<<3)
 #define    GT_FIFO_OVFERR                      (1<<2)
 #define    GT_FIFO_IAWRERR                     (1<<1)
 #define    GT_FIFO_IARDERR                     (1<<0)
 
-#define  GT_FIFO_FREE_ENTRIES                  0x120008
+#define  GTFIFOCTL                             0x120008
+#define    GT_FIFO_FREE_ENTRIES_MASK           0x7f
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
 
 #define  HSW_IDICR                             0x9008
 #define   GEN6_RC_CTL_RC6_ENABLE               (1<<18)
 #define   GEN6_RC_CTL_RC1e_ENABLE              (1<<20)
 #define   GEN6_RC_CTL_RC7_ENABLE               (1<<22)
+#define   VLV_RC_CTL_CTX_RST_PARALLEL          (1<<24)
 #define   GEN7_RC_CTL_TO_MODE                  (1<<28)
 #define   GEN6_RC_CTL_EI_MODE(x)               ((x)<<27)
 #define   GEN6_RC_CTL_HW_ENABLE                        (1<<31)
index 98790c7cccb1ab0902662e4cc541de9966f2058c..8150fdc08d497122c49959b9bbfaf0090259c7df 100644 (file)
@@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev)
 static void i915_save_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
 
        /* Display arbitration control */
        if (INTEL_INFO(dev)->gen <= 4)
@@ -203,46 +202,27 @@ static void i915_save_display(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_save_display_reg(dev);
 
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
        /* LVDS state */
        if (HAS_PCH_SPLIT(dev)) {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-               dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
-               dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
-               dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
-               dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
                if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
                        dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
                dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
 
-               dev_priv->regfile.saveBLC_PWM_CTL =
-                       I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
                dev_priv->regfile.saveBLC_HIST_CTL =
                        I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
-               dev_priv->regfile.saveBLC_PWM_CTL2 =
-                       I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
-               dev_priv->regfile.saveBLC_PWM_CTL_B =
-                       I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
                dev_priv->regfile.saveBLC_HIST_CTL_B =
                        I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
-               dev_priv->regfile.saveBLC_PWM_CTL2_B =
-                       I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
        } else {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
                dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-               dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
                dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-               if (INTEL_INFO(dev)->gen >= 4)
-                       dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
                if (IS_MOBILE(dev) && !IS_I830(dev))
                        dev_priv->regfile.saveLVDS = I915_READ(LVDS);
        }
 
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
        if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
                dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
@@ -257,7 +237,7 @@ static void i915_save_display(struct drm_device *dev)
        }
 
        /* Only regfile.save FBC state on the platform that supports FBC */
-       if (I915_HAS_FBC(dev)) {
+       if (HAS_FBC(dev)) {
                if (HAS_PCH_SPLIT(dev)) {
                        dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
                } else if (IS_GM45(dev)) {
@@ -278,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 mask = 0xffffffff;
-       unsigned long flags;
 
        /* Display arbitration */
        if (INTEL_INFO(dev)->gen <= 4)
@@ -287,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_restore_display_reg(dev);
 
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
-       /* LVDS state */
-       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-               I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                mask = ~LVDS_PORT_EN;
 
@@ -305,13 +278,6 @@ static void i915_restore_display(struct drm_device *dev)
                I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
-               I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-               /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
-                * otherwise we get blank eDP screen after S3 on some machines
-                */
-               I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
-               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
                I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -319,21 +285,12 @@ static void i915_restore_display(struct drm_device *dev)
                I915_WRITE(RSTDBYCTL,
                           dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
        } else if (IS_VALLEYVIEW(dev)) {
-               I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
-                          dev_priv->regfile.saveBLC_PWM_CTL);
                I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
                           dev_priv->regfile.saveBLC_HIST_CTL);
-               I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
-                          dev_priv->regfile.saveBLC_PWM_CTL2);
-               I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
-                          dev_priv->regfile.saveBLC_PWM_CTL);
                I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
                           dev_priv->regfile.saveBLC_HIST_CTL);
-               I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
-                          dev_priv->regfile.saveBLC_PWM_CTL2);
        } else {
                I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
-               I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
                I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
                I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
                I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
@@ -341,11 +298,9 @@ static void i915_restore_display(struct drm_device *dev)
                I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
        }
 
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
        /* only restore FBC info on the platform that supports FBC*/
        intel_disable_fbc(dev);
-       if (I915_HAS_FBC(dev)) {
+       if (HAS_FBC(dev)) {
                if (HAS_PCH_SPLIT(dev)) {
                        I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
                } else if (IS_GM45(dev)) {
index cef38fd320a7c5c53c687ca5b67b0d1d3192d615..33bcae314bf86ea5f7a2deafb80625d0382634af 100644 (file)
@@ -40,10 +40,13 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
+       u32 ret;
 
        if (!intel_enable_rc6(dev))
                return 0;
 
+       intel_runtime_pm_get(dev_priv);
+
        /* On VLV, residency time is in CZ units rather than 1.28us */
        if (IS_VALLEYVIEW(dev)) {
                u32 clkctl2;
@@ -52,7 +55,8 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
                        CLK_CTL2_CZCOUNT_30NS_SHIFT;
                if (!clkctl2) {
                        WARN(!clkctl2, "bogus CZ count value");
-                       return 0;
+                       ret = 0;
+                       goto out;
                }
                units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
@@ -62,7 +66,11 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
        }
 
        raw_time = I915_READ(reg) * units;
-       return DIV_ROUND_UP_ULL(raw_time, div);
+       ret = DIV_ROUND_UP_ULL(raw_time, div);
+
+out:
+       intel_runtime_pm_put(dev_priv);
+       return ret;
 }
 
 static ssize_t
@@ -183,13 +191,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
+       if (!HAS_HW_CONTEXTS(drm_dev))
+               return -ENXIO;
+
        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;
 
-       if (dev_priv->hw_contexts_disabled)
-               return -ENXIO;
-
        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;
@@ -259,7 +267,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                u32 freq;
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-               ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+               ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
        } else {
                ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
        }
@@ -276,8 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
-                       vlv_gpu_freq(dev_priv->mem_freq,
-                                    dev_priv->rps.rpe_delay));
+                       vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -291,7 +298,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
-               ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
        else
                ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -318,7 +325,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_VALLEYVIEW(dev_priv->dev)) {
-               val = vlv_freq_opcode(dev_priv->mem_freq, val);
+               val = vlv_freq_opcode(dev_priv, val);
 
                hw_max = valleyview_rps_max_freq(dev_priv);
                hw_min = valleyview_rps_min_freq(dev_priv);
@@ -342,15 +349,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                DRM_DEBUG("User requested overclocking to %d\n",
                          val * GT_FREQUENCY_MULTIPLIER);
 
+       dev_priv->rps.max_delay = val;
+
        if (dev_priv->rps.cur_delay > val) {
-               if (IS_VALLEYVIEW(dev_priv->dev))
-                       valleyview_set_rps(dev_priv->dev, val);
+               if (IS_VALLEYVIEW(dev))
+                       valleyview_set_rps(dev, val);
                else
-                       gen6_set_rps(dev_priv->dev, val);
+                       gen6_set_rps(dev, val);
        }
 
-       dev_priv->rps.max_delay = val;
-
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return count;
@@ -367,7 +374,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
-               ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+               ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
        else
                ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -394,7 +401,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_VALLEYVIEW(dev)) {
-               val = vlv_freq_opcode(dev_priv->mem_freq, val);
+               val = vlv_freq_opcode(dev_priv, val);
 
                hw_max = valleyview_rps_max_freq(dev_priv);
                hw_min = valleyview_rps_min_freq(dev_priv);
@@ -411,15 +418,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                return -EINVAL;
        }
 
+       dev_priv->rps.min_delay = val;
+
        if (dev_priv->rps.cur_delay < val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
-                       gen6_set_rps(dev_priv->dev, val);
+                       gen6_set_rps(dev, val);
        }
 
-       dev_priv->rps.min_delay = val;
-
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        return count;
@@ -449,7 +456,9 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
+       intel_runtime_pm_get(dev_priv);
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        if (attr == &dev_attr_gt_RP0_freq_mhz) {
index 967da4772c449a2be05d16bce648fff9bd19a63e..caa18e855815eaf04a39ecb6eedcc572cf4d6290 100644 (file)
@@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev)
        }
        /* FIXME: regfile.save TV & SDVO state */
 
+       /* Backlight */
+       if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+               dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+               dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+               dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+       } else {
+               dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+               if (INTEL_INFO(dev)->gen >= 4)
+                       dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+       }
+
        return;
 }
 
@@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev)
        int dpll_b_reg, fpb0_reg, fpb1_reg;
        int i;
 
+       /* Backlight */
+       if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+               I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+               /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+                * otherwise we get blank eDP screen after S3 on some machines
+                */
+               I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+       } else {
+               if (INTEL_INFO(dev)->gen >= 4)
+                       I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+       }
+
        /* Display port ratios (must be done before clock is set) */
        if (SUPPORTS_INTEGRATED_DP(dev)) {
                I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
index e4fba39631a5ae2cd5901f85c73738406ef7cdc9..f22041973f3a0a426e29424cb60ba68e042c3ed8 100644 (file)
@@ -281,6 +281,34 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        }
 }
 
+static void
+parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+       const struct bdb_lfp_backlight_data *backlight_data;
+       const struct bdb_lfp_backlight_data_entry *entry;
+
+       backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
+       if (!backlight_data)
+               return;
+
+       if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+               DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
+                             backlight_data->entry_size);
+               return;
+       }
+
+       entry = &backlight_data->data[panel_type];
+
+       dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+       dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+       DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
+                     "active %s, min brightness %u, level %u\n",
+                     dev_priv->vbt.backlight.pwm_freq_hz,
+                     dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+                     entry->min_brightness,
+                     backlight_data->level[panel_type]);
+}
+
 /* Try to find sdvo panel data */
 static void
 parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
@@ -327,12 +355,12 @@ static int intel_bios_ssc_frequency(struct drm_device *dev,
 {
        switch (INTEL_INFO(dev)->gen) {
        case 2:
-               return alternate ? 66 : 48;
+               return alternate ? 66667 : 48000;
        case 3:
        case 4:
-               return alternate ? 100 : 96;
+               return alternate ? 100000 : 96000;
        default:
-               return alternate ? 100 : 120;
+               return alternate ? 100000 : 120000;
        }
 }
 
@@ -796,7 +824,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
         */
        dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
                        !HAS_PCH_SPLIT(dev));
-       DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+       DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
 
        for (port = PORT_A; port < I915_MAX_PORTS; port++) {
                struct ddi_vbt_port_info *info =
@@ -894,6 +922,7 @@ intel_parse_bios(struct drm_device *dev)
        parse_general_features(dev_priv, bdb);
        parse_general_definitions(dev_priv, bdb);
        parse_lfp_panel_data(dev_priv, bdb);
+       parse_lfp_backlight(dev_priv, bdb);
        parse_sdvo_panel_data(dev_priv, bdb);
        parse_sdvo_device_mapping(dev_priv, bdb);
        parse_device_mapping(dev_priv, bdb);
index f580a2b0ddd30f5d4338c7ab61414b6fe8bc62fc..282de5e9f39dee6566bb6d220f378864df86e8b9 100644 (file)
@@ -39,7 +39,7 @@ struct vbt_header {
        u8 reserved0;
        u32 bdb_offset;                 /**< from beginning of VBT */
        u32 aim_offset[4];              /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
 
 struct bdb_header {
        u8 signature[16];               /**< Always 'BIOS_DATA_BLOCK' */
@@ -65,7 +65,7 @@ struct vbios_data {
        u8 rsvd4; /* popup memory size */
        u8 resize_pci_bios;
        u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
 
 /*
  * There are several types of BIOS data blocks (BDBs), each block has
@@ -142,7 +142,7 @@ struct bdb_general_features {
        u8 dp_ssc_enb:1;        /* PCH attached eDP supports SSC */
        u8 dp_ssc_freq:1;       /* SSC freq for PCH attached eDP */
        u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
 
 /* pre-915 */
 #define GPIO_PIN_DVI_LVDS      0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -225,7 +225,7 @@ struct old_child_dev_config {
        u8  dvo2_wiring;
        u16 extended_type;
        u8  dvo_function;
-} __attribute__((packed));
+} __packed;
 
 /* This one contains field offsets that are known to be common for all BDB
  * versions. Notice that the meaning of the contents contents may still change,
@@ -238,7 +238,7 @@ struct common_child_dev_config {
        u8 not_common2[2];
        u8 ddc_pin;
        u16 edid_ptr;
-} __attribute__((packed));
+} __packed;
 
 /* This field changes depending on the BDB version, so the most reliable way to
  * read it is by checking the BDB version and reading the raw pointer. */
@@ -279,7 +279,7 @@ struct bdb_general_definitions {
         *           sizeof(child_device_config);
         */
        union child_device_config devices[0];
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_options {
        u8 panel_type;
@@ -293,7 +293,7 @@ struct bdb_lvds_options {
        u8 lvds_edid:1;
        u8 rsvd2:1;
        u8 rsvd4;
-} __attribute__((packed));
+} __packed;
 
 /* LFP pointer table contains entries to the struct below */
 struct bdb_lvds_lfp_data_ptr {
@@ -303,12 +303,12 @@ struct bdb_lvds_lfp_data_ptr {
        u8 dvo_table_size;
        u16 panel_pnp_id_offset;
        u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data_ptrs {
        u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
        struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
 
 /* LFP data has 3 blocks per entry */
 struct lvds_fp_timing {
@@ -325,7 +325,7 @@ struct lvds_fp_timing {
        u32 pfit_reg;
        u32 pfit_reg_val;
        u16 terminator;
-} __attribute__((packed));
+} __packed;
 
 struct lvds_dvo_timing {
        u16 clock;              /**< In 10khz */
@@ -353,7 +353,7 @@ struct lvds_dvo_timing {
        u8 vsync_positive:1;
        u8 hsync_positive:1;
        u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
 
 struct lvds_pnp_id {
        u16 mfg_name;
@@ -361,17 +361,33 @@ struct lvds_pnp_id {
        u32 serial;
        u8 mfg_week;
        u8 mfg_year;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data_entry {
        struct lvds_fp_timing fp_timing;
        struct lvds_dvo_timing dvo_timing;
        struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data {
        struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
+
+struct bdb_lfp_backlight_data_entry {
+       u8 type:2;
+       u8 active_low_pwm:1;
+       u8 obsolete1:5;
+       u16 pwm_freq_hz;
+       u8 min_brightness;
+       u8 obsolete2;
+       u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+       u8 entry_size;
+       struct bdb_lfp_backlight_data_entry data[16];
+       u8 level[16];
+} __packed;
 
 struct aimdb_header {
        char signature[16];
@@ -379,12 +395,12 @@ struct aimdb_header {
        u16 aimdb_version;
        u16 aimdb_header_size;
        u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
 
 struct aimdb_block {
        u8 aimdb_id;
        u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
 
 struct vch_panel_data {
        u16 fp_timing_offset;
@@ -395,12 +411,12 @@ struct vch_panel_data {
        u8 text_fitting_size;
        u16 graphics_fitting_offset;
        u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
 
 struct vch_bdb_22 {
        struct aimdb_block aimdb_block;
        struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
 
 struct bdb_sdvo_lvds_options {
        u8 panel_backlight;
@@ -416,7 +432,7 @@ struct bdb_sdvo_lvds_options {
        u8 panel_misc_bits_2;
        u8 panel_misc_bits_3;
        u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
 
 
 #define BDB_DRIVER_FEATURE_NO_LVDS             0
@@ -462,7 +478,7 @@ struct bdb_driver_features {
 
        u8 hdmi_termination;
        u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
 
 #define EDP_18BPP      0
 #define EDP_24BPP      1
@@ -487,14 +503,14 @@ struct edp_power_seq {
        u16 t9;
        u16 t10;
        u16 t11_t12;
-} __attribute__ ((packed));
+} __packed;
 
 struct edp_link_params {
        u8 rate:4;
        u8 lanes:4;
        u8 preemphasis:4;
        u8 vswing:4;
-} __attribute__ ((packed));
+} __packed;
 
 struct bdb_edp {
        struct edp_power_seq power_seqs[16];
@@ -505,7 +521,7 @@ struct bdb_edp {
        /* ith bit indicates enabled/disabled for (i+1)th panel */
        u16 edp_s3d_feature;
        u16 edp_t3_optimization;
-} __attribute__ ((packed));
+} __packed;
 
 void intel_setup_bios(struct drm_device *dev);
 int intel_parse_bios(struct drm_device *dev);
@@ -733,6 +749,6 @@ struct bdb_mipi {
        u32 hl_switch_cnt;
        u32 lp_byte_clk;
        u32 clk_lane_switch_cnt;
-} __attribute__((packed));
+} __packed;
 
 #endif /* _I830_BIOS_H_ */
index b5b1b9b23adf1b24559e7ffdce658ba37e6bbbab..e2e39e65f10954b8076b3ed01b565f0cb173bb0d 100644 (file)
@@ -222,8 +222,9 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
        intel_modeset_check_state(connector->dev);
 }
 
-static int intel_crt_mode_valid(struct drm_connector *connector,
-                               struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
 {
        struct drm_device *dev = connector->dev;
 
index b69dc3e66c165ac77782943450d98602ef75c205..e06b9e017d6ba918b87557de301df47076852434 100644 (file)
@@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = {
 };
 
 static const u32 bdw_ddi_translations_edp[] = {
-       0x00FFFFFF, 0x00000012,         /* DP parameters */
+       0x00FFFFFF, 0x00000012,         /* eDP parameters */
        0x00EBAFFF, 0x00020011,
        0x00C71FFF, 0x0006000F,
        0x00FFFFFF, 0x00020011,
@@ -696,25 +696,25 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
        *n2_out = best.n2;
        *p_out = best.p;
        *r2_out = best.r2;
-
-       DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
-                     clock, *p_out, *n2_out, *r2_out);
 }
 
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
+/*
+ * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and
+ * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
+ * steal the selected PLL. You need to call intel_ddi_pll_enable to actually
+ * enable the PLL.
+ */
+bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
        struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
        int type = intel_encoder->type;
        enum pipe pipe = intel_crtc->pipe;
-       uint32_t reg, val;
        int clock = intel_crtc->config.port_clock;
 
-       /* TODO: reuse PLLs when possible (compare values) */
-
        intel_ddi_put_crtc_pll(crtc);
 
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -736,66 +736,145 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
                        return false;
                }
 
-               /* We don't need to turn any PLL on because we'll use LCPLL. */
-               return true;
-
        } else if (type == INTEL_OUTPUT_HDMI) {
+               uint32_t reg, val;
                unsigned p, n2, r2;
 
-               if (plls->wrpll1_refcount == 0) {
+               intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+               val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+                     WRPLL_DIVIDER_POST(p);
+
+               if (val == I915_READ(WRPLL_CTL1)) {
+                       DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+                                     pipe_name(pipe));
+                       reg = WRPLL_CTL1;
+               } else if (val == I915_READ(WRPLL_CTL2)) {
+                       DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+                                     pipe_name(pipe));
+                       reg = WRPLL_CTL2;
+               } else if (plls->wrpll1_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
                                      pipe_name(pipe));
-                       plls->wrpll1_refcount++;
                        reg = WRPLL_CTL1;
-                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
                } else if (plls->wrpll2_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
                                      pipe_name(pipe));
-                       plls->wrpll2_refcount++;
                        reg = WRPLL_CTL2;
-                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
                } else {
                        DRM_ERROR("No WRPLLs available!\n");
                        return false;
                }
 
-               WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
-                    "WRPLL already enabled\n");
+               DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+                             clock, p, n2, r2);
 
-               intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-               val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
-                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-                     WRPLL_DIVIDER_POST(p);
+               if (reg == WRPLL_CTL1) {
+                       plls->wrpll1_refcount++;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+               } else {
+                       plls->wrpll2_refcount++;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+               }
 
        } else if (type == INTEL_OUTPUT_ANALOG) {
                if (plls->spll_refcount == 0) {
                        DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
                                      pipe_name(pipe));
                        plls->spll_refcount++;
-                       reg = SPLL_CTL;
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
                } else {
                        DRM_ERROR("SPLL already in use\n");
                        return false;
                }
 
-               WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
-                    "SPLL already enabled\n");
-
-               val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
-
        } else {
                WARN(1, "Invalid DDI encoder type %d\n", type);
                return false;
        }
 
-       I915_WRITE(reg, val);
-       udelay(20);
-
        return true;
 }
 
+/*
+ * To be called after intel_ddi_pll_select(). That one selects the PLL to be
+ * used, this one actually enables the PLL.
+ */
+void intel_ddi_pll_enable(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+       int clock = crtc->config.port_clock;
+       uint32_t reg, cur_val, new_val;
+       int refcount;
+       const char *pll_name;
+       uint32_t enable_bit = (1 << 31);
+       unsigned int p, n2, r2;
+
+       BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
+       BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
+
+       switch (crtc->ddi_pll_sel) {
+       case PORT_CLK_SEL_LCPLL_2700:
+       case PORT_CLK_SEL_LCPLL_1350:
+       case PORT_CLK_SEL_LCPLL_810:
+               /*
+                * LCPLL should always be enabled at this point of the mode set
+                * sequence, so nothing to do.
+                */
+               return;
+
+       case PORT_CLK_SEL_SPLL:
+               pll_name = "SPLL";
+               reg = SPLL_CTL;
+               refcount = plls->spll_refcount;
+               new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
+                         SPLL_PLL_SSC;
+               break;
+
+       case PORT_CLK_SEL_WRPLL1:
+       case PORT_CLK_SEL_WRPLL2:
+               if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
+                       pll_name = "WRPLL1";
+                       reg = WRPLL_CTL1;
+                       refcount = plls->wrpll1_refcount;
+               } else {
+                       pll_name = "WRPLL2";
+                       reg = WRPLL_CTL2;
+                       refcount = plls->wrpll2_refcount;
+               }
+
+               intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+               new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+                         WRPLL_DIVIDER_REFERENCE(r2) |
+                         WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
+
+               break;
+
+       case PORT_CLK_SEL_NONE:
+               WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
+               return;
+       default:
+               WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
+               return;
+       }
+
+       cur_val = I915_READ(reg);
+
+       WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
+       if (refcount == 1) {
+               WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
+               I915_WRITE(reg, new_val);
+               POSTING_READ(reg);
+               udelay(20);
+       } else {
+               WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
+       }
+}
+
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1121,9 +1200,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-               ironlake_edp_panel_vdd_on(intel_dp);
                ironlake_edp_panel_on(intel_dp);
-               ironlake_edp_panel_vdd_off(intel_dp, true);
        }
 
        WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1166,7 +1243,6 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-               ironlake_edp_panel_vdd_on(intel_dp);
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
                ironlake_edp_panel_off(intel_dp);
        }
index 2bde35d34eb99d121aee6a51bfff5e2c8f75524a..9fa24347963a38d360e365cfa20bf7cefed8beaa 100644 (file)
@@ -90,8 +90,8 @@ intel_fdi_link_freq(struct drm_device *dev)
 
 static const intel_limit_t intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 930000, .max = 1400000 },
-       .n = { .min = 3, .max = 16 },
+       .vco = { .min = 908000, .max = 1512000 },
+       .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
@@ -103,8 +103,8 @@ static const intel_limit_t intel_limits_i8xx_dac = {
 
 static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 930000, .max = 1400000 },
-       .n = { .min = 3, .max = 16 },
+       .vco = { .min = 908000, .max = 1512000 },
+       .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
@@ -116,8 +116,8 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
-       .vco = { .min = 930000, .max = 1400000 },
-       .n = { .min = 3, .max = 16 },
+       .vco = { .min = 908000, .max = 1512000 },
+       .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
@@ -329,6 +329,8 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
 {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n == 0 || clock->p == 0))
+               return;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
@@ -430,6 +432,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
 {
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n == 0 || clock->p == 0))
+               return;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
@@ -443,6 +447,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
 {
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
+       if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+               return;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
@@ -748,10 +754,10 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
        return intel_crtc->config.cpu_transcoder;
 }
 
-static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 frame, frame_reg = PIPEFRAME(pipe);
+       u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
 
        frame = I915_READ(frame_reg);
 
@@ -772,8 +778,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = PIPESTAT(pipe);
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               ironlake_wait_for_vblank(dev, pipe);
+       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+               g4x_wait_for_vblank(dev, pipe);
                return;
        }
 
@@ -1205,15 +1211,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
        }
 }
 
-static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 {
        u32 val;
        bool enabled;
 
-       if (HAS_PCH_LPT(dev_priv->dev)) {
-               DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
-               return;
-       }
+       WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
 
        val = I915_READ(PCH_DREF_CONTROL);
        enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1361,6 +1364,24 @@ static void intel_init_dpio(struct drm_device *dev)
        if (!IS_VALLEYVIEW(dev))
                return;
 
+       DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+}
+
+static void intel_reset_dpio(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!IS_VALLEYVIEW(dev))
+               return;
+
+       /*
+        * Enable the CRI clock source so we can get at the display and the
+        * reference clock for VGA hotplug / manual detection.
+        */
+       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                  DPLL_REFA_CLK_ENABLE_VLV |
+                  DPLL_INTEGRATED_CRI_CLK_VLV);
+
        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1487,25 +1508,35 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
 
-       /* Leave integrated clock source enabled */
+       /*
+        * Leave integrated clock source and reference clock enabled for pipe B.
+        * The latter is needed for VGA hotplug / manual detection.
+        */
        if (pipe == PIPE_B)
-               val = DPLL_INTEGRATED_CRI_CLK_VLV;
+               val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
 }
 
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+               struct intel_digital_port *dport)
 {
        u32 port_mask;
 
-       if (!port)
+       switch (dport->port) {
+       case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
-       else
+               break;
+       case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
+               break;
+       default:
+               BUG();
+       }
 
        if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
                WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
-                    'B' + port, I915_READ(DPLL(0)));
+                    port_name(dport->port), I915_READ(DPLL(0)));
 }
 
 /**
@@ -2083,8 +2114,8 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                      fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
-               I915_MODIFY_DISPBASE(DSPSURF(plane),
-                                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+               I915_WRITE(DSPSURF(plane),
+                          i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else
@@ -2174,8 +2205,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
                      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
                      fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
-       I915_MODIFY_DISPBASE(DSPSURF(plane),
-                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+       I915_WRITE(DSPSURF(plane),
+                  i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
        } else {
@@ -2233,7 +2264,12 @@ void intel_display_handle_reset(struct drm_device *dev)
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
                mutex_lock(&crtc->mutex);
-               if (intel_crtc->active)
+               /*
+                * FIXME: Once we have proper support for primary planes (and
+                * disabling them without disabling the entire crtc) allow again
+                * a NULL crtc->fb.
+                */
+               if (intel_crtc->active && crtc->fb)
                        dev_priv->display.update_plane(crtc, crtc->fb,
                                                       crtc->x, crtc->y);
                mutex_unlock(&crtc->mutex);
@@ -2350,6 +2386,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                        I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
                        I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
                }
+               intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+               intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
        }
 
        ret = dev_priv->display.update_plane(crtc, fb, x, y);
@@ -2944,6 +2982,30 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
        return pending;
 }
 
+bool intel_has_pending_fb_unpin(struct drm_device *dev)
+{
+       struct intel_crtc *crtc;
+
+       /* Note that we don't need to be called with mode_config.lock here
+        * as our list of CRTC objects is static for the lifetime of the
+        * device and so cannot disappear as we iterate. Similarly, we can
+        * happily treat the predicates as racy, atomic checks as userspace
+        * cannot claim and pin a new fb without at least acquring the
+        * struct_mutex and so serialising with us.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               if (atomic_read(&crtc->unpin_work_count) == 0)
+                       continue;
+
+               if (crtc->unpin_work)
+                       intel_wait_for_vblank(dev, crtc->pipe);
+
+               return true;
+       }
+
+       return false;
+}
+
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -3399,9 +3461,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
                mutex_unlock(&dev_priv->rps.hw_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
-                * mailbox." Therefore we need to defer waiting on the state
-                * change.
-                * TODO: need to fix this for state checker
+                * mailbox." Moreover, the mailbox may return a bogus state,
+                * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
@@ -3428,9 +3489,10 @@ void hsw_disable_ips(struct intel_crtc *crtc)
                mutex_lock(&dev_priv->rps.hw_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->rps.hw_lock);
-       } else
+       } else {
                I915_WRITE(IPS_CTL, 0);
-       POSTING_READ(IPS_CTL);
+               POSTING_READ(IPS_CTL);
+       }
 
        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev, crtc->pipe);
@@ -3465,7 +3527,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
        /* Workaround : Do not read or write the pipe palette/gamma data while
         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
         */
-       if (intel_crtc->config.ips_enabled &&
+       if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
            ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
             GAMMA_MODE_MODE_SPLIT)) {
                hsw_disable_ips(intel_crtc);
@@ -3910,6 +3972,174 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 
+int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+       int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+       /* Obtain SKU information */
+       mutex_lock(&dev_priv->dpio_lock);
+       hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+               CCK_FUSE_HPLL_FREQ_MASK;
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       return vco_freq[hpll_freq];
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val, cmd;
+
+       if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+               cmd = 2;
+       else if (cdclk == 266)
+               cmd = 1;
+       else
+               cmd = 0;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+       val &= ~DSPFREQGUAR_MASK;
+       val |= (cmd << DSPFREQGUAR_SHIFT);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+                     DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+                    50)) {
+               DRM_ERROR("timed out waiting for CDclk change\n");
+       }
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       if (cdclk == 400) {
+               u32 divider, vco;
+
+               vco = valleyview_get_vco(dev_priv);
+               divider = ((vco << 1) / cdclk) - 1;
+
+               mutex_lock(&dev_priv->dpio_lock);
+               /* adjust cdclk divider */
+               val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+               val &= ~0xf;
+               val |= divider;
+               vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+               mutex_unlock(&dev_priv->dpio_lock);
+       }
+
+       mutex_lock(&dev_priv->dpio_lock);
+       /* adjust self-refresh exit latency value */
+       val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+       val &= ~0x7f;
+
+       /*
+        * For high bandwidth configs, we set a higher latency in the bunit
+        * so that the core display fetch happens in time to avoid underruns.
+        */
+       if (cdclk == 400)
+               val |= 4500 / 250; /* 4.5 usec */
+       else
+               val |= 3000 / 250; /* 3.0 usec */
+       vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+       intel_i2c_reset(dev);
+}
+
+static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+{
+       int cur_cdclk, vco;
+       int divider;
+
+       vco = valleyview_get_vco(dev_priv);
+
+       mutex_lock(&dev_priv->dpio_lock);
+       divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       divider &= 0xf;
+
+       cur_cdclk = (vco << 1) / (divider + 1);
+
+       return cur_cdclk;
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+                                int max_pixclk)
+{
+       int cur_cdclk;
+
+       cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+       /*
+        * Really only a few cases to deal with, as only 4 CDclks are supported:
+        *   200MHz
+        *   267MHz
+        *   320MHz
+        *   400MHz
+        * So we check to see whether we're above 90% of the lower bin and
+        * adjust if needed.
+        */
+       if (max_pixclk > 288000) {
+               return 400;
+       } else if (max_pixclk > 240000) {
+               return 320;
+       } else
+               return 266;
+       /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+}
+
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
+                                unsigned modeset_pipes,
+                                struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_crtc *intel_crtc;
+       int max_pixclk = 0;
+
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (modeset_pipes & (1 << intel_crtc->pipe))
+                       max_pixclk = max(max_pixclk,
+                                        pipe_config->adjusted_mode.crtc_clock);
+               else if (intel_crtc->base.enabled)
+                       max_pixclk = max(max_pixclk,
+                                        intel_crtc->config.adjusted_mode.crtc_clock);
+       }
+
+       return max_pixclk;
+}
+
+static void valleyview_modeset_global_pipes(struct drm_device *dev,
+                                           unsigned *prepare_pipes,
+                                           unsigned modeset_pipes,
+                                           struct intel_crtc_config *pipe_config)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc;
+       int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
+                                              pipe_config);
+       int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+       if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+               return;
+
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+                           base.head)
+               if (intel_crtc->base.enabled)
+                       *prepare_pipes |= (1 << intel_crtc->pipe);
+}
+
+static void valleyview_modeset_global_resources(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+       int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+       int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+       if (req_cdclk != cur_cdclk)
+               valleyview_set_cdclk(dev, req_cdclk);
+}
+
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -4570,9 +4800,8 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
                refclk = 100000;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-               refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
-               DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
-                             refclk / 1000);
+               refclk = dev_priv->vbt.lvds_ssc_freq;
+               DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
        } else if (!IS_GEN2(dev)) {
                refclk = 96000;
        } else {
@@ -4634,24 +4863,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x8cffffff;
        reg_val = 0x8c000000;
-       vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
-       vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 }
 
 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4720,15 +4949,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
                vlv_pllb_recal_opamp(dev_priv, pipe);
 
        /* Set up Tx target for periodic Rcomp update */
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
 
        /* Disable target IRef on PLL */
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
 
        /* Disable fast lock */
-       vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
+       vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
 
        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4742,50 +4971,54 @@ static void vlv_update_pll(struct intel_crtc *crtc)
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-       vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
        mdiv |= DPIO_ENABLE_CALIBRATION;
-       vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
        /* Set HBR and RBR LPF coefficients */
        if (crtc->config.port_clock == 162000 ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-               vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
-               vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);
 
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
                /* Use SSC source */
                if (!pipe)
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (!pipe)
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }
 
-       coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
+       coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
                coreclk |= 0x01000000;
-       vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
-       vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
 
-       /* Enable DPIO clock input */
+       /*
+        * Enable DPIO clock input. We should never disable the reference
+        * clock for pipe B, since VGA hotplug / manual detection depends
+        * on it.
+        */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
        /* We should never disable this, set it here for state tracking */
@@ -5230,6 +5463,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
+       if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+               return;
+
        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;
@@ -5261,7 +5497,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
        int refclk = 100000;
 
        mutex_lock(&dev_priv->dpio_lock);
-       mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+       mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->dpio_lock);
 
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@ -5718,9 +5954,9 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
        }
 
        if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-               DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+               DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
                              dev_priv->vbt.lvds_ssc_freq);
-               return dev_priv->vbt.lvds_ssc_freq * 1000;
+               return dev_priv->vbt.lvds_ssc_freq;
        }
 
        return 120000;
@@ -5982,7 +6218,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        factor = 21;
        if (is_lvds) {
                if ((intel_panel_use_ssc(dev_priv) &&
-                    dev_priv->vbt.lvds_ssc_freq == 100) ||
+                    dev_priv->vbt.lvds_ssc_freq == 100000) ||
                    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
                        factor = 25;
        } else if (intel_crtc->config.sdvo_tv_clock)
@@ -6323,7 +6559,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        val = I915_READ(DEIMR);
-       WARN((val & ~DE_PCH_EVENT_IVB) != val,
+       WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
             "Unexpected DEIMR bits enabled: 0x%x\n", val);
        val = I915_READ(SDEIMR);
        WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
@@ -6402,7 +6638,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 
        /* Make sure we're not on PC8 state before disabling PC8, otherwise
         * we'll hang the machine! */
-       gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6672,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
                        DRM_ERROR("Switching back to LCPLL failed\n");
        }
 
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6447,6 +6683,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
        struct drm_device *dev = dev_priv->dev;
        uint32_t val;
 
+       WARN_ON(!HAS_PC8(dev));
+
        if (dev_priv->pc8.enabled)
                return;
 
@@ -6463,6 +6701,8 @@ void hsw_enable_pc8_work(struct work_struct *__work)
        lpt_disable_clkout_dp(dev);
        hsw_pc8_disable_interrupts(dev);
        hsw_disable_lcpll(dev_priv, true, true);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
@@ -6492,12 +6732,16 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
        if (dev_priv->pc8.disable_count != 1)
                return;
 
+       WARN_ON(!HAS_PC8(dev));
+
        cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
        if (!dev_priv->pc8.enabled)
                return;
 
        DRM_DEBUG_KMS("Disabling package C8+\n");
 
+       intel_runtime_pm_get(dev_priv);
+
        hsw_restore_lcpll(dev_priv);
        hsw_pc8_restore_interrupts(dev);
        lpt_init_pch_refclk(dev);
@@ -6704,8 +6948,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
        int plane = intel_crtc->plane;
        int ret;
 
-       if (!intel_ddi_pll_mode_set(crtc))
+       if (!intel_ddi_pll_select(intel_crtc))
                return -EINVAL;
+       intel_ddi_pll_enable(intel_crtc);
 
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
@@ -6796,8 +7041,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        if (intel_display_power_enabled(dev, pfit_domain))
                ironlake_get_pfit_config(crtc, pipe_config);
 
-       pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
-                                  (I915_READ(IPS_CTL) & IPS_ENABLE);
+       if (IS_HASWELL(dev))
+               pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+                       (I915_READ(IPS_CTL) & IPS_ENABLE);
 
        pipe_config->pixel_multiplier = 1;
 
@@ -7689,7 +7935,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
        u32 dpll = pipe_config->dpll_hw_state.dpll;
 
        if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
-               return dev_priv->vbt.lvds_ssc_freq * 1000;
+               return dev_priv->vbt.lvds_ssc_freq;
        else if (HAS_PCH_SPLIT(dev))
                return 120000;
        else if (!IS_GEN2(dev))
@@ -7752,12 +7998,17 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                else
                        i9xx_clock(refclk, &clock);
        } else {
-               bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+               u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+               bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
 
                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);
-                       clock.p2 = 14;
+
+                       if (lvds & LVDS_CLKB_POWER_UP)
+                               clock.p2 = 7;
+                       else
+                               clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
@@ -8493,28 +8744,6 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .load_lut = intel_crtc_load_lut,
 };
 
-static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
-                                 struct drm_crtc *crtc)
-{
-       struct drm_device *dev;
-       struct drm_crtc *tmp;
-       int crtc_mask = 1;
-
-       WARN(!crtc, "checking null crtc?\n");
-
-       dev = crtc->dev;
-
-       list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-               if (tmp == crtc)
-                       break;
-               crtc_mask <<= 1;
-       }
-
-       if (encoder->possible_crtcs & crtc_mask)
-               return true;
-       return false;
-}
-
 /**
  * intel_modeset_update_staged_output_state
  *
@@ -9122,7 +9351,9 @@ intel_pipe_config_compare(struct drm_device *dev,
                PIPE_CONF_CHECK_I(pch_pfit.size);
        }
 
-       PIPE_CONF_CHECK_I(ips_enabled);
+       /* BDW+ don't expose a synchronous way to read the state */
+       if (IS_HASWELL(dev))
+               PIPE_CONF_CHECK_I(ips_enabled);
 
        PIPE_CONF_CHECK_I(double_wide);
 
@@ -9368,21 +9599,19 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_display_mode *saved_mode, *saved_hwmode;
+       struct drm_display_mode *saved_mode;
        struct intel_crtc_config *pipe_config = NULL;
        struct intel_crtc *intel_crtc;
        unsigned disable_pipes, prepare_pipes, modeset_pipes;
        int ret = 0;
 
-       saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
+       saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
        if (!saved_mode)
                return -ENOMEM;
-       saved_hwmode = saved_mode + 1;
 
        intel_modeset_affected_pipes(crtc, &modeset_pipes,
                                     &prepare_pipes, &disable_pipes);
 
-       *saved_hwmode = crtc->hwmode;
        *saved_mode = crtc->mode;
 
        /* Hack: Because we don't (yet) support global modeset on multiple
@@ -9402,6 +9631,21 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                                       "[modeset]");
        }
 
+       /*
+        * See if the config requires any additional preparation, e.g.
+        * to adjust global state with pipes off.  We need to do this
+        * here so we can get the modeset_pipe updated config for the new
+        * mode set on this crtc.  For other crtcs we need to use the
+        * adjusted_mode bits in the crtc directly.
+        */
+       if (IS_VALLEYVIEW(dev)) {
+               valleyview_modeset_global_pipes(dev, &prepare_pipes,
+                                               modeset_pipes, pipe_config);
+
+               /* may have added more to prepare_pipes than we should */
+               prepare_pipes &= ~disable_pipes;
+       }
+
        for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
                intel_crtc_disable(&intel_crtc->base);
 
@@ -9418,6 +9662,14 @@ static int __intel_set_mode(struct drm_crtc *crtc,
                /* mode_set/enable/disable functions rely on a correct pipe
                 * config. */
                to_intel_crtc(crtc)->config = *pipe_config;
+
+               /*
+                * Calculate and store various constants which
+                * are later needed by vblank and swap-completion
+                * timestamping. They are derived from true hwmode.
+                */
+               drm_calc_timestamping_constants(crtc,
+                                               &pipe_config->adjusted_mode);
        }
 
        /* Only after disabling all output pipelines that will be changed can we
@@ -9441,23 +9693,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
        for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
                dev_priv->display.crtc_enable(&intel_crtc->base);
 
-       if (modeset_pipes) {
-               /* Store real post-adjustment hardware mode. */
-               crtc->hwmode = pipe_config->adjusted_mode;
-
-               /* Calculate and store various constants which
-                * are later needed by vblank and swap-completion
-                * timestamping. They are derived from true hwmode.
-                */
-               drm_calc_timestamping_constants(crtc);
-       }
-
        /* FIXME: add subpixel order */
 done:
-       if (ret && crtc->enabled) {
-               crtc->hwmode = *saved_hwmode;
+       if (ret && crtc->enabled)
                crtc->mode = *saved_mode;
-       }
 
 out:
        kfree(pipe_config);
@@ -9679,8 +9918,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
                }
 
                /* Make sure the new CRTC will work with the encoder */
-               if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
-                                          new_crtc)) {
+               if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
+                                        new_crtc)) {
                        return -EINVAL;
                }
                connector->encoder->new_crtc = to_intel_crtc(new_crtc);
@@ -9694,17 +9933,21 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        /* Check for any encoders that needs to be disabled. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
+               int num_connectors = 0;
                list_for_each_entry(connector,
                                    &dev->mode_config.connector_list,
                                    base.head) {
                        if (connector->new_encoder == encoder) {
                                WARN_ON(!connector->new_encoder->new_crtc);
-
-                               goto next_encoder;
+                               num_connectors++;
                        }
                }
-               encoder->new_crtc = NULL;
-next_encoder:
+
+               if (num_connectors == 0)
+                       encoder->new_crtc = NULL;
+               else if (num_connectors > 1)
+                       return -EINVAL;
+
                /* Only now check for crtc changes so we don't miss encoders
                 * that will be disabled. */
                if (&encoder->new_crtc->base != encoder->base.crtc) {
@@ -9775,6 +10018,16 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
                ret = intel_pipe_set_base(set->crtc,
                                          set->x, set->y, set->fb);
+               /*
+                * In the fastboot case this may be our only check of the
+                * state after boot.  It would be better to only do it on
+                * the first update, but we don't have a nice way of doing that
+                * (and really, set_config isn't used much for high freq page
+                * flipping, so increasing its cost here shouldn't be a big
+                * deal).
+                */
+               if (i915_fastboot && ret == 0)
+                       intel_modeset_check_state(set->crtc->dev);
        }
 
        if (ret) {
@@ -9835,7 +10088,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
 {
        /* PCH refclock must be enabled first */
-       assert_pch_refclk_enabled(dev_priv);
+       ibx_assert_pch_refclk_enabled(dev_priv);
 
        I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
 
@@ -9903,8 +10156,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
                dev_priv->num_shared_dpll = 0;
 
        BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-       DRM_DEBUG_KMS("%i shared PLLs initialized\n",
-                     dev_priv->num_shared_dpll);
 }
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -9926,10 +10177,13 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
                intel_crtc->lut_b[i] = i;
        }
 
-       /* Swap pipes & planes for FBC on pre-965 */
+       /*
+        * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
+        * is hooked to plane B. Hence we want plane A feeding pipe B.
+        */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
-       if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+       if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
                intel_crtc->plane = !pipe;
        }
@@ -10018,6 +10272,28 @@ static bool has_edp_a(struct drm_device *dev)
        return true;
 }
 
+const char *intel_output_name(int output)
+{
+       static const char *names[] = {
+               [INTEL_OUTPUT_UNUSED] = "Unused",
+               [INTEL_OUTPUT_ANALOG] = "Analog",
+               [INTEL_OUTPUT_DVO] = "DVO",
+               [INTEL_OUTPUT_SDVO] = "SDVO",
+               [INTEL_OUTPUT_LVDS] = "LVDS",
+               [INTEL_OUTPUT_TVOUT] = "TV",
+               [INTEL_OUTPUT_HDMI] = "HDMI",
+               [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
+               [INTEL_OUTPUT_EDP] = "eDP",
+               [INTEL_OUTPUT_DSI] = "DSI",
+               [INTEL_OUTPUT_UNKNOWN] = "Unknown",
+       };
+
+       if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
+               return "Invalid";
+
+       return names[output];
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10412,8 +10688,11 @@ static void intel_init_display(struct drm_device *dev)
                }
        } else if (IS_G4X(dev)) {
                dev_priv->display.write_eld = g4x_write_eld;
-       } else if (IS_VALLEYVIEW(dev))
+       } else if (IS_VALLEYVIEW(dev)) {
+               dev_priv->display.modeset_global_resources =
+                       valleyview_modeset_global_resources;
                dev_priv->display.write_eld = ironlake_write_eld;
+       }
 
        /* Default just returns -ENODEV to indicate unsupported */
        dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -10440,6 +10719,8 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.queue_flip = intel_gen7_queue_flip;
                break;
        }
+
+       intel_panel_init_backlight_funcs(dev);
 }
 
 /*
@@ -10476,17 +10757,6 @@ static void quirk_invert_brightness(struct drm_device *dev)
        DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
-/*
- * Some machines (Dell XPS13) suffer broken backlight controls if
- * BLM_PCH_PWM_ENABLE is set.
- */
-static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
-       DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
-}
-
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -10555,11 +10825,6 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
-       /* Dell XPS13 HD Sandy Bridge */
-       { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
-       /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
-       { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -10603,18 +10868,11 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        intel_prepare_ddi(dev);
 
        intel_init_clock_gating(dev);
 
-       /* Enable the CRI clock source so we can get at the display */
-       if (IS_VALLEYVIEW(dev))
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_INTEGRATED_CRI_CLK_VLV);
-
-       intel_init_dpio(dev);
+       intel_reset_dpio(dev);
 
        mutex_lock(&dev->struct_mutex);
        intel_enable_gt_powersave(dev);
@@ -10676,6 +10934,9 @@ void intel_modeset_init(struct drm_device *dev)
                }
        }
 
+       intel_init_dpio(dev);
+       intel_reset_dpio(dev);
+
        intel_cpu_pll_init(dev);
        intel_shared_dpll_init(dev);
 
@@ -10879,7 +11140,7 @@ void i915_redisable_vga(struct drm_device *dev)
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if (HAS_POWER_WELL(dev) &&
+       if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
                return;
 
@@ -11023,7 +11284,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                pll->on = false;
        }
 
-       if (IS_HASWELL(dev))
+       if (HAS_PCH_SPLIT(dev))
                ilk_wm_get_hw_state(dev);
 
        if (force_restore) {
@@ -11101,12 +11362,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
 
-       /* destroy backlight, if any, before the connectors */
-       intel_panel_destroy_backlight(dev);
-
-       /* destroy the sysfs files before encoders/connectors */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       /* destroy the backlight and sysfs files before encoders/connectors */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               intel_panel_destroy_backlight(connector);
                drm_sysfs_connector_remove(connector);
+       }
 
        drm_mode_config_cleanup(dev);
 
@@ -11161,6 +11421,7 @@ struct intel_display_error_state {
        } cursor[I915_MAX_PIPES];
 
        struct intel_pipe_error_state {
+               bool power_domain_on;
                u32 source;
        } pipe[I915_MAX_PIPES];
 
@@ -11175,6 +11436,7 @@ struct intel_display_error_state {
        } plane[I915_MAX_PIPES];
 
        struct intel_transcoder_error_state {
+               bool power_domain_on;
                enum transcoder cpu_transcoder;
 
                u32 conf;
@@ -11208,11 +11470,13 @@ intel_display_capture_error_state(struct drm_device *dev)
        if (error == NULL)
                return NULL;
 
-       if (HAS_POWER_WELL(dev))
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
        for_each_pipe(i) {
-               if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+               error->pipe[i].power_domain_on =
+                       intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+               if (!error->pipe[i].power_domain_on)
                        continue;
 
                if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
@@ -11248,8 +11512,10 @@ intel_display_capture_error_state(struct drm_device *dev)
        for (i = 0; i < error->num_transcoders; i++) {
                enum transcoder cpu_transcoder = transcoders[i];
 
-               if (!intel_display_power_enabled(dev,
-                               POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+               error->transcoder[i].power_domain_on =
+                       intel_display_power_enabled_sw(dev,
+                               POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+               if (!error->transcoder[i].power_domain_on)
                        continue;
 
                error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@ -11279,11 +11545,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                return;
 
        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
-       if (HAS_POWER_WELL(dev))
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(i) {
                err_printf(m, "Pipe [%d]:\n", i);
+               err_printf(m, "  Power: %s\n",
+                          error->pipe[i].power_domain_on ? "on" : "off");
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
 
                err_printf(m, "Plane [%d]:\n", i);
@@ -11309,6 +11577,8 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
        for (i = 0; i < error->num_transcoders; i++) {
                err_printf(m, "CPU transcoder: %c\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
+               err_printf(m, "  Power: %s\n",
+                          error->transcoder[i].power_domain_on ? "on" : "off");
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
index 30c627c7b7ba18a0dbd546859b047a769cad1d64..5ede4e8e290df5cc2f3e1b7046ec409b9f50d538 100644 (file)
@@ -142,7 +142,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
        return (max_link_clock * max_lanes * 8) / 10;
 }
 
-static int
+static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        int i, ret, recv_bytes;
        uint32_t status;
        int try, precharge, clock = 0;
-       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+       bool has_aux_irq = true;
        uint32_t timeout;
 
        /* dp aux is extremely sensitive to irq latency, hence request the
@@ -542,7 +542,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
                return -E2BIG;
 
        intel_dp_check_edp(intel_dp);
-       msg[0] = AUX_NATIVE_WRITE << 4;
+       msg[0] = DP_AUX_NATIVE_WRITE << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
        msg[3] = send_bytes - 1;
@@ -552,9 +552,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
                if (ret < 0)
                        return ret;
-               if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+               ack >>= 4;
+               if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
                        break;
-               else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+               else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
                        udelay(100);
                else
                        return -EIO;
@@ -586,7 +587,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
                return -E2BIG;
 
        intel_dp_check_edp(intel_dp);
-       msg[0] = AUX_NATIVE_READ << 4;
+       msg[0] = DP_AUX_NATIVE_READ << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
        msg[3] = recv_bytes - 1;
@@ -601,12 +602,12 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
                        return -EPROTO;
                if (ret < 0)
                        return ret;
-               ack = reply[0];
-               if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+               ack = reply[0] >> 4;
+               if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
                        memcpy(recv, reply + 1, ret - 1);
                        return ret - 1;
                }
-               else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+               else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
                        udelay(100);
                else
                        return -EIO;
@@ -633,12 +634,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
        intel_dp_check_edp(intel_dp);
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
-               msg[0] = AUX_I2C_READ << 4;
+               msg[0] = DP_AUX_I2C_READ << 4;
        else
-               msg[0] = AUX_I2C_WRITE << 4;
+               msg[0] = DP_AUX_I2C_WRITE << 4;
 
        if (!(mode & MODE_I2C_STOP))
-               msg[0] |= AUX_I2C_MOT << 4;
+               msg[0] |= DP_AUX_I2C_MOT << 4;
 
        msg[1] = address >> 8;
        msg[2] = address;
@@ -675,17 +676,17 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        goto out;
                }
 
-               switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
-               case AUX_NATIVE_REPLY_ACK:
+               switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+               case DP_AUX_NATIVE_REPLY_ACK:
                        /* I2C-over-AUX Reply field is only valid
                         * when paired with AUX ACK.
                         */
                        break;
-               case AUX_NATIVE_REPLY_NACK:
+               case DP_AUX_NATIVE_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_ch native nack\n");
                        ret = -EREMOTEIO;
                        goto out;
-               case AUX_NATIVE_REPLY_DEFER:
+               case DP_AUX_NATIVE_REPLY_DEFER:
                        /*
                         * For now, just give more slack to branch devices. We
                         * could check the DPCD for I2C bit rate capabilities,
@@ -706,18 +707,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        goto out;
                }
 
-               switch (reply[0] & AUX_I2C_REPLY_MASK) {
-               case AUX_I2C_REPLY_ACK:
+               switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
+               case DP_AUX_I2C_REPLY_ACK:
                        if (mode == MODE_I2C_READ) {
                                *read_byte = reply[1];
                        }
                        ret = reply_bytes - 1;
                        goto out;
-               case AUX_I2C_REPLY_NACK:
+               case DP_AUX_I2C_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_i2c nack\n");
                        ret = -EREMOTEIO;
                        goto out;
-               case AUX_I2C_REPLY_DEFER:
+               case DP_AUX_I2C_REPLY_DEFER:
                        DRM_DEBUG_KMS("aux_i2c defer\n");
                        udelay(100);
                        break;
@@ -1037,6 +1038,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
        }
+
+       DRM_DEBUG_KMS("Wait complete\n");
 }
 
 static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
@@ -1092,6 +1095,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
        if (ironlake_edp_have_panel_vdd(intel_dp))
                return;
 
+       intel_runtime_pm_get(dev_priv);
+
        DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
        if (!ironlake_edp_have_panel_power(intel_dp))
@@ -1140,7 +1145,11 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
                /* Make sure sequencer is idle before allowing subsequent activity */
                DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
-               msleep(intel_dp->panel_power_down_delay);
+
+               if ((pp & POWER_TARGET_ON) == 0)
+                       msleep(intel_dp->panel_power_cycle_delay);
+
+               intel_runtime_pm_put(dev_priv);
        }
 }
 
@@ -1233,20 +1242,16 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
        DRM_DEBUG_KMS("Turn eDP power off\n");
 
-       WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
-
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
-       pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+       pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
-       intel_dp->want_panel_vdd = false;
-
        ironlake_wait_panel_off(intel_dp);
 }
 
@@ -1772,7 +1777,6 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 
        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
-       ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        ironlake_edp_panel_off(intel_dp);
@@ -1845,23 +1849,23 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        struct edp_power_seq power_seq;
        u32 val;
 
        mutex_lock(&dev_priv->dpio_lock);
 
-       val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 
        mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1872,7 +1876,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 
        intel_enable_dp(encoder);
 
-       vlv_wait_port_ready(dev_priv, port);
+       vlv_wait_port_ready(dev_priv, dport);
 }
 
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1882,24 +1886,24 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
 
        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);
 
        /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1941,18 +1945,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
                                              DP_LINK_STATUS_SIZE);
 }
 
-#if 0
-static char    *voltage_names[] = {
-       "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char    *pre_emph_names[] = {
-       "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char    *link_train_names[] = {
-       "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
 /*
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
@@ -2050,7 +2042,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
 
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
@@ -2127,14 +2119,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
        }
 
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);
 
        return 0;
@@ -2646,7 +2638,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
-                       intel_dp_link_down(intel_dp);
                        break;
                }
 
@@ -2899,13 +2890,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 
        /* Try to read receiver status if the link appears to be up */
        if (!intel_dp_get_link_status(intel_dp, link_status)) {
-               intel_dp_link_down(intel_dp);
                return;
        }
 
        /* Now read the DPCD to see if it's actually running */
        if (!intel_dp_get_dpcd(intel_dp)) {
-               intel_dp_link_down(intel_dp);
                return;
        }
 
@@ -3020,18 +3009,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                return status;
        }
 
-       switch (intel_dig_port->port) {
-       case PORT_B:
-               bit = PORTB_HOTPLUG_LIVE_STATUS;
-               break;
-       case PORT_C:
-               bit = PORTC_HOTPLUG_LIVE_STATUS;
-               break;
-       case PORT_D:
-               bit = PORTD_HOTPLUG_LIVE_STATUS;
-               break;
-       default:
-               return connector_status_unknown;
+       if (IS_VALLEYVIEW(dev)) {
+               switch (intel_dig_port->port) {
+               case PORT_B:
+                       bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+                       break;
+               case PORT_C:
+                       bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+                       break;
+               case PORT_D:
+                       bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+                       break;
+               default:
+                       return connector_status_unknown;
+               }
+       } else {
+               switch (intel_dig_port->port) {
+               case PORT_B:
+                       bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+                       break;
+               case PORT_C:
+                       bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+                       break;
+               case PORT_D:
+                       bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+                       break;
+               default:
+                       return connector_status_unknown;
+               }
        }
 
        if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
@@ -3082,9 +3087,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        enum drm_connector_status status;
        struct edid *edid = NULL;
 
+       intel_runtime_pm_get(dev_priv);
+
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector));
 
@@ -3096,7 +3104,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                status = g4x_dp_detect(intel_dp);
 
        if (status != connector_status_connected)
-               return status;
+               goto out;
 
        intel_dp_probe_oui(intel_dp);
 
@@ -3112,7 +3120,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-       return connector_status_connected;
+       status = connector_status_connected;
+
+out:
+       intel_runtime_pm_put(dev_priv);
+       return status;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
index 79f91f26e288d4bf2ae7815cb82cfc3271745bd5..fbfaaba5cc3b2a079ae9749401d5602c6674cf07 100644 (file)
@@ -65,8 +65,8 @@
 #define wait_for_atomic_us(COND, US) _wait_for((COND), \
                                               DIV_ROUND_UP((US), 1000), 0)
 
-#define KHz(x) (1000*x)
-#define MHz(x) KHz(1000*x)
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
 
 /*
  * Display related stuff
@@ -155,7 +155,19 @@ struct intel_encoder {
 
 struct intel_panel {
        struct drm_display_mode *fixed_mode;
+       struct drm_display_mode *downclock_mode;
        int fitting_mode;
+
+       /* backlight */
+       struct {
+               bool present;
+               u32 level;
+               u32 max;
+               bool enabled;
+               bool combination_mode;  /* gen 2/4 only */
+               bool active_low_pwm;
+               struct backlight_device *device;
+       } backlight;
 };
 
 struct intel_connector {
@@ -443,7 +455,7 @@ struct intel_hdmi {
        bool rgb_quant_range_selectable;
        void (*write_infoframe)(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
-                               const uint8_t *frame, ssize_t len);
+                               const void *frame, ssize_t len);
        void (*set_infoframes)(struct drm_encoder *encoder,
                               struct drm_display_mode *adjusted_mode);
 };
@@ -490,9 +502,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
 {
        switch (dport->port) {
        case PORT_B:
-               return 0;
+               return DPIO_CH0;
        case PORT_C:
-               return 1;
+               return DPIO_CH1;
        default:
                BUG();
        }
@@ -601,7 +613,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+bool intel_ddi_pll_select(struct intel_crtc *crtc);
+void intel_ddi_pll_enable(struct intel_crtc *crtc);
 void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
@@ -612,6 +625,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 
 
 /* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -638,7 +653,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+                        struct intel_digital_port *dport);
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
                                struct drm_display_mode *mode,
                                struct intel_load_detect_pipe *old);
@@ -690,11 +706,10 @@ void
 ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
                                int dotclock);
 bool intel_crtc_active(struct drm_crtc *crtc);
-void i915_disable_vga_mem(struct drm_device *dev);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 void intel_display_set_init_power(struct drm_device *dev, bool enable);
-
+int valleyview_get_vco(struct drm_i915_private *dev_priv);
 
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -808,9 +823,13 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 int intel_panel_setup_backlight(struct drm_connector *connector);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
-void intel_panel_destroy_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+void intel_panel_init_backlight_funcs(struct drm_device *dev);
 enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
+extern struct drm_display_mode *intel_find_panel_downclock(
+                               struct drm_device *dev,
+                               struct drm_display_mode *fixed_mode,
+                               struct drm_connector *connector);
 
 /* intel_pm.c */
 void intel_init_clock_gating(struct drm_device *dev);
@@ -830,6 +849,8 @@ int intel_power_domains_init(struct drm_device *dev);
 void intel_power_domains_remove(struct drm_device *dev);
 bool intel_display_power_enabled(struct drm_device *dev,
                                 enum intel_display_power_domain domain);
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+                                   enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_device *dev,
                             enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_device *dev,
@@ -844,6 +865,10 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 
 
index d257b093ca68757874925999fae766a727755212..fabbf0d895cf2a5e133d804d5e8e05f0f26f79ad 100644 (file)
 static const struct intel_dsi_device intel_dsi_devices[] = {
 };
 
-
-static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
-                          u32 mask)
-{
-       u32 tmp = vlv_cck_read(dev_priv, reg);
-       tmp &= ~mask;
-       tmp |= val;
-       vlv_cck_write(dev_priv, reg, tmp);
-}
-
-static void band_gap_wa(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct drm_i915_private *dev_priv)
 {
        mutex_lock(&dev_priv->dpio_lock);
 
-       /* Enable bandgap fix in GOP driver */
-       vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
-       msleep(20);
-       vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
-       msleep(20);
-       vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
-       msleep(20);
-       vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
-       msleep(20);
-       vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
-       msleep(20);
-
-       /* Turn Display Trunk on */
-       vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
-       msleep(20);
-
-       vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
-       msleep(20);
-
-       vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
-       msleep(20);
-       vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
-       msleep(20);
-       vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+       vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+       vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+       vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+       udelay(150);
+       vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+       vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
 
        mutex_unlock(&dev_priv->dpio_lock);
-
-       /* Need huge delay, otherwise clock is not stable */
-       msleep(100);
 }
 
 static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
@@ -132,14 +101,47 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
        vlv_enable_dsi_pll(encoder);
 }
 
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       int pipe = intel_crtc->pipe;
+       u32 val;
+
+       DRM_DEBUG_KMS("\n");
+
+       val = I915_READ(MIPI_PORT_CTRL(pipe));
+       I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
+       usleep_range(1000, 1500);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
+       usleep_range(2000, 2500);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+       usleep_range(2000, 2500);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+       usleep_range(2000, 2500);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
+       usleep_range(2000, 2500);
+}
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 {
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
        DRM_DEBUG_KMS("\n");
+
+       if (intel_dsi->dev.dev_ops->panel_reset)
+               intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
+
+       /* put device in ready state */
+       intel_dsi_device_ready(encoder);
+
+       if (intel_dsi->dev.dev_ops->send_otp_cmds)
+               intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
 }
 
 static void intel_dsi_enable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        int pipe = intel_crtc->pipe;
@@ -147,41 +149,28 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
-       temp = I915_READ(MIPI_DEVICE_READY(pipe));
-       if ((temp & DEVICE_READY) == 0) {
-               temp &= ~ULPS_STATE_MASK;
-               I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
-       } else if (temp & ULPS_STATE_MASK) {
-               temp &= ~ULPS_STATE_MASK;
-               I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
-               /*
-                * We need to ensure that there is a minimum of 1 ms time
-                * available before clearing the UPLS exit state.
-                */
-               msleep(2);
-               I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
-       }
-
        if (is_cmd_mode(intel_dsi))
                I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
-
-       if (is_vid_mode(intel_dsi)) {
+       else {
                msleep(20); /* XXX */
                dpi_send_cmd(intel_dsi, TURN_ON);
                msleep(100);
 
                /* assert ip_tg_enable signal */
-               temp = I915_READ(MIPI_PORT_CTRL(pipe));
+               temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
+               temp = temp | intel_dsi->port_bits;
                I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
                POSTING_READ(MIPI_PORT_CTRL(pipe));
        }
 
-       intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+       if (intel_dsi->dev.dev_ops->enable)
+               intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
 }
 
 static void intel_dsi_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        int pipe = intel_crtc->pipe;
@@ -189,8 +178,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
-       intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
-
        if (is_vid_mode(intel_dsi)) {
                dpi_send_cmd(intel_dsi, SHUTDOWN);
                msleep(10);
@@ -203,20 +190,54 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
                msleep(2);
        }
 
-       temp = I915_READ(MIPI_DEVICE_READY(pipe));
-       if (temp & DEVICE_READY) {
-               temp &= ~DEVICE_READY;
-               temp &= ~ULPS_STATE_MASK;
-               I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
-       }
+       /* if disable packets are sent before sending shutdown packet then in
+        * some next enable sequence send turn on packet error is observed */
+       if (intel_dsi->dev.dev_ops->disable)
+               intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       int pipe = intel_crtc->pipe;
+       u32 val;
+
        DRM_DEBUG_KMS("\n");
 
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       usleep_range(2000, 2500);
+
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+       usleep_range(2000, 2500);
+
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       usleep_range(2000, 2500);
+
+       val = I915_READ(MIPI_PORT_CTRL(pipe));
+       I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+       usleep_range(1000, 1500);
+
+       if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
+                                       == 0x00000), 30))
+               DRM_ERROR("DSI LP not going Low\n");
+
+       I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
+       usleep_range(2000, 2500);
+
        vlv_disable_dsi_pll(encoder);
 }
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+       DRM_DEBUG_KMS("\n");
+
+       intel_dsi_clear_device_ready(encoder);
+
+       if (intel_dsi->dev.dev_ops->disable_panel_power)
+               intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+}
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
@@ -251,8 +272,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
        /* XXX: read flags, set to adjusted_mode */
 }
 
-static int intel_dsi_mode_valid(struct drm_connector *connector,
-                               struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -352,11 +374,8 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
 
        DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
-       /* Update the DSI PLL */
-       vlv_enable_dsi_pll(intel_encoder);
-
        /* XXX: Location of the call */
-       band_gap_wa(dev_priv);
+       band_gap_reset(dev_priv);
 
        /* escape clock divider, 20MHz, shared for A and C. device ready must be
         * off when doing this! txclkesc? */
@@ -373,11 +392,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
        I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
        I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
 
-       I915_WRITE(MIPI_DPHY_PARAM(pipe),
-                  0x3c << EXIT_ZERO_COUNT_SHIFT |
-                  0x1f << TRAIL_COUNT_SHIFT |
-                  0xc5 << CLK_ZERO_COUNT_SHIFT |
-                  0x1f << PREPARE_COUNT_SHIFT);
+       I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
 
        I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
                   adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
@@ -425,9 +440,9 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
                                       adjusted_mode->htotal,
                                       bpp, intel_dsi->lane_count) + 1);
        }
-       I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
-       I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
-       I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+       I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
+       I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
+       I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
 
        /* dphy stuff */
 
@@ -442,29 +457,31 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
         *
         * XXX: write MIPI_STOP_STATE_STALL?
         */
-       I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+       I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
+                                               intel_dsi->hs_to_lp_count);
 
        /* XXX: low power clock equivalence in terms of byte clock. the number
         * of byte clocks occupied in one low power clock. based on txbyteclkhs
         * and txclkesc. txclkesc time / txbyteclk time * (105 +
         * MIPI_STOP_STATE_STALL) / 105.???
         */
-       I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+       I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
 
        /* the bw essential for transmitting 16 long packets containing 252
         * bytes meant for dcs write memory command is programmed in this
         * register in terms of byte clocks. based on dsi transfer rate and the
         * number of lanes configured the time taken to transmit 16 long packets
         * in a dsi stream varies. */
-       I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+       I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
 
        I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
-                  0xa << LP_HS_SSW_CNT_SHIFT |
-                  0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+                  intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+                  intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
        if (is_vid_mode(intel_dsi))
                I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
-                          intel_dsi->video_mode_format);
+                               intel_dsi->video_frmt_cfg_bits |
+                               intel_dsi->video_mode_format);
 }
 
 static enum drm_connector_status
index c7765f33d5245fa2ff1fd03fe2bb12339a716473..b4a27cec882f76d7cab17400d8adf438532b4c9d 100644 (file)
@@ -39,6 +39,13 @@ struct intel_dsi_device {
 struct intel_dsi_dev_ops {
        bool (*init)(struct intel_dsi_device *dsi);
 
+       void (*panel_reset)(struct intel_dsi_device *dsi);
+
+       void (*disable_panel_power)(struct intel_dsi_device *dsi);
+
+       /* one time programmable commands if needed */
+       void (*send_otp_cmds)(struct intel_dsi_device *dsi);
+
        /* This callback must be able to assume DSI commands can be sent */
        void (*enable)(struct intel_dsi_device *dsi);
 
@@ -89,6 +96,20 @@ struct intel_dsi {
 
        /* eot for MIPI_EOT_DISABLE register */
        u32 eot_disable;
+
+       u32 port_bits;
+       u32 bw_timer;
+       u32 dphy_reg;
+       u32 video_frmt_cfg_bits;
+       u16 lp_byte_clk;
+
+       /* timeouts in byte clocks */
+       u16 lp_rx_timeout;
+       u16 turn_arnd_val;
+       u16 rst_timer_val;
+       u16 hs_to_lp_count;
+       u16 clk_lp_to_hs_count;
+       u16 clk_hs_to_lp_count;
 };
 
 static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
index 44279b2ade888a7821d75da44ab523ab8f1f318a..ba79ec19da3b8ce8d4379df2545c1ea54c3a8f3e 100644 (file)
@@ -50,6 +50,8 @@ static const u32 lfsr_converts[] = {
        71, 35                                                  /* 91 - 92 */
 };
 
+#ifdef DSI_CLK_FROM_RR
+
 static u32 dsi_rr_formula(const struct drm_display_mode *mode,
                          int pixel_format, int video_mode_format,
                          int lane_count, bool eotp)
@@ -121,7 +123,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
 
        /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
        dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
-       dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+       dsi_clk = dsi_bit_clock_hz / 1000;
 
        if (eotp && video_mode_format == VIDEO_MODE_BURST)
                dsi_clk *= 2;
@@ -129,64 +131,37 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
        return dsi_clk;
 }
 
-#ifdef MNP_FROM_TABLE
-
-struct dsi_clock_table {
-       u32 freq;
-       u8 m;
-       u8 p;
-};
-
-static const struct dsi_clock_table dsi_clk_tbl[] = {
-       {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
-       {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
-       {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
-       {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
-       {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
-       {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
-       {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
-       {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
-       {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
-       {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
-       {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
-       {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
-       {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
-       {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
-       {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
-       {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
-       {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
-       {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
-       {1000, 80, 2},          /* dsi clock frequency in Mhz*/
-};
+#else
 
-static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
+                         int pixel_format, int lane_count)
 {
-       unsigned int i;
-       u8 m;
-       u8 n;
-       u8 p;
-       u32 m_seed;
-
-       if (dsi_clk < 300 || dsi_clk > 1000)
-               return -ECHRNG;
+       u32 dsi_clk_khz;
+       u32 bpp;
 
-       for (i = 0; i <= ARRAY_SIZE(dsi_clk_tbl); i++) {
-               if (dsi_clk_tbl[i].freq > dsi_clk)
-                       break;
+       switch (pixel_format) {
+       default:
+       case VID_MODE_FORMAT_RGB888:
+       case VID_MODE_FORMAT_RGB666_LOOSE:
+               bpp = 24;
+               break;
+       case VID_MODE_FORMAT_RGB666:
+               bpp = 18;
+               break;
+       case VID_MODE_FORMAT_RGB565:
+               bpp = 16;
+               break;
        }
 
-       m = dsi_clk_tbl[i].m;
-       p = dsi_clk_tbl[i].p;
-       m_seed = lfsr_converts[m - 62];
-       n = 1;
-       dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
-       dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
-               m_seed << DSI_PLL_M1_DIV_SHIFT;
+       /* DSI data rate = pixel clock * bits per pixel / lane count
+          pixel clock is converted from KHz to Hz */
+       dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
 
-       return 0;
+       return dsi_clk_khz;
 }
 
-#else
+#endif
 
 static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
 {
@@ -194,36 +169,47 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
        u32 ref_clk;
        u32 error;
        u32 tmp_error;
-       u32 target_dsi_clk;
-       u32 calc_dsi_clk;
+       int target_dsi_clk;
+       int calc_dsi_clk;
        u32 calc_m;
        u32 calc_p;
        u32 m_seed;
 
-       if (dsi_clk < 300 || dsi_clk > 1150) {
+       /* dsi_clk is expected in KHZ */
+       if (dsi_clk < 300000 || dsi_clk > 1150000) {
                DRM_ERROR("DSI CLK Out of Range\n");
                return -ECHRNG;
        }
 
        ref_clk = 25000;
-       target_dsi_clk = dsi_clk * 1000;
+       target_dsi_clk = dsi_clk;
        error = 0xFFFFFFFF;
+       tmp_error = 0xFFFFFFFF;
        calc_m = 0;
        calc_p = 0;
 
        for (m = 62; m <= 92; m++) {
                for (p = 2; p <= 6; p++) {
-
+                       /* Find the optimal m and p divisors
+                       with minimal error +/- the required clock */
                        calc_dsi_clk = (m * ref_clk) / p;
-                       if (calc_dsi_clk >= target_dsi_clk) {
-                               tmp_error = calc_dsi_clk - target_dsi_clk;
-                               if (tmp_error < error) {
-                                       error = tmp_error;
-                                       calc_m = m;
-                                       calc_p = p;
-                               }
+                       if (calc_dsi_clk == target_dsi_clk) {
+                               calc_m = m;
+                               calc_p = p;
+                               error = 0;
+                               break;
+                       } else
+                               tmp_error = abs(target_dsi_clk - calc_dsi_clk);
+
+                       if (tmp_error < error) {
+                               error = tmp_error;
+                               calc_m = m;
+                               calc_p = p;
                        }
                }
+
+               if (error == 0)
+                       break;
        }
 
        m_seed = lfsr_converts[calc_m - 62];
@@ -235,8 +221,6 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
        return 0;
 }
 
-#endif
-
 /*
  * XXX: The muxing and gating is hard coded for now. Need to add support for
  * sharing PLLs with two DSI outputs.
@@ -251,9 +235,8 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
        struct dsi_mnp dsi_mnp;
        u32 dsi_clk;
 
-       dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
-                                intel_dsi->video_mode_format,
-                                intel_dsi->lane_count, !intel_dsi->eot_disable);
+       dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
+                                               intel_dsi->lane_count);
 
        ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
        if (ret) {
index 3c77365468562ab026910a1e111fb932b288f242..eeff998e52efdea93534ffb680582503f9f2a56c 100644 (file)
@@ -234,8 +234,9 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
        intel_modeset_check_state(connector->dev);
 }
 
-static int intel_dvo_mode_valid(struct drm_connector *connector,
-                               struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
 {
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 
index 895fcb4fbd9446bfbfafc90f95948d9a4bea5c55..39eac9937a4aa1a89c176ef77d727cfb615b8cd1 100644 (file)
@@ -57,18 +57,14 @@ static struct fb_ops intelfb_ops = {
        .fb_debug_leave = drm_fb_helper_debug_leave,
 };
 
-static int intelfb_create(struct drm_fb_helper *helper,
-                         struct drm_fb_helper_surface_size *sizes)
+static int intelfb_alloc(struct drm_fb_helper *helper,
+                        struct drm_fb_helper_surface_size *sizes)
 {
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
        struct drm_device *dev = helper->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct drm_i915_gem_object *obj;
-       struct device *device = &dev->pdev->dev;
        int size, ret;
 
        /* we don't do packed 24bpp */
@@ -94,8 +90,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       mutex_lock(&dev->struct_mutex);
-
        /* Flush everything out, we'll be doing GTT only from now on */
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
        if (ret) {
@@ -103,7 +97,50 @@ static int intelfb_create(struct drm_fb_helper *helper,
                goto out_unref;
        }
 
-       info = framebuffer_alloc(0, device);
+       ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+       if (ret)
+               goto out_unpin;
+
+       return 0;
+
+out_unpin:
+       i915_gem_object_unpin(obj);
+out_unref:
+       drm_gem_object_unreference(&obj->base);
+out:
+       return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+                         struct drm_fb_helper_surface_size *sizes)
+{
+       struct intel_fbdev *ifbdev =
+               container_of(helper, struct intel_fbdev, helper);
+       struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+       struct drm_device *dev = helper->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct fb_info *info;
+       struct drm_framebuffer *fb;
+       struct drm_i915_gem_object *obj;
+       int size, ret;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!intel_fb->obj) {
+               DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+               ret = intelfb_alloc(helper, sizes);
+               if (ret)
+                       goto out_unlock;
+       } else {
+               DRM_DEBUG_KMS("re-using BIOS fb\n");
+               sizes->fb_width = intel_fb->base.width;
+               sizes->fb_height = intel_fb->base.height;
+       }
+
+       obj = intel_fb->obj;
+       size = obj->base.size;
+
+       info = framebuffer_alloc(0, &dev->pdev->dev);
        if (!info) {
                ret = -ENOMEM;
                goto out_unpin;
@@ -111,10 +148,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        info->par = helper;
 
-       ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
-       if (ret)
-               goto out_unpin;
-
        fb = &ifbdev->ifb.base;
 
        ifbdev->helper.fb = fb;
@@ -170,17 +203,15 @@ static int intelfb_create(struct drm_fb_helper *helper,
                      fb->width, fb->height,
                      i915_gem_obj_ggtt_offset(obj), obj);
 
-
        mutex_unlock(&dev->struct_mutex);
        vga_switcheroo_client_fb_set(dev->pdev, info);
        return 0;
 
 out_unpin:
        i915_gem_object_unpin(obj);
-out_unref:
        drm_gem_object_unreference(&obj->base);
+out_unlock:
        mutex_unlock(&dev->struct_mutex);
-out:
        return ret;
 }
 
@@ -297,8 +328,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
        fb_set_suspend(info, state);
 }
 
-MODULE_LICENSE("GPL and additional rights");
-
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
index 03f9ca70530c03f441ae037b16cf0c5e80b908ab..6db0d9d17f47e7b5ce5c43a7aa7795de71ea6346 100644 (file)
@@ -130,9 +130,9 @@ static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
 
 static void g4x_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
-                               const uint8_t *frame, ssize_t len)
+                               const void *frame, ssize_t len)
 {
-       uint32_t *data = (uint32_t *)frame;
+       const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -167,9 +167,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
-                               const uint8_t *frame, ssize_t len)
+                               const void *frame, ssize_t len)
 {
-       uint32_t *data = (uint32_t *)frame;
+       const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -205,9 +205,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
-                               const uint8_t *frame, ssize_t len)
+                               const void *frame, ssize_t len)
 {
-       uint32_t *data = (uint32_t *)frame;
+       const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -246,9 +246,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
-                               const uint8_t *frame, ssize_t len)
+                               const void *frame, ssize_t len)
 {
-       uint32_t *data = (uint32_t *)frame;
+       const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -284,9 +284,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
-                               const uint8_t *frame, ssize_t len)
+                               const void *frame, ssize_t len)
 {
-       uint32_t *data = (uint32_t *)frame;
+       const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
@@ -853,8 +853,9 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
                return 225000;
 }
 
-static int intel_hdmi_mode_valid(struct drm_connector *connector,
-                                struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+                     struct drm_display_mode *mode)
 {
        if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
                return MODE_CLOCK_HIGH;
@@ -1081,7 +1082,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;
 
@@ -1090,41 +1091,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 
        /* Enable clock channels for this port */
        mutex_lock(&dev_priv->dpio_lock);
-       val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
 
        /* HDMI 1.0V-2dB */
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
-                        0x2b245f5f);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
-                        0x5578b83a);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
-                        0x0c782040);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
-                        0x2b247878);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
-                        0x00002000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
-                        DPIO_TX_OCALINIT_EN);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
 
        /* Program lane clock */
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
-                        0x00760018);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
-                        0x00400888);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
        mutex_unlock(&dev_priv->dpio_lock);
 
        intel_enable_hdmi(encoder);
 
-       vlv_wait_port_ready(dev_priv, port);
+       vlv_wait_port_ready(dev_priv, dport);
 }
 
 static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1134,7 +1127,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
 
        if (!IS_VALLEYVIEW(dev))
@@ -1142,24 +1135,22 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 
        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                         DPIO_PCS_CLK_SOFT_RESET);
 
        /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
-
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
-                        0x00002000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
-                        DPIO_TX_OCALINIT_EN);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1169,13 +1160,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
 
        /* Reset lanes to avoid HDMI flicker (VLV w/a) */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
index 2ca17b14b6c1c91710601dfcb34c661b45707347..b1dc33f478991755ec114fe66fbad7bc40fec4c0 100644 (file)
@@ -82,20 +82,11 @@ static int get_disp_clk_div(struct drm_i915_private *dev_priv,
 
 static void gmbus_set_freq(struct drm_i915_private *dev_priv)
 {
-       int vco_freq[] = { 800, 1600, 2000, 2400 };
-       int gmbus_freq = 0, cdclk_div, hpll_freq;
+       int vco, gmbus_freq = 0, cdclk_div;
 
        BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
-       /* Skip setting the gmbus freq if BIOS has already programmed it */
-       if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
-               return;
-
-       /* Obtain SKU information */
-       mutex_lock(&dev_priv->dpio_lock);
-       hpll_freq =
-               vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
-       mutex_unlock(&dev_priv->dpio_lock);
+       vco = valleyview_get_vco(dev_priv);
 
        /* Get the CDCLK divide ratio */
        cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
@@ -106,7 +97,7 @@ static void gmbus_set_freq(struct drm_i915_private *dev_priv)
         * in fact 1MHz is the correct frequency.
         */
        if (cdclk_div)
-               gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+               gmbus_freq = (vco << 1) / cdclk_div;
 
        if (WARN_ON(gmbus_freq == 0))
                return;
index c3b4da7895ed1c82b185b78ca06d00053cff2402..8bcb93a2a9f6b1d09780c3154f6c656895451e8e 100644 (file)
@@ -256,8 +256,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
        POSTING_READ(lvds_encoder->reg);
 }
 
-static int intel_lvds_mode_valid(struct drm_connector *connector,
-                                struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+                     struct drm_display_mode *mode)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
@@ -446,9 +447,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
        if (dev_priv->modeset_restore == MODESET_DONE)
                goto exit;
 
-       drm_modeset_lock_all(dev);
-       intel_modeset_setup_hw_state(dev, true);
-       drm_modeset_unlock_all(dev);
+       /*
+        * Some old platform's BIOS love to wreak havoc while the lid is closed.
+        * We try to detect this here and undo any damage. The split for PCH
+        * platforms is rather conservative and a bit arbitrary expect that on
+        * those platforms VGA disabling requires actual legacy VGA I/O access,
+        * and as part of the cleanup in the hw state restore we also redisable
+        * the vga plane.
+        */
+       if (!HAS_PCH_SPLIT(dev)) {
+               drm_modeset_lock_all(dev);
+               intel_modeset_setup_hw_state(dev, true);
+               drm_modeset_unlock_all(dev);
+       }
 
        dev_priv->modeset_restore = MODESET_DONE;
 
@@ -744,57 +755,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
        { }     /* terminating entry */
 };
 
-/**
- * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @connector: LVDS connector
- *
- * Find the reduced downclock for LVDS in EDID.
- */
-static void intel_find_lvds_downclock(struct drm_device *dev,
-                                     struct drm_display_mode *fixed_mode,
-                                     struct drm_connector *connector)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_display_mode *scan;
-       int temp_downclock;
-
-       temp_downclock = fixed_mode->clock;
-       list_for_each_entry(scan, &connector->probed_modes, head) {
-               /*
-                * If one mode has the same resolution with the fixed_panel
-                * mode while they have the different refresh rate, it means
-                * that the reduced downclock is found for the LVDS. In such
-                * case we can set the different FPx0/1 to dynamically select
-                * between low and high frequency.
-                */
-               if (scan->hdisplay == fixed_mode->hdisplay &&
-                   scan->hsync_start == fixed_mode->hsync_start &&
-                   scan->hsync_end == fixed_mode->hsync_end &&
-                   scan->htotal == fixed_mode->htotal &&
-                   scan->vdisplay == fixed_mode->vdisplay &&
-                   scan->vsync_start == fixed_mode->vsync_start &&
-                   scan->vsync_end == fixed_mode->vsync_end &&
-                   scan->vtotal == fixed_mode->vtotal) {
-                       if (scan->clock < temp_downclock) {
-                               /*
-                                * The downclock is already found. But we
-                                * expect to find the lower downclock.
-                                */
-                               temp_downclock = scan->clock;
-                       }
-               }
-       }
-       if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
-               /* We found the downclock for LVDS. */
-               dev_priv->lvds_downclock_avail = 1;
-               dev_priv->lvds_downclock = temp_downclock;
-               DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
-                             "Normal clock %dKhz, downclock %dKhz\n",
-                             fixed_mode->clock, temp_downclock);
-       }
-}
-
 /*
  * Enumerate the child dev array parsed from VBT to check whether
  * the LVDS is present.
@@ -1072,8 +1032,22 @@ void intel_lvds_init(struct drm_device *dev)
 
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        if (fixed_mode) {
-                               intel_find_lvds_downclock(dev, fixed_mode,
-                                                         connector);
+                               intel_connector->panel.downclock_mode =
+                                       intel_find_panel_downclock(dev,
+                                       fixed_mode, connector);
+                               if (intel_connector->panel.downclock_mode !=
+                                       NULL && i915_lvds_downclock) {
+                                       /* We found the downclock for LVDS. */
+                                       dev_priv->lvds_downclock_avail = true;
+                                       dev_priv->lvds_downclock =
+                                               intel_connector->panel.
+                                               downclock_mode->clock;
+                                       DRM_DEBUG_KMS("LVDS downclock is found"
+                                       " in EDID. Normal clock %dKhz, "
+                                       "downclock %dKhz\n",
+                                       fixed_mode->clock,
+                                       dev_priv->lvds_downclock);
+                               }
                                goto out;
                        }
                }
index 9a8804bee5cdfd08276592672d35d83e564fc9ef..4e960ec7419fb6802398b9b118b3b3a513199350 100644 (file)
@@ -63,7 +63,7 @@ struct opregion_header {
        u8 driver_ver[16];
        u32 mboxes;
        u8 reserved[164];
-} __attribute__((packed));
+} __packed;
 
 /* OpRegion mailbox #1: public ACPI methods */
 struct opregion_acpi {
@@ -85,7 +85,7 @@ struct opregion_acpi {
        u32 cnot;       /* current OS notification */
        u32 nrdy;       /* driver status */
        u8 rsvd2[60];
-} __attribute__((packed));
+} __packed;
 
 /* OpRegion mailbox #2: SWSCI */
 struct opregion_swsci {
@@ -93,7 +93,7 @@ struct opregion_swsci {
        u32 parm;       /* command parameters */
        u32 dslp;       /* driver sleep time-out */
        u8 rsvd[244];
-} __attribute__((packed));
+} __packed;
 
 /* OpRegion mailbox #3: ASLE */
 struct opregion_asle {
@@ -114,7 +114,7 @@ struct opregion_asle {
        u32 srot;       /* supported rotation angles */
        u32 iuer;       /* IUER events */
        u8 rsvd[86];
-} __attribute__((packed));
+} __packed;
 
 /* Driver readiness indicator */
 #define ASLE_ARDY_READY                (1 << 0)
@@ -395,13 +395,8 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_encoder *encoder;
-       struct drm_connector *connector;
-       struct intel_connector *intel_connector = NULL;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+       struct intel_connector *intel_connector;
        struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-       u32 ret = 0;
-       bool found = false;
 
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -413,38 +408,20 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
                return ASLC_BACKLIGHT_FAILED;
 
        mutex_lock(&dev->mode_config.mutex);
+
        /*
-        * Could match the OpRegion connector here instead, but we'd also need
-        * to verify the connector could handle a backlight call.
+        * Update backlight on all connectors that support backlight (usually
+        * only one).
         */
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-               if (encoder->crtc == crtc) {
-                       found = true;
-                       break;
-               }
-
-       if (!found) {
-               ret = ASLC_BACKLIGHT_FAILED;
-               goto out;
-       }
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-               if (connector->encoder == encoder)
-                       intel_connector = to_intel_connector(connector);
-
-       if (!intel_connector) {
-               ret = ASLC_BACKLIGHT_FAILED;
-               goto out;
-       }
-
        DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
-       intel_panel_set_backlight(intel_connector, bclp, 255);
+       list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
+               intel_panel_set_backlight(intel_connector, bclp, 255);
        iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
-out:
        mutex_unlock(&dev->mode_config.mutex);
 
-       return ret;
+
+       return 0;
 }
 
 static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
index a98a990fbab3561368239ce7a7ac020c1d44f0c7..a759ecdb7a6ebaddee622ffd5a1d7900fcf5aee3 100644 (file)
@@ -1005,7 +1005,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
        u32  pfit_control;
 
        /* i830 doesn't have a panel fitter */
-       if (IS_I830(dev))
+       if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
                return -1;
 
        pfit_control = I915_READ(PFIT_CONTROL);
index e6f782d1c6696d94fe4d4a80cf7f4d6ee6b5c7d7..350de359123af9cbd42354c182424f356281aa16 100644 (file)
@@ -325,214 +325,170 @@ out:
        pipe_config->gmch_pfit.lvds_border_bits = border;
 }
 
-static int is_backlight_combination_mode(struct drm_device *dev)
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+       "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+       "report PCI device ID, subsystem vendor and subsystem device ID "
+       "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+       "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+                                         u32 val)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
 
-       if (IS_GEN4(dev))
-               return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+       WARN_ON(panel->backlight.max == 0);
 
-       if (IS_GEN2(dev))
-               return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+       if (i915_panel_invert_brightness < 0)
+               return val;
 
-       return 0;
+       if (i915_panel_invert_brightness > 0 ||
+           dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+               return panel->backlight.max - val;
+       }
+
+       return val;
 }
 
-/* XXX: query mode clock or hardware clock and program max PWM appropriately
- * when it's 0.
- */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
+static u32 bdw_get_backlight(struct intel_connector *connector)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 val;
 
-       WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
-
-       /* Restore the CTL value if it lost, e.g. GPU reset */
-
-       if (HAS_PCH_SPLIT(dev_priv->dev)) {
-               val = I915_READ(BLC_PWM_PCH_CTL2);
-               if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
-                       dev_priv->regfile.saveBLC_PWM_CTL2 = val;
-               } else if (val == 0) {
-                       val = dev_priv->regfile.saveBLC_PWM_CTL2;
-                       I915_WRITE(BLC_PWM_PCH_CTL2, val);
-               }
-       } else if (IS_VALLEYVIEW(dev)) {
-               val = I915_READ(VLV_BLC_PWM_CTL(pipe));
-               if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
-                       dev_priv->regfile.saveBLC_PWM_CTL = val;
-                       dev_priv->regfile.saveBLC_PWM_CTL2 =
-                               I915_READ(VLV_BLC_PWM_CTL2(pipe));
-               } else if (val == 0) {
-                       val = dev_priv->regfile.saveBLC_PWM_CTL;
-                       I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
-                       I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
-                                  dev_priv->regfile.saveBLC_PWM_CTL2);
-               }
+       return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
 
-               if (!val)
-                       val = 0x0f42ffff;
-       } else {
-               val = I915_READ(BLC_PWM_CTL);
-               if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
-                       dev_priv->regfile.saveBLC_PWM_CTL = val;
-                       if (INTEL_INFO(dev)->gen >= 4)
-                               dev_priv->regfile.saveBLC_PWM_CTL2 =
-                                       I915_READ(BLC_PWM_CTL2);
-               } else if (val == 0) {
-                       val = dev_priv->regfile.saveBLC_PWM_CTL;
-                       I915_WRITE(BLC_PWM_CTL, val);
-                       if (INTEL_INFO(dev)->gen >= 4)
-                               I915_WRITE(BLC_PWM_CTL2,
-                                          dev_priv->regfile.saveBLC_PWM_CTL2);
-               }
-       }
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       return val;
+       return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
-static u32 intel_panel_get_max_backlight(struct drm_device *dev,
-                                        enum pipe pipe)
+static u32 i9xx_get_backlight(struct intel_connector *connector)
 {
-       u32 max;
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 val;
 
-       max = i915_read_blc_pwm_ctl(dev, pipe);
+       val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+       if (INTEL_INFO(dev)->gen < 4)
+               val >>= 1;
 
-       if (HAS_PCH_SPLIT(dev)) {
-               max >>= 16;
-       } else {
-               if (INTEL_INFO(dev)->gen < 4)
-                       max >>= 17;
-               else
-                       max >>= 16;
+       if (panel->backlight.combination_mode) {
+               u8 lbpc;
 
-               if (is_backlight_combination_mode(dev))
-                       max *= 0xff;
+               pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+               val *= lbpc;
        }
 
-       DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
-
-       return max;
+       return val;
 }
 
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
-       "(-1 force normal, 0 machine defaults, 1 force inversion), please "
-       "report PCI device ID, subsystem vendor and subsystem device ID "
-       "to dri-devel@lists.freedesktop.org, if your machine needs it. "
-       "It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev,
-                                         enum pipe pipe, u32 val)
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (i915_panel_invert_brightness < 0)
-               return val;
+       return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
 
-       if (i915_panel_invert_brightness > 0 ||
-           dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-               u32 max = intel_panel_get_max_backlight(dev, pipe);
-               if (max)
-                       return max - val;
-       }
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       enum pipe pipe = intel_get_pipe_from_connector(connector);
 
-       return val;
+       return _vlv_get_backlight(dev, pipe);
 }
 
-static u32 intel_panel_get_backlight(struct drm_device *dev,
-                                    enum pipe pipe)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
        unsigned long flags;
-       int reg;
-
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
-       if (IS_BROADWELL(dev)) {
-               val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
-       } else if (HAS_PCH_SPLIT(dev)) {
-               val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-       } else {
-               if (IS_VALLEYVIEW(dev))
-                       reg = VLV_BLC_PWM_CTL(pipe);
-               else
-                       reg = BLC_PWM_CTL;
-
-               val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
-               if (INTEL_INFO(dev)->gen < 4)
-                       val >>= 1;
-
-               if (is_backlight_combination_mode(dev)) {
-                       u8 lbpc;
 
-                       pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
-                       val *= lbpc;
-               }
-       }
+       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-       val = intel_panel_compute_brightness(dev, pipe, val);
+       val = dev_priv->display.get_backlight(connector);
+       val = intel_panel_compute_brightness(connector, val);
 
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 
        DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
        return val;
 }
 
-static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
 }
 
-static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-       I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+       u32 tmp;
+
+       tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
 }
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev,
-                                              enum pipe pipe, u32 level)
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 tmp;
-       int reg;
-
-       DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
-       level = intel_panel_compute_brightness(dev, pipe, level);
+       struct intel_panel *panel = &connector->panel;
+       u32 tmp, mask;
 
-       if (IS_BROADWELL(dev))
-               return intel_bdw_panel_set_backlight(dev, level);
-       else if (HAS_PCH_SPLIT(dev))
-               return intel_pch_panel_set_backlight(dev, level);
+       WARN_ON(panel->backlight.max == 0);
 
-       if (is_backlight_combination_mode(dev)) {
-               u32 max = intel_panel_get_max_backlight(dev, pipe);
+       if (panel->backlight.combination_mode) {
                u8 lbpc;
 
-               /* we're screwed, but keep behaviour backwards compatible */
-               if (!max)
-                       max = 1;
-
-               lbpc = level * 0xfe / max + 1;
+               lbpc = level * 0xfe / panel->backlight.max + 1;
                level /= lbpc;
                pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
        }
 
-       if (IS_VALLEYVIEW(dev))
-               reg = VLV_BLC_PWM_CTL(pipe);
-       else
-               reg = BLC_PWM_CTL;
-
-       tmp = I915_READ(reg);
-       if (INTEL_INFO(dev)->gen < 4)
+       if (IS_GEN4(dev)) {
+               mask = BACKLIGHT_DUTY_CYCLE_MASK;
+       } else {
                level <<= 1;
-       tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-       I915_WRITE(reg, tmp | level);
+               mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+       }
+
+       tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+       I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
+
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = intel_get_pipe_from_connector(connector);
+       u32 tmp;
+
+       tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+       level = intel_panel_compute_brightness(connector, level);
+       dev_priv->display.set_backlight(connector, level);
 }
 
 /* set backlight brightness to level in range [0..max] */
@@ -541,45 +497,89 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 freq;
        unsigned long flags;
 
-       if (pipe == INVALID_PIPE)
+       if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
 
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-       freq = intel_panel_get_max_backlight(dev, pipe);
-       if (!freq) {
-               /* we are screwed, bail out */
-               goto out;
-       }
+       WARN_ON(panel->backlight.max == 0);
 
-       /* scale to hardware, but be careful to not overflow */
+       /* scale to hardware max, but be careful to not overflow */
+       freq = panel->backlight.max;
        if (freq < max)
                level = level * freq / max;
        else
                level = freq / max * level;
 
-       dev_priv->backlight.level = level;
-       if (dev_priv->backlight.device)
-               dev_priv->backlight.device->props.brightness = level;
+       panel->backlight.level = level;
+       if (panel->backlight.device)
+               panel->backlight.device->props.brightness = level;
 
-       if (dev_priv->backlight.enabled)
-               intel_panel_actually_set_backlight(dev, pipe, level);
-out:
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+       if (panel->backlight.enabled)
+               intel_panel_actually_set_backlight(connector, level);
+
+       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
+
+       intel_panel_actually_set_backlight(connector, 0);
+
+       tmp = I915_READ(BLC_PWM_CPU_CTL2);
+       I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+       tmp = I915_READ(BLC_PWM_PCH_CTL1);
+       I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+       intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
+
+       intel_panel_actually_set_backlight(connector, 0);
+
+       tmp = I915_READ(BLC_PWM_CTL2);
+       I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = intel_get_pipe_from_connector(connector);
+       u32 tmp;
+
+       intel_panel_actually_set_backlight(connector, 0);
+
+       tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+       I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
 }
 
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        unsigned long flags;
 
-       if (pipe == INVALID_PIPE)
+       if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
 
        /*
@@ -593,150 +593,215 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
                return;
        }
 
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
-       dev_priv->backlight.enabled = false;
-       intel_panel_actually_set_backlight(dev, pipe, 0);
+       panel->backlight.enabled = false;
+       dev_priv->display.disable_backlight(connector);
 
-       if (INTEL_INFO(dev)->gen >= 4) {
-               uint32_t reg, tmp;
+       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
 
-               if (HAS_PCH_SPLIT(dev))
-                       reg = BLC_PWM_CPU_CTL2;
-               else if (IS_VALLEYVIEW(dev))
-                       reg = VLV_BLC_PWM_CTL2(pipe);
-               else
-                       reg = BLC_PWM_CTL2;
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 pch_ctl1, pch_ctl2;
+
+       pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+       if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+               DRM_DEBUG_KMS("pch backlight already enabled\n");
+               pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+               I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+       }
 
-               I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+       pch_ctl2 = panel->backlight.max << 16;
+       I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
-               if (HAS_PCH_SPLIT(dev)) {
-                       tmp = I915_READ(BLC_PWM_PCH_CTL1);
-                       tmp &= ~BLM_PCH_PWM_ENABLE;
-                       I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-               }
-       }
+       pch_ctl1 = 0;
+       if (panel->backlight.active_low_pwm)
+               pch_ctl1 |= BLM_PCH_POLARITY;
 
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+       /* BDW always uses the pch pwm controls. */
+       pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+       I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+       POSTING_READ(BLC_PWM_PCH_CTL1);
+       I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+       /* This won't stick until the above enable. */
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
 }
 
-void intel_panel_enable_backlight(struct intel_connector *connector)
+static void pch_enable_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        enum transcoder cpu_transcoder =
                intel_pipe_to_cpu_transcoder(dev_priv, pipe);
-       unsigned long flags;
+       u32 cpu_ctl2, pch_ctl1, pch_ctl2;
 
-       if (pipe == INVALID_PIPE)
-               return;
+       cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+       if (cpu_ctl2 & BLM_PWM_ENABLE) {
+               WARN(1, "cpu backlight already enabled\n");
+               cpu_ctl2 &= ~BLM_PWM_ENABLE;
+               I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+       }
 
-       DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+       pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+       if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+               DRM_DEBUG_KMS("pch backlight already enabled\n");
+               pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+               I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+       }
+
+       if (cpu_transcoder == TRANSCODER_EDP)
+               cpu_ctl2 = BLM_TRANSCODER_EDP;
+       else
+               cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+       I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+       POSTING_READ(BLC_PWM_CPU_CTL2);
+       I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+       /* This won't stick until the above enable. */
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+       pch_ctl2 = panel->backlight.max << 16;
+       I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+       pch_ctl1 = 0;
+       if (panel->backlight.active_low_pwm)
+               pch_ctl1 |= BLM_PCH_POLARITY;
+
+       I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+       POSTING_READ(BLC_PWM_PCH_CTL1);
+       I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
 
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 ctl, freq;
 
-       if (dev_priv->backlight.level == 0) {
-               dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
-                                                                         pipe);
-               if (dev_priv->backlight.device)
-                       dev_priv->backlight.device->props.brightness =
-                               dev_priv->backlight.level;
+       ctl = I915_READ(BLC_PWM_CTL);
+       if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+               WARN(1, "backlight already enabled\n");
+               I915_WRITE(BLC_PWM_CTL, 0);
        }
 
-       if (INTEL_INFO(dev)->gen >= 4) {
-               uint32_t reg, tmp;
+       freq = panel->backlight.max;
+       if (panel->backlight.combination_mode)
+               freq /= 0xff;
 
-               if (HAS_PCH_SPLIT(dev))
-                       reg = BLC_PWM_CPU_CTL2;
-               else if (IS_VALLEYVIEW(dev))
-                       reg = VLV_BLC_PWM_CTL2(pipe);
-               else
-                       reg = BLC_PWM_CTL2;
+       ctl = freq << 17;
+       if (IS_GEN2(dev) && panel->backlight.combination_mode)
+               ctl |= BLM_LEGACY_MODE;
+       if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+               ctl |= BLM_POLARITY_PNV;
 
-               tmp = I915_READ(reg);
+       I915_WRITE(BLC_PWM_CTL, ctl);
+       POSTING_READ(BLC_PWM_CTL);
 
-               /* Note that this can also get called through dpms changes. And
-                * we don't track the backlight dpms state, hence check whether
-                * we have to do anything first. */
-               if (tmp & BLM_PWM_ENABLE)
-                       goto set_level;
+       /* XXX: combine this into above write? */
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
 
-               if (INTEL_INFO(dev)->num_pipes == 3)
-                       tmp &= ~BLM_PIPE_SELECT_IVB;
-               else
-                       tmp &= ~BLM_PIPE_SELECT;
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       enum pipe pipe = intel_get_pipe_from_connector(connector);
+       u32 ctl, ctl2, freq;
 
-               if (cpu_transcoder == TRANSCODER_EDP)
-                       tmp |= BLM_TRANSCODER_EDP;
-               else
-                       tmp |= BLM_PIPE(cpu_transcoder);
-               tmp &= ~BLM_PWM_ENABLE;
-
-               I915_WRITE(reg, tmp);
-               POSTING_READ(reg);
-               I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
-
-               if (IS_BROADWELL(dev)) {
-                       /*
-                        * Broadwell requires PCH override to drive the PCH
-                        * backlight pin. The above will configure the CPU
-                        * backlight pin, which we don't plan to use.
-                        */
-                       tmp = I915_READ(BLC_PWM_PCH_CTL1);
-                       tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
-                       I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-               } else if (HAS_PCH_SPLIT(dev) &&
-                   !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
-                       tmp = I915_READ(BLC_PWM_PCH_CTL1);
-                       tmp |= BLM_PCH_PWM_ENABLE;
-                       tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
-                       I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-               }
+       ctl2 = I915_READ(BLC_PWM_CTL2);
+       if (ctl2 & BLM_PWM_ENABLE) {
+               WARN(1, "backlight already enabled\n");
+               ctl2 &= ~BLM_PWM_ENABLE;
+               I915_WRITE(BLC_PWM_CTL2, ctl2);
        }
 
-set_level:
-       /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
-        * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
-        * registers are set.
-        */
-       dev_priv->backlight.enabled = true;
-       intel_panel_actually_set_backlight(dev, pipe,
-                                          dev_priv->backlight.level);
+       freq = panel->backlight.max;
+       if (panel->backlight.combination_mode)
+               freq /= 0xff;
+
+       ctl = freq << 16;
+       I915_WRITE(BLC_PWM_CTL, ctl);
+
+       /* XXX: combine this into above write? */
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+       ctl2 = BLM_PIPE(pipe);
+       if (panel->backlight.combination_mode)
+               ctl2 |= BLM_COMBINATION_MODE;
+       if (panel->backlight.active_low_pwm)
+               ctl2 |= BLM_POLARITY_I965;
+       I915_WRITE(BLC_PWM_CTL2, ctl2);
+       POSTING_READ(BLC_PWM_CTL2);
+       I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
 }
 
-/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
-static void intel_panel_init_backlight_regs(struct drm_device *dev)
+static void vlv_enable_backlight(struct intel_connector *connector)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       enum pipe pipe = intel_get_pipe_from_connector(connector);
+       u32 ctl, ctl2;
 
-       if (IS_VALLEYVIEW(dev)) {
-               enum pipe pipe;
+       ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+       if (ctl2 & BLM_PWM_ENABLE) {
+               WARN(1, "backlight already enabled\n");
+               ctl2 &= ~BLM_PWM_ENABLE;
+               I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+       }
 
-               for_each_pipe(pipe) {
-                       u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+       ctl = panel->backlight.max << 16;
+       I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
 
-                       /* Skip if the modulation freq is already set */
-                       if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
-                               continue;
+       /* XXX: combine this into above write? */
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
-                       cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
-                       I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
-                                  cur_val);
-               }
-       }
+       ctl2 = 0;
+       if (panel->backlight.active_low_pwm)
+               ctl2 |= BLM_POLARITY_I965;
+       I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+       POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+       I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
 }
 
-static void intel_panel_init_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct intel_connector *connector)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       enum pipe pipe = intel_get_pipe_from_connector(connector);
+       unsigned long flags;
+
+       if (!panel->backlight.present || pipe == INVALID_PIPE)
+               return;
+
+       DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+       WARN_ON(panel->backlight.max == 0);
 
-       intel_panel_init_backlight_regs(dev);
+       if (panel->backlight.level == 0) {
+               panel->backlight.level = panel->backlight.max;
+               if (panel->backlight.device)
+                       panel->backlight.device->props.brightness =
+                               panel->backlight.level;
+       }
+
+       dev_priv->display.enable_backlight(connector);
+       panel->backlight.enabled = true;
 
-       dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
-       dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
 
 enum drm_connector_status
@@ -762,7 +827,7 @@ intel_panel_detect(struct drm_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static int intel_panel_update_status(struct backlight_device *bd)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
 {
        struct intel_connector *connector = bl_get_data(bd);
        struct drm_device *dev = connector->base.dev;
@@ -776,85 +841,362 @@ static int intel_panel_update_status(struct backlight_device *bd)
        return 0;
 }
 
-static int intel_panel_get_brightness(struct backlight_device *bd)
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
        struct intel_connector *connector = bl_get_data(bd);
        struct drm_device *dev = connector->base.dev;
-       enum pipe pipe;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
 
+       intel_runtime_pm_get(dev_priv);
        mutex_lock(&dev->mode_config.mutex);
-       pipe = intel_get_pipe_from_connector(connector);
+       ret = intel_panel_get_backlight(connector);
        mutex_unlock(&dev->mode_config.mutex);
-       if (pipe == INVALID_PIPE)
-               return 0;
+       intel_runtime_pm_put(dev_priv);
 
-       return intel_panel_get_backlight(connector->base.dev, pipe);
+       return ret;
 }
 
-static const struct backlight_ops intel_panel_bl_ops = {
-       .update_status = intel_panel_update_status,
-       .get_brightness = intel_panel_get_brightness,
+static const struct backlight_ops intel_backlight_device_ops = {
+       .update_status = intel_backlight_device_update_status,
+       .get_brightness = intel_backlight_device_get_brightness,
 };
 
-int intel_panel_setup_backlight(struct drm_connector *connector)
+static int intel_backlight_device_register(struct intel_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
        struct backlight_properties props;
-       unsigned long flags;
 
-       intel_panel_init_backlight(dev);
-
-       if (WARN_ON(dev_priv->backlight.device))
+       if (WARN_ON(panel->backlight.device))
                return -ENODEV;
 
+       BUG_ON(panel->backlight.max == 0);
+
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
-       props.brightness = dev_priv->backlight.level;
+       props.brightness = panel->backlight.level;
+       props.max_brightness = panel->backlight.max;
 
-       spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-       props.max_brightness = intel_panel_get_max_backlight(dev, 0);
-       spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
-       if (props.max_brightness == 0) {
-               DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
-               return -ENODEV;
-       }
-       dev_priv->backlight.device =
+       /*
+        * Note: using the same name independent of the connector prevents
+        * registration of multiple backlight devices in the driver.
+        */
+       panel->backlight.device =
                backlight_device_register("intel_backlight",
-                                         connector->kdev,
-                                         to_intel_connector(connector),
-                                         &intel_panel_bl_ops, &props);
+                                         connector->base.kdev,
+                                         connector,
+                                         &intel_backlight_device_ops, &props);
 
-       if (IS_ERR(dev_priv->backlight.device)) {
+       if (IS_ERR(panel->backlight.device)) {
                DRM_ERROR("Failed to register backlight: %ld\n",
-                         PTR_ERR(dev_priv->backlight.device));
-               dev_priv->backlight.device = NULL;
+                         PTR_ERR(panel->backlight.device));
+               panel->backlight.device = NULL;
                return -ENODEV;
        }
        return 0;
 }
 
-void intel_panel_destroy_backlight(struct drm_device *dev)
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+       struct intel_panel *panel = &connector->panel;
+
+       if (panel->backlight.device) {
+               backlight_device_unregister(panel->backlight.device);
+               panel->backlight.device = NULL;
+       }
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+       return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ *
+ * XXX: Query mode clock or hardware clock and program PWM modulation frequency
+ * appropriately when it's 0. Use VBT and/or sane defaults.
+ */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 pch_ctl1, pch_ctl2, val;
+
+       pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+       panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+       pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+       panel->backlight.max = pch_ctl2 >> 16;
+       if (!panel->backlight.max)
+               return -ENODEV;
+
+       val = bdw_get_backlight(connector);
+       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+       panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+               panel->backlight.level != 0;
+
+       return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+       pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+       panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+       pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+       panel->backlight.max = pch_ctl2 >> 16;
+       if (!panel->backlight.max)
+               return -ENODEV;
+
+       val = pch_get_backlight(connector);
+       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+       cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+       panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+               (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+       return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 ctl, val;
+
+       ctl = I915_READ(BLC_PWM_CTL);
+
+       if (IS_GEN2(dev))
+               panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+       if (IS_PINEVIEW(dev))
+               panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+       panel->backlight.max = ctl >> 17;
+       if (panel->backlight.combination_mode)
+               panel->backlight.max *= 0xff;
+
+       if (!panel->backlight.max)
+               return -ENODEV;
+
+       val = i9xx_get_backlight(connector);
+       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+       panel->backlight.enabled = panel->backlight.level != 0;
+
+       return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_panel *panel = &connector->panel;
+       u32 ctl, ctl2, val;
+
+       ctl2 = I915_READ(BLC_PWM_CTL2);
+       panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+       panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+       ctl = I915_READ(BLC_PWM_CTL);
+       panel->backlight.max = ctl >> 16;
+       if (panel->backlight.combination_mode)
+               panel->backlight.max *= 0xff;
+
+       if (!panel->backlight.max)
+               return -ENODEV;
+
+       val = i9xx_get_backlight(connector);
+       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+       panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+               panel->backlight.level != 0;
+
+       return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector)
 {
+       struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       if (dev_priv->backlight.device) {
-               backlight_device_unregister(dev_priv->backlight.device);
-               dev_priv->backlight.device = NULL;
+       struct intel_panel *panel = &connector->panel;
+       enum pipe pipe;
+       u32 ctl, ctl2, val;
+
+       for_each_pipe(pipe) {
+               u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+               /* Skip if the modulation freq is already set */
+               if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+                       continue;
+
+               cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+               I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+                          cur_val);
        }
+
+       ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+       panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+       ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+       panel->backlight.max = ctl >> 16;
+       if (!panel->backlight.max)
+               return -ENODEV;
+
+       val = _vlv_get_backlight(dev, PIPE_A);
+       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+       panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+               panel->backlight.level != 0;
+
+       return 0;
 }
-#else
+
 int intel_panel_setup_backlight(struct drm_connector *connector)
 {
-       intel_panel_init_backlight(connector->dev);
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_panel *panel = &intel_connector->panel;
+       unsigned long flags;
+       int ret;
+
+       /* set level and max in panel struct */
+       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       ret = dev_priv->display.setup_backlight(intel_connector);
+       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+
+       if (ret) {
+               DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+                             drm_get_connector_name(connector));
+               return ret;
+       }
+
+       intel_backlight_device_register(intel_connector);
+
+       panel->backlight.present = true;
+
+       DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
+                     "sysfs interface %sregistered\n",
+                     panel->backlight.enabled ? "enabled" : "disabled",
+                     panel->backlight.level, panel->backlight.max,
+                     panel->backlight.device ? "" : "not ");
+
        return 0;
 }
 
-void intel_panel_destroy_backlight(struct drm_device *dev)
+void intel_panel_destroy_backlight(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_panel *panel = &intel_connector->panel;
+
+       panel->backlight.present = false;
+       intel_backlight_device_unregister(intel_connector);
+}
+
+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode : panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Return downclock_avail
+ * Find the reduced downclock for LVDS/eDP in EDID.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+                       struct drm_display_mode *fixed_mode,
+                       struct drm_connector *connector)
+{
+       struct drm_display_mode *scan, *tmp_mode;
+       int temp_downclock;
+
+       temp_downclock = fixed_mode->clock;
+       tmp_mode = NULL;
+
+       list_for_each_entry(scan, &connector->probed_modes, head) {
+               /*
+                * If one mode has the same resolution with the fixed_panel
+                * mode while they have the different refresh rate, it means
+                * that the reduced downclock is found. In such
+                * case we can set the different FPx0/1 to dynamically select
+                * between low and high frequency.
+                */
+               if (scan->hdisplay == fixed_mode->hdisplay &&
+                   scan->hsync_start == fixed_mode->hsync_start &&
+                   scan->hsync_end == fixed_mode->hsync_end &&
+                   scan->htotal == fixed_mode->htotal &&
+                   scan->vdisplay == fixed_mode->vdisplay &&
+                   scan->vsync_start == fixed_mode->vsync_start &&
+                   scan->vsync_end == fixed_mode->vsync_end &&
+                   scan->vtotal == fixed_mode->vtotal) {
+                       if (scan->clock < temp_downclock) {
+                               /*
+                                * The downclock is already found. But we
+                                * expect to find the lower downclock.
+                                */
+                               temp_downclock = scan->clock;
+                               tmp_mode = scan;
+                       }
+               }
+       }
+
+       if (temp_downclock < fixed_mode->clock)
+               return drm_mode_duplicate(dev, tmp_mode);
+       else
+               return NULL;
+}
+
+/* Set up chip specific backlight functions */
+void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
-       return;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_BROADWELL(dev)) {
+               dev_priv->display.setup_backlight = bdw_setup_backlight;
+               dev_priv->display.enable_backlight = bdw_enable_backlight;
+               dev_priv->display.disable_backlight = pch_disable_backlight;
+               dev_priv->display.set_backlight = bdw_set_backlight;
+               dev_priv->display.get_backlight = bdw_get_backlight;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.setup_backlight = pch_setup_backlight;
+               dev_priv->display.enable_backlight = pch_enable_backlight;
+               dev_priv->display.disable_backlight = pch_disable_backlight;
+               dev_priv->display.set_backlight = pch_set_backlight;
+               dev_priv->display.get_backlight = pch_get_backlight;
+       } else if (IS_VALLEYVIEW(dev)) {
+               dev_priv->display.setup_backlight = vlv_setup_backlight;
+               dev_priv->display.enable_backlight = vlv_enable_backlight;
+               dev_priv->display.disable_backlight = vlv_disable_backlight;
+               dev_priv->display.set_backlight = vlv_set_backlight;
+               dev_priv->display.get_backlight = vlv_get_backlight;
+       } else if (IS_GEN4(dev)) {
+               dev_priv->display.setup_backlight = i965_setup_backlight;
+               dev_priv->display.enable_backlight = i965_enable_backlight;
+               dev_priv->display.disable_backlight = i965_disable_backlight;
+               dev_priv->display.set_backlight = i9xx_set_backlight;
+               dev_priv->display.get_backlight = i9xx_get_backlight;
+       } else {
+               dev_priv->display.setup_backlight = i9xx_setup_backlight;
+               dev_priv->display.enable_backlight = i9xx_enable_backlight;
+               dev_priv->display.disable_backlight = i9xx_disable_backlight;
+               dev_priv->display.set_backlight = i9xx_set_backlight;
+               dev_priv->display.get_backlight = i9xx_get_backlight;
+       }
 }
-#endif
 
 int intel_panel_init(struct intel_panel *panel,
                     struct drm_display_mode *fixed_mode)
@@ -871,4 +1213,8 @@ void intel_panel_fini(struct intel_panel *panel)
 
        if (panel->fixed_mode)
                drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+
+       if (panel->downclock_mode)
+               drm_mode_destroy(intel_connector->base.dev,
+                               panel->downclock_mode);
 }
index 26c29c173221058aeeaa9a3c0d889500b8db9fec..d77cc81900f92100ba2f61d7130bc9b0b60454b5 100644 (file)
@@ -30,7 +30,9 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
+#include <linux/vgaarb.h>
 #include <drm/i915_powerwell.h>
+#include <linux/pm_runtime.h>
 
 /**
  * RC6 is a special power stage which allows the GPU to enter an very
@@ -86,7 +88,7 @@ static void i8xx_disable_fbc(struct drm_device *dev)
        DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void i8xx_enable_fbc(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -96,32 +98,40 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int plane, i;
-       u32 fbc_ctl, fbc_ctl2;
+       u32 fbc_ctl;
 
        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
 
-       /* FBC_CTL wants 64B units */
-       cfb_pitch = (cfb_pitch / 64) - 1;
+       /* FBC_CTL wants 32B or 64B units */
+       if (IS_GEN2(dev))
+               cfb_pitch = (cfb_pitch / 32) - 1;
+       else
+               cfb_pitch = (cfb_pitch / 64) - 1;
        plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);
 
-       /* Set it up... */
-       fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-       fbc_ctl2 |= plane;
-       I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-       I915_WRITE(FBC_FENCE_OFF, crtc->y);
+       if (IS_GEN4(dev)) {
+               u32 fbc_ctl2;
+
+               /* Set it up... */
+               fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+               fbc_ctl2 |= plane;
+               I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+               I915_WRITE(FBC_FENCE_OFF, crtc->y);
+       }
 
        /* enable it... */
-       fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+       fbc_ctl = I915_READ(FBC_CONTROL);
+       fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
+       fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
@@ -136,7 +146,7 @@ static bool i8xx_fbc_enabled(struct drm_device *dev)
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void g4x_enable_fbc(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -145,16 +155,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-       unsigned long stall_watermark = 200;
        u32 dpfc_ctl;
 
        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
        I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
-       I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-                  (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-                  (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
        /* enable it... */
@@ -191,7 +197,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
        u32 blt_ecoskpd;
 
        /* Make sure blitter notifies FBC of writes */
-       gen6_gt_force_wake_get(dev_priv);
+
+       /* Blitter is part of Media powerwell on VLV. No impact of
+        * his param in other platforms for now */
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
@@ -202,10 +212,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
-       gen6_gt_force_wake_put(dev_priv);
+
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
 }
 
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void ironlake_enable_fbc(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -214,7 +225,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
-       unsigned long stall_watermark = 200;
        u32 dpfc_ctl;
 
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
@@ -222,12 +232,11 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, ala X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-       dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+       dpfc_ctl |= DPFC_CTL_FENCE_EN;
+       if (IS_GEN5(dev))
+               dpfc_ctl |= obj->fence_reg;
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
-       I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
-                  (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
-                  (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
@@ -265,7 +274,7 @@ static bool ironlake_fbc_enabled(struct drm_device *dev)
        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void gen7_enable_fbc(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -295,7 +304,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        sandybridge_blit_fbc_update(dev);
 
-       DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
 
 bool intel_fbc_enabled(struct drm_device *dev)
@@ -322,8 +331,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
                 * the prior work.
                 */
                if (work->crtc->fb == work->fb) {
-                       dev_priv->display.enable_fbc(work->crtc,
-                                                    work->interval);
+                       dev_priv->display.enable_fbc(work->crtc);
 
                        dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
                        dev_priv->fbc.fb_id = work->crtc->fb->base.id;
@@ -360,7 +368,7 @@ static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
        dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc)
 {
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
@@ -374,13 +382,12 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                DRM_ERROR("Failed to allocate FBC work structure\n");
-               dev_priv->display.enable_fbc(crtc, interval);
+               dev_priv->display.enable_fbc(crtc);
                return;
        }
 
        work->crtc = crtc;
        work->fb = crtc->fb;
-       work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
        dev_priv->fbc.fbc_work = work;
@@ -454,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev)
        const struct drm_display_mode *adjusted_mode;
        unsigned int max_width, max_height;
 
-       if (!I915_HAS_FBC(dev)) {
+       if (!HAS_FBC(dev)) {
                set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
                return;
        }
@@ -530,10 +537,10 @@ void intel_update_fbc(struct drm_device *dev)
                        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
        }
-       if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
-           intel_crtc->plane != 0) {
+       if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
+           intel_crtc->plane != PLANE_A) {
                if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-                       DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+                       DRM_DEBUG_KMS("plane not A, disabling compression\n");
                goto out_disable;
        }
 
@@ -595,7 +602,7 @@ void intel_update_fbc(struct drm_device *dev)
                intel_disable_fbc(dev);
        }
 
-       intel_enable_fbc(crtc, 500);
+       intel_enable_fbc(crtc);
        dev_priv->fbc.no_fbc_reason = FBC_OK;
        return;
 
@@ -817,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
        return size;
 }
 
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
@@ -850,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
        return size;
 }
 
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dsparb = I915_READ(DSPARB);
-       int size;
-
-       size = dsparb & 0x7f;
-       size >>= 1; /* Convert to cachelines */
-
-       DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-                     plane ? "B" : "A", size);
-
-       return size;
-}
-
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
        PINEVIEW_DISPLAY_FIFO,
@@ -943,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = {
        2,
        I915_FIFO_LINE_SIZE
 };
-static const struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
        I855GM_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I830_FIFO_LINE_SIZE
 };
-static const struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i845_wm_info = {
        I830_FIFO_SIZE,
        I915_MAX_WM,
        1,
@@ -958,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = {
        I830_FIFO_LINE_SIZE
 };
 
-static const struct intel_watermark_params ironlake_display_wm_info = {
-       ILK_DISPLAY_FIFO,
-       ILK_DISPLAY_MAXWM,
-       ILK_DISPLAY_DFTWM,
-       2,
-       ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
-       ILK_CURSOR_FIFO,
-       ILK_CURSOR_MAXWM,
-       ILK_CURSOR_DFTWM,
-       2,
-       ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
-       ILK_DISPLAY_SR_FIFO,
-       ILK_DISPLAY_MAX_SRWM,
-       ILK_DISPLAY_DFT_SRWM,
-       2,
-       ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
-       ILK_CURSOR_SR_FIFO,
-       ILK_CURSOR_MAX_SRWM,
-       ILK_CURSOR_DFT_SRWM,
-       2,
-       ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
-       SNB_DISPLAY_FIFO,
-       SNB_DISPLAY_MAXWM,
-       SNB_DISPLAY_DFTWM,
-       2,
-       SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
-       SNB_CURSOR_FIFO,
-       SNB_CURSOR_MAXWM,
-       SNB_CURSOR_DFTWM,
-       2,
-       SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
-       SNB_DISPLAY_SR_FIFO,
-       SNB_DISPLAY_MAX_SRWM,
-       SNB_DISPLAY_DFT_SRWM,
-       2,
-       SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
-       SNB_CURSOR_SR_FIFO,
-       SNB_CURSOR_MAX_SRWM,
-       SNB_CURSOR_DFT_SRWM,
-       2,
-       SNB_FIFO_LINE_SIZE
-};
-
-
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
@@ -1567,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        else if (!IS_GEN2(dev))
                wm_info = &i915_wm_info;
        else
-               wm_info = &i855_wm_info;
+               wm_info = &i830_wm_info;
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1615,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        if (IS_I945G(dev) || IS_I945GM(dev))
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
        else if (IS_I915GM(dev))
-               I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+               I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
 
        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
@@ -1667,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
                                I915_WRITE(FW_BLC_SELF,
                                           FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
                        else if (IS_I915GM(dev))
-                               I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+                               I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
                        DRM_DEBUG_KMS("memory self refresh enabled\n");
                } else
                        DRM_DEBUG_KMS("memory self refresh disabled\n");
        }
 }
 
-static void i830_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1689,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
 
        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
-                                      &i830_wm_info,
+                                      &i845_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
                                       4, latency_ns);
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1700,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
        I915_WRITE(FW_BLC, fwater_lo);
 }
 
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
-                               int fbc_wm, int display_wm, int cursor_wm,
-                               const struct intel_watermark_params *display,
-                               const struct intel_watermark_params *cursor)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-                     " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
-       if (fbc_wm > SNB_FBC_MAX_SRWM) {
-               DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-                             fbc_wm, SNB_FBC_MAX_SRWM, level);
-
-               /* fbc has it's own way to disable FBC WM */
-               I915_WRITE(DISP_ARB_CTL,
-                          I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-               return false;
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               /* enable FBC WM (except on ILK, where it must remain off) */
-               I915_WRITE(DISP_ARB_CTL,
-                          I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
-       }
-
-       if (display_wm > display->max_wm) {
-               DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-                             display_wm, SNB_DISPLAY_MAX_SRWM, level);
-               return false;
-       }
-
-       if (cursor_wm > cursor->max_wm) {
-               DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-                             cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-               return false;
-       }
-
-       if (!(fbc_wm || display_wm || cursor_wm)) {
-               DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-               return false;
-       }
-
-       return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
-                                 int latency_ns,
-                                 const struct intel_watermark_params *display,
-                                 const struct intel_watermark_params *cursor,
-                                 int *fbc_wm, int *display_wm, int *cursor_wm)
-{
-       struct drm_crtc *crtc;
-       const struct drm_display_mode *adjusted_mode;
-       unsigned long line_time_us;
-       int hdisplay, htotal, pixel_size, clock;
-       int line_count, line_size;
-       int small, large;
-       int entries;
-
-       if (!latency_ns) {
-               *fbc_wm = *display_wm = *cursor_wm = 0;
-               return false;
-       }
-
-       crtc = intel_get_crtc_for_plane(dev, plane);
-       adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
-       clock = adjusted_mode->crtc_clock;
-       htotal = adjusted_mode->crtc_htotal;
-       hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
-       pixel_size = crtc->fb->bits_per_pixel / 8;
-
-       line_time_us = (htotal * 1000) / clock;
-       line_count = (latency_ns / line_time_us + 1000) / 1000;
-       line_size = hdisplay * pixel_size;
-
-       /* Use the minimum of the small and large buffer method for primary */
-       small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-       large = line_count * line_size;
-
-       entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
-       *display_wm = entries + display->guard_size;
-
-       /*
-        * Spec says:
-        * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
-        */
-       *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
-       /* calculate the self-refresh watermark for display cursor */
-       entries = line_count * pixel_size * 64;
-       entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
-       *cursor_wm = entries + cursor->guard_size;
-
-       return ironlake_check_srwm(dev, level,
-                                  *fbc_wm, *display_wm, *cursor_wm,
-                                  display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int fbc_wm, plane_wm, cursor_wm;
-       unsigned int enabled;
-
-       enabled = 0;
-       if (g4x_compute_wm0(dev, PIPE_A,
-                           &ironlake_display_wm_info,
-                           dev_priv->wm.pri_latency[0] * 100,
-                           &ironlake_cursor_wm_info,
-                           dev_priv->wm.cur_latency[0] * 100,
-                           &plane_wm, &cursor_wm)) {
-               I915_WRITE(WM0_PIPEA_ILK,
-                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-               DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-                             " plane %d, " "cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_A;
-       }
-
-       if (g4x_compute_wm0(dev, PIPE_B,
-                           &ironlake_display_wm_info,
-                           dev_priv->wm.pri_latency[0] * 100,
-                           &ironlake_cursor_wm_info,
-                           dev_priv->wm.cur_latency[0] * 100,
-                           &plane_wm, &cursor_wm)) {
-               I915_WRITE(WM0_PIPEB_ILK,
-                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
-               DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-                             " plane %d, cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_B;
-       }
-
-       /*
-        * Calculate and update the self-refresh watermark only when one
-        * display plane is used.
-        */
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
-
-       if (!single_plane_enabled(enabled))
-               return;
-       enabled = ffs(enabled) - 1;
-
-       /* WM1 */
-       if (!ironlake_compute_srwm(dev, 1, enabled,
-                                  dev_priv->wm.pri_latency[1] * 500,
-                                  &ironlake_display_srwm_info,
-                                  &ironlake_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM1_LP_ILK,
-                  WM1_LP_SR_EN |
-                  (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-
-       /* WM2 */
-       if (!ironlake_compute_srwm(dev, 2, enabled,
-                                  dev_priv->wm.pri_latency[2] * 500,
-                                  &ironlake_display_srwm_info,
-                                  &ironlake_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM2_LP_ILK,
-                  WM2_LP_EN |
-                  (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-
-       /*
-        * WM3 is unsupported on ILK, probably because we don't have latency
-        * data for that power state
-        */
-}
-
-static void sandybridge_update_wm(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int latency = dev_priv->wm.pri_latency[0] * 100;        /* In unit 0.1us */
-       u32 val;
-       int fbc_wm, plane_wm, cursor_wm;
-       unsigned int enabled;
-
-       enabled = 0;
-       if (g4x_compute_wm0(dev, PIPE_A,
-                           &sandybridge_display_wm_info, latency,
-                           &sandybridge_cursor_wm_info, latency,
-                           &plane_wm, &cursor_wm)) {
-               val = I915_READ(WM0_PIPEA_ILK);
-               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEA_ILK, val |
-                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-               DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-                             " plane %d, " "cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_A;
-       }
-
-       if (g4x_compute_wm0(dev, PIPE_B,
-                           &sandybridge_display_wm_info, latency,
-                           &sandybridge_cursor_wm_info, latency,
-                           &plane_wm, &cursor_wm)) {
-               val = I915_READ(WM0_PIPEB_ILK);
-               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEB_ILK, val |
-                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-               DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-                             " plane %d, cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_B;
-       }
-
-       /*
-        * Calculate and update the self-refresh watermark only when one
-        * display plane is used.
-        *
-        * SNB support 3 levels of watermark.
-        *
-        * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-        * and disabled in the descending order
-        *
-        */
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
-
-       if (!single_plane_enabled(enabled) ||
-           dev_priv->sprite_scaling_enabled)
-               return;
-       enabled = ffs(enabled) - 1;
-
-       /* WM1 */
-       if (!ironlake_compute_srwm(dev, 1, enabled,
-                                  dev_priv->wm.pri_latency[1] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM1_LP_ILK,
-                  WM1_LP_SR_EN |
-                  (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-
-       /* WM2 */
-       if (!ironlake_compute_srwm(dev, 2, enabled,
-                                  dev_priv->wm.pri_latency[2] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM2_LP_ILK,
-                  WM2_LP_EN |
-                  (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-
-       /* WM3 */
-       if (!ironlake_compute_srwm(dev, 3, enabled,
-                                  dev_priv->wm.pri_latency[3] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM3_LP_ILK,
-                  WM3_LP_EN |
-                  (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-}
-
-static void ivybridge_update_wm(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int latency = dev_priv->wm.pri_latency[0] * 100;        /* In unit 0.1us */
-       u32 val;
-       int fbc_wm, plane_wm, cursor_wm;
-       int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
-       unsigned int enabled;
-
-       enabled = 0;
-       if (g4x_compute_wm0(dev, PIPE_A,
-                           &sandybridge_display_wm_info, latency,
-                           &sandybridge_cursor_wm_info, latency,
-                           &plane_wm, &cursor_wm)) {
-               val = I915_READ(WM0_PIPEA_ILK);
-               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEA_ILK, val |
-                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-               DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
-                             " plane %d, " "cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_A;
-       }
-
-       if (g4x_compute_wm0(dev, PIPE_B,
-                           &sandybridge_display_wm_info, latency,
-                           &sandybridge_cursor_wm_info, latency,
-                           &plane_wm, &cursor_wm)) {
-               val = I915_READ(WM0_PIPEB_ILK);
-               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEB_ILK, val |
-                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-               DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
-                             " plane %d, cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_B;
-       }
-
-       if (g4x_compute_wm0(dev, PIPE_C,
-                           &sandybridge_display_wm_info, latency,
-                           &sandybridge_cursor_wm_info, latency,
-                           &plane_wm, &cursor_wm)) {
-               val = I915_READ(WM0_PIPEC_IVB);
-               val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEC_IVB, val |
-                          ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
-               DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
-                             " plane %d, cursor: %d\n",
-                             plane_wm, cursor_wm);
-               enabled |= 1 << PIPE_C;
-       }
-
-       /*
-        * Calculate and update the self-refresh watermark only when one
-        * display plane is used.
-        *
-        * SNB support 3 levels of watermark.
-        *
-        * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
-        * and disabled in the descending order
-        *
-        */
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
-
-       if (!single_plane_enabled(enabled) ||
-           dev_priv->sprite_scaling_enabled)
-               return;
-       enabled = ffs(enabled) - 1;
-
-       /* WM1 */
-       if (!ironlake_compute_srwm(dev, 1, enabled,
-                                  dev_priv->wm.pri_latency[1] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM1_LP_ILK,
-                  WM1_LP_SR_EN |
-                  (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-
-       /* WM2 */
-       if (!ironlake_compute_srwm(dev, 2, enabled,
-                                  dev_priv->wm.pri_latency[2] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM2_LP_ILK,
-                  WM2_LP_EN |
-                  (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-
-       /* WM3, note we have to correct the cursor latency */
-       if (!ironlake_compute_srwm(dev, 3, enabled,
-                                  dev_priv->wm.pri_latency[3] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
-           !ironlake_compute_srwm(dev, 3, enabled,
-                                  dev_priv->wm.cur_latency[3] * 500,
-                                  &sandybridge_display_srwm_info,
-                                  &sandybridge_cursor_srwm_info,
-                                  &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
-               return;
-
-       I915_WRITE(WM3_LP_ILK,
-                  WM3_LP_EN |
-                  (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
-                  (fbc_wm << WM1_LP_FBC_SHIFT) |
-                  (plane_wm << WM1_LP_SR_SHIFT) |
-                  cursor_wm);
-}
-
 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
                                    struct drm_crtc *crtc)
 {
@@ -2185,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
        return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
 }
 
-struct hsw_pipe_wm_parameters {
+struct ilk_pipe_wm_parameters {
        bool active;
        uint32_t pipe_htotal;
        uint32_t pixel_rate;
@@ -2194,7 +1710,7 @@ struct hsw_pipe_wm_parameters {
        struct intel_plane_wm_parameters cur;
 };
 
-struct hsw_wm_maximums {
+struct ilk_wm_maximums {
        uint16_t pri;
        uint16_t spr;
        uint16_t cur;
@@ -2212,7 +1728,7 @@ struct intel_wm_config {
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t mem_value,
                                   bool is_lp)
 {
@@ -2241,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t mem_value)
 {
        uint32_t method1, method2;
@@ -2264,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t mem_value)
 {
        if (!params->active || !params->cur.enabled)
@@ -2278,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
                                   uint32_t pri_val)
 {
        if (!params->active || !params->pri.enabled)
@@ -2383,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
                                    int level,
                                    const struct intel_wm_config *config,
                                    enum intel_ddb_partitioning ddb_partitioning,
-                                   struct hsw_wm_maximums *max)
+                                   struct ilk_wm_maximums *max)
 {
        max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
        max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2392,7 +1908,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
 }
 
 static bool ilk_validate_wm_level(int level,
-                                 const struct hsw_wm_maximums *max,
+                                 const struct ilk_wm_maximums *max,
                                  struct intel_wm_level *result)
 {
        bool ret;
@@ -2434,7 +1950,7 @@ static bool ilk_validate_wm_level(int level,
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
                                 int level,
-                                const struct hsw_pipe_wm_parameters *p,
+                                const struct ilk_pipe_wm_parameters *p,
                                 struct intel_wm_level *result)
 {
        uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2482,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_HASWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
                wm[0] = (sskpd >> 56) & 0xFF;
@@ -2530,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 static int ilk_wm_max_level(const struct drm_device *dev)
 {
        /* how many WM levels are we expecting */
-       if (IS_HASWELL(dev))
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                return 4;
        else if (INTEL_INFO(dev)->gen >= 6)
                return 3;
@@ -2582,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev)
        intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
-                                     struct hsw_pipe_wm_parameters *p,
+static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
+                                     struct ilk_pipe_wm_parameters *p,
                                      struct intel_wm_config *config)
 {
        struct drm_device *dev = crtc->dev;
@@ -2593,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
 
        p->active = intel_crtc_active(crtc);
        if (p->active) {
-               p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+               p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
                p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
                p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
                p->cur.bytes_per_pixel = 4;
@@ -2620,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
 
 /* Compute new watermarks for the pipe */
 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
-                                 const struct hsw_pipe_wm_parameters *params,
+                                 const struct ilk_pipe_wm_parameters *params,
                                  struct intel_pipe_wm *pipe_wm)
 {
        struct drm_device *dev = crtc->dev;
@@ -2632,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
                .sprites_enabled = params->spr.enabled,
                .sprites_scaled = params->spr.scaled,
        };
-       struct hsw_wm_maximums max;
+       struct ilk_wm_maximums max;
 
        /* LP0 watermarks always use 1/2 DDB partitioning */
        ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
+       /* ILK/SNB: LP2+ watermarks only w/o sprites */
+       if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+               max_level = 1;
+
+       /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+       if (params->spr.scaled)
+               max_level = 0;
+
        for (level = 0; level <= max_level; level++)
                ilk_compute_wm_level(dev_priv, level, params,
                                     &pipe_wm->wm[level]);
 
-       pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
        /* At least LP0 must be valid */
        return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
@@ -2676,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
  * Merge all low power watermarks for all active pipes.
  */
 static void ilk_wm_merge(struct drm_device *dev,
-                        const struct hsw_wm_maximums *max,
+                        const struct intel_wm_config *config,
+                        const struct ilk_wm_maximums *max,
                         struct intel_pipe_wm *merged)
 {
        int level, max_level = ilk_wm_max_level(dev);
 
-       merged->fbc_wm_enabled = true;
+       /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+       if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+           config->num_pipes_active > 1)
+               return;
+
+       /* ILK: FBC WM must be disabled always */
+       merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
 
        /* merge each WM1+ level */
        for (level = 1; level <= max_level; level++) {
@@ -2701,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev,
                        wm->fbc_val = 0;
                }
        }
+
+       /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+       /*
+        * FIXME this is racy. FBC might get enabled later.
+        * What we should check here is whether FBC can be
+        * enabled sometime later.
+        */
+       if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+               for (level = 2; level <= max_level; level++) {
+                       struct intel_wm_level *wm = &merged->wm[level];
+
+                       wm->enable = false;
+               }
+       }
 }
 
 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
@@ -2709,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
        return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
 }
 
-static void hsw_compute_wm_results(struct drm_device *dev,
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               return 2 * level;
+       else
+               return dev_priv->wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_device *dev,
                                   const struct intel_pipe_wm *merged,
                                   enum intel_ddb_partitioning partitioning,
-                                  struct hsw_wm_values *results)
+                                  struct ilk_wm_values *results)
 {
        struct intel_crtc *intel_crtc;
        int level, wm_lp;
@@ -2731,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
                        break;
 
                results->wm_lp[wm_lp - 1] = WM3_LP_EN |
-                       ((level * 2) << WM1_LP_LATENCY_SHIFT) |
+                       (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
                        (r->pri_val << WM1_LP_SR_SHIFT) |
                        r->cur_val;
 
@@ -2742,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev,
                        results->wm_lp[wm_lp - 1] |=
                                r->fbc_val << WM1_LP_FBC_SHIFT;
 
-               results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+               if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+                       WARN_ON(wm_lp != 1);
+                       results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
+               } else
+                       results->wm_lp_spr[wm_lp - 1] = r->spr_val;
        }
 
        /* LP0 register values */
@@ -2765,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
                                                  struct intel_pipe_wm *r1,
                                                  struct intel_pipe_wm *r2)
 {
@@ -2800,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
 #define WM_DIRTY_DDB (1 << 25)
 
 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
-                                        const struct hsw_wm_values *old,
-                                        const struct hsw_wm_values *new)
+                                        const struct ilk_wm_values *old,
+                                        const struct ilk_wm_values *new)
 {
        unsigned int dirty = 0;
        enum pipe pipe;
@@ -2851,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
        return dirty;
 }
 
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+                              unsigned int dirty)
+{
+       struct ilk_wm_values *previous = &dev_priv->wm.hw;
+       bool changed = false;
+
+       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
+               previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+               I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+               changed = true;
+       }
+       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
+               previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+               I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+               changed = true;
+       }
+       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
+               previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+               I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+               changed = true;
+       }
+
+       /*
+        * Don't touch WM1S_LP_EN here.
+        * Doing so could cause underruns.
+        */
+
+       return changed;
+}
+
 /*
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
-static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-                               struct hsw_wm_values *results)
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+                               struct ilk_wm_values *results)
 {
-       struct hsw_wm_values *previous = &dev_priv->wm.hw;
+       struct drm_device *dev = dev_priv->dev;
+       struct ilk_wm_values *previous = &dev_priv->wm.hw;
        unsigned int dirty;
        uint32_t val;
 
-       dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+       dirty = ilk_compute_wm_dirty(dev, previous, results);
        if (!dirty)
                return;
 
-       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
-               I915_WRITE(WM3_LP_ILK, 0);
-       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
-               I915_WRITE(WM2_LP_ILK, 0);
-       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
-               I915_WRITE(WM1_LP_ILK, 0);
+       _ilk_disable_lp_wm(dev_priv, dirty);
 
        if (dirty & WM_DIRTY_PIPE(PIPE_A))
                I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -2888,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
                I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
        if (dirty & WM_DIRTY_DDB) {
-               val = I915_READ(WM_MISC);
-               if (results->partitioning == INTEL_DDB_PART_1_2)
-                       val &= ~WM_MISC_DATA_PARTITION_5_6;
-               else
-                       val |= WM_MISC_DATA_PARTITION_5_6;
-               I915_WRITE(WM_MISC, val);
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+                       val = I915_READ(WM_MISC);
+                       if (results->partitioning == INTEL_DDB_PART_1_2)
+                               val &= ~WM_MISC_DATA_PARTITION_5_6;
+                       else
+                               val |= WM_MISC_DATA_PARTITION_5_6;
+                       I915_WRITE(WM_MISC, val);
+               } else {
+                       val = I915_READ(DISP_ARB_CTL2);
+                       if (results->partitioning == INTEL_DDB_PART_1_2)
+                               val &= ~DISP_DATA_PARTITION_5_6;
+                       else
+                               val |= DISP_DATA_PARTITION_5_6;
+                       I915_WRITE(DISP_ARB_CTL2, val);
+               }
        }
 
        if (dirty & WM_DIRTY_FBC) {
@@ -2905,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
                I915_WRITE(DISP_ARB_CTL, val);
        }
 
-       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+       if (dirty & WM_DIRTY_LP(1) &&
+           previous->wm_lp_spr[0] != results->wm_lp_spr[0])
                I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
-               I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
-               I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-       if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
+       if (INTEL_INFO(dev)->gen >= 7) {
+               if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+                       I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+               if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+                       I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+       }
+
+       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
                I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-       if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
+       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
                I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-       if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
+       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
                I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
 
        dev_priv->wm.hw = *results;
 }
 
-static void haswell_update_wm(struct drm_crtc *crtc)
+static bool ilk_disable_lp_wm(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_update_wm(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct hsw_wm_maximums max;
-       struct hsw_pipe_wm_parameters params = {};
-       struct hsw_wm_values results = {};
+       struct ilk_wm_maximums max;
+       struct ilk_pipe_wm_parameters params = {};
+       struct ilk_wm_values results = {};
        enum intel_ddb_partitioning partitioning;
        struct intel_pipe_wm pipe_wm = {};
        struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
        struct intel_wm_config config = {};
 
-       hsw_compute_wm_parameters(crtc, &params, &config);
+       ilk_compute_wm_parameters(crtc, &params, &config);
 
        intel_compute_pipe_wm(crtc, &params, &pipe_wm);
 
@@ -2945,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc)
        intel_crtc->wm.active = pipe_wm;
 
        ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
-       ilk_wm_merge(dev, &max, &lp_wm_1_2);
+       ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
        /* 5/6 split only in single pipe config on IVB+ */
        if (INTEL_INFO(dev)->gen >= 7 &&
            config.num_pipes_active == 1 && config.sprites_enabled) {
                ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
-               ilk_wm_merge(dev, &max, &lp_wm_5_6);
+               ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
 
-               best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+               best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
        } else {
                best_lp_wm = &lp_wm_1_2;
        }
@@ -2961,193 +2568,42 @@ static void haswell_update_wm(struct drm_crtc *crtc)
        partitioning = (best_lp_wm == &lp_wm_1_2) ?
                       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-       hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+       ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
 
-       hsw_write_wm_values(dev_priv, &results);
+       ilk_write_wm_values(dev_priv, &results);
 }
 
-static void haswell_update_sprite_wm(struct drm_plane *plane,
+static void ilk_update_sprite_wm(struct drm_plane *plane,
                                     struct drm_crtc *crtc,
                                     uint32_t sprite_width, int pixel_size,
                                     bool enabled, bool scaled)
-{
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-
-       intel_plane->wm.enabled = enabled;
-       intel_plane->wm.scaled = scaled;
-       intel_plane->wm.horiz_pixels = sprite_width;
-       intel_plane->wm.bytes_per_pixel = pixel_size;
-
-       haswell_update_wm(crtc);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
-                             uint32_t sprite_width, int pixel_size,
-                             const struct intel_watermark_params *display,
-                             int display_latency_ns, int *sprite_wm)
-{
-       struct drm_crtc *crtc;
-       int clock;
-       int entries, tlb_miss;
-
-       crtc = intel_get_crtc_for_plane(dev, plane);
-       if (!intel_crtc_active(crtc)) {
-               *sprite_wm = display->guard_size;
-               return false;
-       }
-
-       clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
-
-       /* Use the small buffer method to calculate the sprite watermark */
-       entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
-       tlb_miss = display->fifo_size*display->cacheline_size -
-               sprite_width * 8;
-       if (tlb_miss > 0)
-               entries += tlb_miss;
-       entries = DIV_ROUND_UP(entries, display->cacheline_size);
-       *sprite_wm = entries + display->guard_size;
-       if (*sprite_wm > (int)display->max_wm)
-               *sprite_wm = display->max_wm;
-
-       return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
-                               uint32_t sprite_width, int pixel_size,
-                               const struct intel_watermark_params *display,
-                               int latency_ns, int *sprite_wm)
-{
-       struct drm_crtc *crtc;
-       unsigned long line_time_us;
-       int clock;
-       int line_count, line_size;
-       int small, large;
-       int entries;
-
-       if (!latency_ns) {
-               *sprite_wm = 0;
-               return false;
-       }
-
-       crtc = intel_get_crtc_for_plane(dev, plane);
-       clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
-       if (!clock) {
-               *sprite_wm = 0;
-               return false;
-       }
-
-       line_time_us = (sprite_width * 1000) / clock;
-       if (!line_time_us) {
-               *sprite_wm = 0;
-               return false;
-       }
-
-       line_count = (latency_ns / line_time_us + 1000) / 1000;
-       line_size = sprite_width * pixel_size;
-
-       /* Use the minimum of the small and large buffer method for primary */
-       small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-       large = line_count * line_size;
-
-       entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
-       *sprite_wm = entries + display->guard_size;
-
-       return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_plane *plane,
-                                        struct drm_crtc *crtc,
-                                        uint32_t sprite_width, int pixel_size,
-                                        bool enabled, bool scaled)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = to_intel_plane(plane)->pipe;
-       int latency = dev_priv->wm.spr_latency[0] * 100;        /* In unit 0.1us */
-       u32 val;
-       int sprite_wm, reg;
-       int ret;
-
-       if (!enabled)
-               return;
-
-       switch (pipe) {
-       case 0:
-               reg = WM0_PIPEA_ILK;
-               break;
-       case 1:
-               reg = WM0_PIPEB_ILK;
-               break;
-       case 2:
-               reg = WM0_PIPEC_IVB;
-               break;
-       default:
-               return; /* bad pipe */
-       }
-
-       ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
-                                           &sandybridge_display_wm_info,
-                                           latency, &sprite_wm);
-       if (!ret) {
-               DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
-                             pipe_name(pipe));
-               return;
-       }
-
-       val = I915_READ(reg);
-       val &= ~WM0_PIPE_SPRITE_MASK;
-       I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
-       DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
-
-
-       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-                                             pixel_size,
-                                             &sandybridge_display_srwm_info,
-                                             dev_priv->wm.spr_latency[1] * 500,
-                                             &sprite_wm);
-       if (!ret) {
-               DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
-                             pipe_name(pipe));
-               return;
-       }
-       I915_WRITE(WM1S_LP_ILK, sprite_wm);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
 
-       /* Only IVB has two more LP watermarks for sprite */
-       if (!IS_IVYBRIDGE(dev))
-               return;
+       intel_plane->wm.enabled = enabled;
+       intel_plane->wm.scaled = scaled;
+       intel_plane->wm.horiz_pixels = sprite_width;
+       intel_plane->wm.bytes_per_pixel = pixel_size;
 
-       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-                                             pixel_size,
-                                             &sandybridge_display_srwm_info,
-                                             dev_priv->wm.spr_latency[2] * 500,
-                                             &sprite_wm);
-       if (!ret) {
-               DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
-                             pipe_name(pipe));
-               return;
-       }
-       I915_WRITE(WM2S_LP_IVB, sprite_wm);
+       /*
+        * IVB workaround: must disable low power watermarks for at least
+        * one frame before enabling scaling.  LP watermarks can be re-enabled
+        * when scaling is disabled.
+        *
+        * WaCxSRDisabledForSpriteScaling:ivb
+        */
+       if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
+               intel_wait_for_vblank(dev, intel_plane->pipe);
 
-       ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
-                                             pixel_size,
-                                             &sandybridge_display_srwm_info,
-                                             dev_priv->wm.spr_latency[3] * 500,
-                                             &sprite_wm);
-       if (!ret) {
-               DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
-                             pipe_name(pipe));
-               return;
-       }
-       I915_WRITE(WM3S_LP_IVB, sprite_wm);
+       ilk_update_wm(crtc);
 }
 
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct hsw_wm_values *hw = &dev_priv->wm.hw;
+       struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_pipe_wm *active = &intel_crtc->wm.active;
        enum pipe pipe = intel_crtc->pipe;
@@ -3158,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
        };
 
        hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
-       hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
        if (intel_crtc_active(crtc)) {
                u32 tmp = hw->wm_pipe[pipe];
@@ -3190,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct hsw_wm_values *hw = &dev_priv->wm.hw;
+       struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct drm_crtc *crtc;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -3204,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
        hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
        hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
 
-       hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-               INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+                       INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+       else if (IS_IVYBRIDGE(dev))
+               hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+                       INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
        hw->enable_fbc_wm =
                !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -3430,26 +2891,19 @@ static void ironlake_disable_drps(struct drm_device *dev)
  * ourselves, instead of doing a rmw cycle (which might result in us clearing
  * all limits and the gpu stuck at whatever frequency it is at atm).
  */
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
        u32 limits;
 
-       limits = 0;
-
-       if (*val >= dev_priv->rps.max_delay)
-               *val = dev_priv->rps.max_delay;
-       limits |= dev_priv->rps.max_delay << 24;
-
        /* Only set the down limit when we've reached the lowest level to avoid
         * getting more interrupts, otherwise leave this clear. This prevents a
         * race in the hw when coming out of rc6: There's a tiny window where
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
-       if (*val <= dev_priv->rps.min_delay) {
-               *val = dev_priv->rps.min_delay;
+       limits = dev_priv->rps.max_delay << 24;
+       if (val <= dev_priv->rps.min_delay)
                limits |= dev_priv->rps.min_delay << 16;
-       }
 
        return limits;
 }
@@ -3549,7 +3003,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 limits = gen6_rps_limits(dev_priv, &val);
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
@@ -3572,7 +3025,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        /* Make sure we continue to get interrupts
         * until we hit the minimum or maximum frequencies.
         */
-       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                  gen6_rps_limits(dev_priv, val));
 
        POSTING_READ(GEN6_RPNSWREQ);
 
@@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
-               if (dev_priv->info->is_valleyview)
+               if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
                else
                        gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
@@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 
 void gen6_rps_boost(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
-               if (dev_priv->info->is_valleyview)
+               if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
                else
                        gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
@@ -3607,48 +3065,18 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-/*
- * Wait until the previous freq change has completed,
- * or the timeout elapsed, and then update our notion
- * of the current GPU frequency.
- */
-static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
-{
-       u32 pval;
-
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-       if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
-               DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-
-       pval >>= 8;
-
-       if (pval != dev_priv->rps.cur_delay)
-               DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
-                                vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
-                                dev_priv->rps.cur_delay,
-                                vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-
-       dev_priv->rps.cur_delay = pval;
-}
-
 void valleyview_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       gen6_rps_limits(dev_priv, &val);
-
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
        WARN_ON(val < dev_priv->rps.min_delay);
 
-       vlv_update_rps_cur_delay(dev_priv);
-
        DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.cur_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay,
-                        vlv_gpu_freq(dev_priv->mem_freq, val), val);
+                        vlv_gpu_freq(dev_priv, val), val);
 
        if (val == dev_priv->rps.cur_delay)
                return;
@@ -3657,7 +3085,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 
        dev_priv->rps.cur_delay = val;
 
-       trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+       trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3775,7 +3203,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 
        /* 1c & 1d: Get forcewake during program sequence. Although the driver
         * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-       gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
        /* 2a: Disable RC states. */
        I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3832,7 +3260,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 
        gen6_enable_rps_interrupts(dev);
 
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 static void gen6_enable_rps(struct drm_device *dev)
@@ -3862,7 +3290,7 @@ static void gen6_enable_rps(struct drm_device *dev)
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
 
-       gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
@@ -3954,7 +3382,7 @@ static void gen6_enable_rps(struct drm_device *dev)
                        DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
        }
 
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void gen6_update_ring_freq(struct drm_device *dev)
@@ -4116,7 +3544,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
        valleyview_setup_pctx(dev);
 
-       gen6_gt_force_wake_get(dev_priv);
+       /* If VLV, Forcewake all wells, else re-direct to regular path */
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -4140,7 +3569,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
-       I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+       I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
        /* allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
@@ -4148,65 +3577,47 @@ static void valleyview_enable_rps(struct drm_device *dev)
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-               rc6_mode = GEN7_RC_CTL_TO_MODE;
+               rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
        intel_print_rc6_info(dev, rc6_mode);
 
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-       switch ((val >> 6) & 3) {
-       case 0:
-       case 1:
-               dev_priv->mem_freq = 800;
-               break;
-       case 2:
-               dev_priv->mem_freq = 1066;
-               break;
-       case 3:
-               dev_priv->mem_freq = 1333;
-               break;
-       }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
        dev_priv->rps.cur_delay = (val >> 8) & 0xff;
        DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.cur_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay);
 
        dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
        dev_priv->rps.hw_max = dev_priv->rps.max_delay;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.max_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
                         dev_priv->rps.max_delay);
 
        dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.rpe_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
 
        dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.min_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
                         dev_priv->rps.min_delay);
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.rpe_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
 
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
        gen6_enable_rps_interrupts(dev);
 
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5019,6 +4430,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
        }
 }
 
+static void ilk_init_lp_watermarks(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+       I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+       I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+       /*
+        * Don't touch WM1S_LP_EN here.
+        * Doing so could cause underruns.
+        */
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5052,9 +4477,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
        I915_WRITE(DISP_ARB_CTL,
                   (I915_READ(DISP_ARB_CTL) |
                    DISP_FBC_WM_DIS));
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
+
+       ilk_init_lp_watermarks(dev);
 
        /*
         * Based on the document from hardware guys the following bits
@@ -5161,9 +4585,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                I915_WRITE(GEN6_GT_MODE,
                           _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
+       ilk_init_lp_watermarks(dev);
 
        I915_WRITE(CACHE_MODE_0,
                   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -5304,28 +4726,40 @@ static void gen8_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
-       /* WaSwitchSolVfFArbitrationPriority */
+       /* WaSwitchSolVfFArbitrationPriority:bdw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
-       /* WaPsrDPAMaskVBlankInSRD */
+       /* WaPsrDPAMaskVBlankInSRD:bdw */
        I915_WRITE(CHICKEN_PAR1_1,
                   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
-       /* WaPsrDPRSUnmaskVBlankInSRD */
+       /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
        for_each_pipe(i) {
                I915_WRITE(CHICKEN_PIPESL_1(i),
                           I915_READ(CHICKEN_PIPESL_1(i) |
                                     DPRS_MASK_VBLANK_SRD));
        }
+
+       /* Use Force Non-Coherent whenever executing a 3D context. This is a
+        * workaround for a possible hang in the unlikely event a TLB
+        * invalidation occurs during a PSD flush.
+        */
+       I915_WRITE(HDC_CHICKEN0,
+                  I915_READ(HDC_CHICKEN0) |
+                  _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+
+       /* WaVSRefCountFullforceMissDisable:bdw */
+       /* WaDSRefCountFullforceMissDisable:bdw */
+       I915_WRITE(GEN7_FF_THREAD_MODE,
+                  I915_READ(GEN7_FF_THREAD_MODE) &
+                  ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 }
 
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
+       ilk_init_lp_watermarks(dev);
 
        /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:hsw workaround.
@@ -5374,9 +4808,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t snpcr;
 
-       I915_WRITE(WM3_LP_ILK, 0);
-       I915_WRITE(WM2_LP_ILK, 0);
-       I915_WRITE(WM1_LP_ILK, 0);
+       ilk_init_lp_watermarks(dev);
 
        I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5463,6 +4895,26 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       switch ((val >> 6) & 3) {
+       case 0:
+               dev_priv->mem_freq = 800;
+               break;
+       case 1:
+               dev_priv->mem_freq = 1066;
+               break;
+       case 2:
+               dev_priv->mem_freq = 1333;
+               break;
+       case 3:
+               dev_priv->mem_freq = 1333;
+               break;
+       }
+       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
@@ -5642,50 +5094,133 @@ void intel_suspend_hw(struct drm_device *dev)
                lpt_suspend_hw(dev);
 }
 
-static bool is_always_on_power_domain(struct drm_device *dev,
-                                     enum intel_display_power_domain domain)
-{
-       unsigned long always_on_domains;
-
-       BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
-
-       if (IS_BROADWELL(dev)) {
-               always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
-       } else if (IS_HASWELL(dev)) {
-               always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
-       } else {
-               WARN_ON(1);
-               return true;
-       }
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+       for (i = 0;                                                     \
+            i < (power_domains)->power_well_count &&                   \
+                ((power_well) = &(power_domains)->power_wells[i]);     \
+            i++)                                                       \
+               if ((power_well)->domains & (domain_mask))
 
-       return BIT(domain) & always_on_domains;
-}
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+       for (i = (power_domains)->power_well_count - 1;                  \
+            i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+            i--)                                                        \
+               if ((power_well)->domains & (domain_mask))
 
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
  * be enabled.
  */
+static bool hsw_power_well_enabled(struct drm_device *dev,
+                                  struct i915_power_well *power_well)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
+                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+                                   enum intel_display_power_domain domain)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains;
+
+       power_domains = &dev_priv->power_domains;
+
+       return power_domains->domain_use_count[domain];
+}
+
 bool intel_display_power_enabled(struct drm_device *dev,
                                 enum intel_display_power_domain domain)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       bool is_enabled;
+       int i;
+
+       power_domains = &dev_priv->power_domains;
 
-       if (!HAS_POWER_WELL(dev))
-               return true;
+       is_enabled = true;
 
-       if (is_always_on_power_domain(dev, domain))
-               return true;
+       mutex_lock(&power_domains->lock);
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               if (power_well->always_on)
+                       continue;
 
-       return I915_READ(HSW_PWR_WELL_DRIVER) ==
-                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+               if (!power_well->is_enabled(dev, power_well)) {
+                       is_enabled = false;
+                       break;
+               }
+       }
+       mutex_unlock(&power_domains->lock);
+
+       return is_enabled;
+}
+
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       unsigned long irqflags;
+
+       /*
+        * After we re-enable the power well, if we touch VGA register 0x3d5
+        * we'll get unclaimed register interrupts. This stops after we write
+        * anything to the VGA MSR register. The vgacon module uses this
+        * register all the time, so if we unbind our driver and, as a
+        * consequence, bind vgacon, we'll get stuck in an infinite loop at
+        * console_unlock(). So here we touch the VGA MSR register, making
+        * sure vgacon can keep working normally without triggering interrupts
+        * and error messages.
+        */
+       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+       if (IS_BROADWELL(dev)) {
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+                          dev_priv->de_irq_mask[PIPE_B]);
+               I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+                          ~dev_priv->de_irq_mask[PIPE_B] |
+                          GEN8_PIPE_VBLANK);
+               I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+                          dev_priv->de_irq_mask[PIPE_C]);
+               I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+                          ~dev_priv->de_irq_mask[PIPE_C] |
+                          GEN8_PIPE_VBLANK);
+               POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       }
+}
+
+static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       enum pipe p;
+       unsigned long irqflags;
+
+       /*
+        * After this, the registers on the pipes that are part of the power
+        * well will become zero, so we have to adjust our counters according to
+        * that.
+        *
+        * FIXME: Should we do this in general in drm_vblank_post_modeset?
+        */
+       spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       for_each_pipe(p)
+               if (p != PIPE_A)
+                       dev->vblank[p].last = 0;
+       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 
-static void __intel_set_power_well(struct drm_device *dev, bool enable)
+static void hsw_set_power_well(struct drm_device *dev,
+                              struct i915_power_well *power_well, bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool is_enabled, enable_requested;
-       unsigned long irqflags;
        uint32_t tmp;
 
        WARN_ON(dev_priv->pc8.enabled);
@@ -5706,42 +5241,14 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
                                DRM_ERROR("Timeout enabling power well\n");
                }
 
-               if (IS_BROADWELL(dev)) {
-                       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-                       I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-                                  dev_priv->de_irq_mask[PIPE_B]);
-                       I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-                                  ~dev_priv->de_irq_mask[PIPE_B] |
-                                  GEN8_PIPE_VBLANK);
-                       I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-                                  dev_priv->de_irq_mask[PIPE_C]);
-                       I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-                                  ~dev_priv->de_irq_mask[PIPE_C] |
-                                  GEN8_PIPE_VBLANK);
-                       POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-                       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-               }
+               hsw_power_well_post_enable(dev_priv);
        } else {
                if (enable_requested) {
-                       enum pipe p;
-
                        I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
                        POSTING_READ(HSW_PWR_WELL_DRIVER);
                        DRM_DEBUG_KMS("Requesting to disable the power well\n");
 
-                       /*
-                        * After this, the registers on the pipes that are part
-                        * of the power well will become zero, so we have to
-                        * adjust our counters according to that.
-                        *
-                        * FIXME: Should we do this in general in
-                        * drm_vblank_post_modeset?
-                        */
-                       spin_lock_irqsave(&dev->vbl_lock, irqflags);
-                       for_each_pipe(p)
-                               if (p != PIPE_A)
-                                       dev->vblank[p].last = 0;
-                       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+                       hsw_power_well_post_disable(dev_priv);
                }
        }
 }
@@ -5751,9 +5258,9 @@ static void __intel_power_well_get(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!power_well->count++) {
+       if (!power_well->count++ && power_well->set) {
                hsw_disable_package_c8(dev_priv);
-               __intel_set_power_well(dev, true);
+               power_well->set(dev, power_well, true);
        }
 }
 
@@ -5763,8 +5270,10 @@ static void __intel_power_well_put(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        WARN_ON(!power_well->count);
-       if (!--power_well->count && i915_disable_power_well) {
-               __intel_set_power_well(dev, false);
+
+       if (!--power_well->count && power_well->set &&
+           i915_disable_power_well) {
+               power_well->set(dev, power_well, false);
                hsw_enable_package_c8(dev_priv);
        }
 }
@@ -5774,17 +5283,18 @@ void intel_display_power_get(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
-
-       if (!HAS_POWER_WELL(dev))
-               return;
-
-       if (is_always_on_power_domain(dev, domain))
-               return;
+       struct i915_power_well *power_well;
+       int i;
 
        power_domains = &dev_priv->power_domains;
 
        mutex_lock(&power_domains->lock);
-       __intel_power_well_get(dev, &power_domains->power_wells[0]);
+
+       for_each_power_well(i, power_well, BIT(domain), power_domains)
+               __intel_power_well_get(dev, power_well);
+
+       power_domains->domain_use_count[domain]++;
+
        mutex_unlock(&power_domains->lock);
 }
 
@@ -5793,17 +5303,19 @@ void intel_display_power_put(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
-
-       if (!HAS_POWER_WELL(dev))
-               return;
-
-       if (is_always_on_power_domain(dev, domain))
-               return;
+       struct i915_power_well *power_well;
+       int i;
 
        power_domains = &dev_priv->power_domains;
 
        mutex_lock(&power_domains->lock);
-       __intel_power_well_put(dev, &power_domains->power_wells[0]);
+
+       WARN_ON(!power_domains->domain_use_count[domain]);
+       power_domains->domain_use_count[domain]--;
+
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+               __intel_power_well_put(dev, power_well);
+
        mutex_unlock(&power_domains->lock);
 }
 
@@ -5819,10 +5331,7 @@ void i915_request_power_well(void)
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
-
-       mutex_lock(&hsw_pwr->lock);
-       __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
-       mutex_unlock(&hsw_pwr->lock);
+       intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
@@ -5836,24 +5345,71 @@ void i915_release_power_well(void)
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
-
-       mutex_lock(&hsw_pwr->lock);
-       __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
-       mutex_unlock(&hsw_pwr->lock);
+       intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
+static struct i915_power_well i9xx_always_on_power_well[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = POWER_DOMAIN_MASK,
+       },
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+       },
+       {
+               .name = "display",
+               .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
+               .is_enabled = hsw_power_well_enabled,
+               .set = hsw_set_power_well,
+       },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+       },
+       {
+               .name = "display",
+               .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
+               .is_enabled = hsw_power_well_enabled,
+               .set = hsw_set_power_well,
+       },
+};
+
+#define set_power_wells(power_domains, __power_wells) ({               \
+       (power_domains)->power_wells = (__power_wells);                 \
+       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
+})
+
 int intel_power_domains_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
 
        mutex_init(&power_domains->lock);
-       hsw_pwr = power_domains;
 
-       power_well = &power_domains->power_wells[0];
-       power_well->count = 0;
+       /*
+        * The enabling order will be from lower to higher indexed wells,
+        * the disabling order is reversed.
+        */
+       if (IS_HASWELL(dev)) {
+               set_power_wells(power_domains, hsw_power_wells);
+               hsw_pwr = power_domains;
+       } else if (IS_BROADWELL(dev)) {
+               set_power_wells(power_domains, bdw_power_wells);
+               hsw_pwr = power_domains;
+       } else {
+               set_power_wells(power_domains, i9xx_always_on_power_well);
+       }
 
        return 0;
 }
@@ -5868,15 +5424,13 @@ static void intel_power_domains_resume(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
-
-       if (!HAS_POWER_WELL(dev))
-               return;
+       int i;
 
        mutex_lock(&power_domains->lock);
-
-       power_well = &power_domains->power_wells[0];
-       __intel_set_power_well(dev, power_well->count > 0);
-
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->set)
+                       power_well->set(dev, power_well, power_well->count > 0);
+       }
        mutex_unlock(&power_domains->lock);
 }
 
@@ -5890,13 +5444,13 @@ void intel_power_domains_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!HAS_POWER_WELL(dev))
-               return;
-
        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev, true);
        intel_power_domains_resume(dev);
 
+       if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
+               return;
+
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
        if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@ -5914,31 +5468,86 @@ void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
        hsw_enable_package_c8(dev_priv);
 }
 
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_get_sync(device);
+       WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_mark_last_busy(device);
+       pm_runtime_put_autosuspend(device);
+}
+
+void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       dev_priv->pm.suspended = false;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_set_active(device);
+
+       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+       pm_runtime_mark_last_busy(device);
+       pm_runtime_use_autosuspend(device);
+}
+
+void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       /* Make sure we're not suspended first. */
+       pm_runtime_get_sync(device);
+       pm_runtime_disable(device);
+}
+
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (I915_HAS_FBC(dev)) {
-               if (HAS_PCH_SPLIT(dev)) {
+       if (HAS_FBC(dev)) {
+               if (INTEL_INFO(dev)->gen >= 7) {
                        dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-                       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-                               dev_priv->display.enable_fbc =
-                                       gen7_enable_fbc;
-                       else
-                               dev_priv->display.enable_fbc =
-                                       ironlake_enable_fbc;
+                       dev_priv->display.enable_fbc = gen7_enable_fbc;
+                       dev_priv->display.disable_fbc = ironlake_disable_fbc;
+               } else if (INTEL_INFO(dev)->gen >= 5) {
+                       dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+                       dev_priv->display.enable_fbc = ironlake_enable_fbc;
                        dev_priv->display.disable_fbc = ironlake_disable_fbc;
                } else if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
-               } else if (IS_CRESTLINE(dev)) {
+               } else {
                        dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
                        dev_priv->display.enable_fbc = i8xx_enable_fbc;
                        dev_priv->display.disable_fbc = i8xx_disable_fbc;
+
+                       /* This value was pulled out of someone's hat */
+                       I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
                }
-               /* 855GM needs testing */
        }
 
        /* For cxsr */
@@ -5951,58 +5560,27 @@ void intel_init_pm(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                intel_setup_wm_latency(dev);
 
-               if (IS_GEN5(dev)) {
-                       if (dev_priv->wm.pri_latency[1] &&
-                           dev_priv->wm.spr_latency[1] &&
-                           dev_priv->wm.cur_latency[1])
-                               dev_priv->display.update_wm = ironlake_update_wm;
-                       else {
-                               DRM_DEBUG_KMS("Failed to get proper latency. "
-                                             "Disable CxSR\n");
-                               dev_priv->display.update_wm = NULL;
-                       }
+               if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+                    dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
+                   (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+                    dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
+                       dev_priv->display.update_wm = ilk_update_wm;
+                       dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
+               } else {
+                       DRM_DEBUG_KMS("Failed to read display plane latency. "
+                                     "Disable CxSR\n");
+               }
+
+               if (IS_GEN5(dev))
                        dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
-               } else if (IS_GEN6(dev)) {
-                       if (dev_priv->wm.pri_latency[0] &&
-                           dev_priv->wm.spr_latency[0] &&
-                           dev_priv->wm.cur_latency[0]) {
-                               dev_priv->display.update_wm = sandybridge_update_wm;
-                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-                       } else {
-                               DRM_DEBUG_KMS("Failed to read display plane latency. "
-                                             "Disable CxSR\n");
-                               dev_priv->display.update_wm = NULL;
-                       }
+               else if (IS_GEN6(dev))
                        dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-               } else if (IS_IVYBRIDGE(dev)) {
-                       if (dev_priv->wm.pri_latency[0] &&
-                           dev_priv->wm.spr_latency[0] &&
-                           dev_priv->wm.cur_latency[0]) {
-                               dev_priv->display.update_wm = ivybridge_update_wm;
-                               dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-                       } else {
-                               DRM_DEBUG_KMS("Failed to read display plane latency. "
-                                             "Disable CxSR\n");
-                               dev_priv->display.update_wm = NULL;
-                       }
+               else if (IS_IVYBRIDGE(dev))
                        dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-               } else if (IS_HASWELL(dev)) {
-                       if (dev_priv->wm.pri_latency[0] &&
-                           dev_priv->wm.spr_latency[0] &&
-                           dev_priv->wm.cur_latency[0]) {
-                               dev_priv->display.update_wm = haswell_update_wm;
-                               dev_priv->display.update_sprite_wm =
-                                       haswell_update_sprite_wm;
-                       } else {
-                               DRM_DEBUG_KMS("Failed to read display plane latency. "
-                                             "Disable CxSR\n");
-                               dev_priv->display.update_wm = NULL;
-                       }
+               else if (IS_HASWELL(dev))
                        dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-               } else if (INTEL_INFO(dev)->gen == 8) {
+               else if (INTEL_INFO(dev)->gen == 8)
                        dev_priv->display.init_clock_gating = gen8_init_clock_gating;
-               } else
-                       dev_priv->display.update_wm = NULL;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.update_wm = valleyview_update_wm;
                dev_priv->display.init_clock_gating =
@@ -6036,21 +5614,21 @@ void intel_init_pm(struct drm_device *dev)
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
-       } else if (IS_I865G(dev)) {
-               dev_priv->display.update_wm = i830_update_wm;
-               dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-               dev_priv->display.get_fifo_size = i830_get_fifo_size;
-       } else if (IS_I85X(dev)) {
-               dev_priv->display.update_wm = i9xx_update_wm;
-               dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-               dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-       } else {
-               dev_priv->display.update_wm = i830_update_wm;
-               dev_priv->display.init_clock_gating = i830_init_clock_gating;
-               if (IS_845G(dev))
+       } else if (IS_GEN2(dev)) {
+               if (INTEL_INFO(dev)->num_pipes == 1) {
+                       dev_priv->display.update_wm = i845_update_wm;
                        dev_priv->display.get_fifo_size = i845_get_fifo_size;
-               else
+               } else {
+                       dev_priv->display.update_wm = i9xx_update_wm;
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
+               }
+
+               if (IS_I85X(dev) || IS_I865G(dev))
+                       dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+               else
+                       dev_priv->display.init_clock_gating = i830_init_clock_gating;
+       } else {
+               DRM_ERROR("unexpected fall-through in intel_init_pm\n");
        }
 }
 
@@ -6101,59 +5679,48 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
        return 0;
 }
 
-int vlv_gpu_freq(int ddr_freq, int val)
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-       int mult, base;
+       int div;
 
-       switch (ddr_freq) {
+       /* 4 x czclk */
+       switch (dev_priv->mem_freq) {
        case 800:
-               mult = 20;
-               base = 120;
+               div = 10;
                break;
        case 1066:
-               mult = 22;
-               base = 133;
+               div = 12;
                break;
        case 1333:
-               mult = 21;
-               base = 125;
+               div = 16;
                break;
        default:
                return -1;
        }
 
-       return ((val - 0xbd) * mult) + base;
+       return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
 }
 
-int vlv_freq_opcode(int ddr_freq, int val)
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       int mult, base;
+       int mul;
 
-       switch (ddr_freq) {
+       /* 4 x czclk */
+       switch (dev_priv->mem_freq) {
        case 800:
-               mult = 20;
-               base = 120;
+               mul = 10;
                break;
        case 1066:
-               mult = 22;
-               base = 133;
+               mul = 12;
                break;
        case 1333:
-               mult = 21;
-               base = 125;
+               mul = 16;
                break;
        default:
                return -1;
        }
 
-       val /= mult;
-       val -= base / mult;
-       val += 0xbd;
-
-       if (val > 0xea)
-               val = 0xea;
-
-       return val;
+       return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }
 
 void intel_pm_setup(struct drm_device *dev)
index c2f09d4563008ff7e32238675dab1b4da02ec967..b7f1742caf878250c3fb6dc98b5bdbe63ae4a601 100644 (file)
@@ -285,14 +285,16 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
        if (!ring->fbc_dirty)
                return 0;
 
-       ret = intel_ring_begin(ring, 4);
+       ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
-       intel_ring_emit(ring, MI_NOOP);
        /* WaFbcNukeOn3DBlt:ivb/hsw */
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, MSG_FBC_REND_STATE);
        intel_ring_emit(ring, value);
+       intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+       intel_ring_emit(ring, MSG_FBC_REND_STATE);
+       intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
        intel_ring_advance(ring);
 
        ring->fbc_dirty = false;
@@ -354,7 +356,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
-       if (flush_domains)
+       if (!invalidate_domains && flush_domains)
                return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
        return 0;
@@ -436,7 +438,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        int ret = 0;
        u32 head;
 
-       gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
        if (I915_NEED_GFX_HWS(dev))
                intel_ring_setup_status_page(ring);
@@ -509,7 +511,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
        return ret;
 }
@@ -661,19 +663,22 @@ gen6_add_request(struct intel_ring_buffer *ring)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *useless;
-       int i, ret;
+       int i, ret, num_dwords = 4;
 
-       ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
-                                     MBOX_UPDATE_DWORDS) +
-                                     4);
+       if (i915_semaphore_is_enabled(dev))
+               num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+#undef MBOX_UPDATE_DWORDS
+
+       ret = intel_ring_begin(ring, num_dwords);
        if (ret)
                return ret;
-#undef MBOX_UPDATE_DWORDS
 
-       for_each_ring(useless, dev_priv, i) {
-               u32 mbox_reg = ring->signal_mbox[i];
-               if (mbox_reg != GEN6_NOSYNC)
-                       update_mboxes(ring, mbox_reg);
+       if (i915_semaphore_is_enabled(dev)) {
+               for_each_ring(useless, dev_priv, i) {
+                       u32 mbox_reg = ring->signal_mbox[i];
+                       if (mbox_reg != GEN6_NOSYNC)
+                               update_mboxes(ring, mbox_reg);
+               }
        }
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
@@ -1030,11 +1035,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
               return false;
 
-       /* It looks like we need to prevent the gt from suspending while waiting
-        * for an notifiy irq, otherwise irqs seem to get lost on at least the
-        * blt/bsd rings on ivb. */
-       gen6_gt_force_wake_get(dev_priv);
-
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                if (HAS_L3_DPF(dev) && ring->id == RCS)
@@ -1066,8 +1066,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
@@ -1611,8 +1609,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
-                             int bytes)
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+                               int bytes)
 {
        int ret;
 
@@ -1628,7 +1626,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
                        return ret;
        }
 
-       ring->space -= bytes;
        return 0;
 }
 
@@ -1643,12 +1640,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
+       ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+       if (ret)
+               return ret;
+
        /* Preallocate the olr before touching the ring */
        ret = intel_ring_alloc_seqno(ring);
        if (ret)
                return ret;
 
-       return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+       ring->space -= num_dwords * sizeof(uint32_t);
+       return 0;
 }
 
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
@@ -1838,7 +1840,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
        }
        intel_ring_advance(ring);
 
-       if (IS_GEN7(dev) && flush)
+       if (IS_GEN7(dev) && !invalidate && flush)
                return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 
        return 0;
index a583e8f718a7f0f4b87b4fe724336c73ad1a8198..95bdfb3c431c8467b105c616f6d5f9567505804f 100644 (file)
@@ -413,23 +413,34 @@ static const struct _sdvo_cmd_name {
 static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
                                   const void *args, int args_len)
 {
-       int i;
+       int i, pos = 0;
+#define BUF_LEN 256
+       char buffer[BUF_LEN];
+
+#define BUF_PRINT(args...) \
+       pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
 
-       DRM_DEBUG_KMS("%s: W: %02X ",
-                               SDVO_NAME(intel_sdvo), cmd);
-       for (i = 0; i < args_len; i++)
-               DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
-       for (; i < 8; i++)
-               DRM_LOG_KMS("   ");
+       for (i = 0; i < args_len; i++) {
+               BUF_PRINT("%02X ", ((u8 *)args)[i]);
+       }
+       for (; i < 8; i++) {
+               BUF_PRINT("   ");
+       }
        for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
                if (cmd == sdvo_cmd_names[i].cmd) {
-                       DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+                       BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
                        break;
                }
        }
-       if (i == ARRAY_SIZE(sdvo_cmd_names))
-               DRM_LOG_KMS("(%02X)", cmd);
-       DRM_LOG_KMS("\n");
+       if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+               BUF_PRINT("(%02X)", cmd);
+       }
+       BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+       DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
 }
 
 static const char *cmd_status_names[] = {
@@ -512,9 +523,10 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 {
        u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
        u8 status;
-       int i;
+       int i, pos = 0;
+#define BUF_LEN 256
+       char buffer[BUF_LEN];
 
-       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 
        /*
         * The documentation states that all commands will be
@@ -551,10 +563,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                        goto log_fail;
        }
 
+#define BUF_PRINT(args...) \
+       pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
        if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-               DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+               BUF_PRINT("(%s)", cmd_status_names[status]);
        else
-               DRM_LOG_KMS("(??? %d)", status);
+               BUF_PRINT("(??? %d)", status);
 
        if (status != SDVO_CMD_STATUS_SUCCESS)
                goto log_fail;
@@ -565,13 +580,17 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                                          SDVO_I2C_RETURN_0 + i,
                                          &((u8 *)response)[i]))
                        goto log_fail;
-               DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+               BUF_PRINT(" %02X", ((u8 *)response)[i]);
        }
-       DRM_LOG_KMS("\n");
+       BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+       DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
        return true;
 
 log_fail:
-       DRM_LOG_KMS("... failed\n");
+       DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
        return false;
 }
 
@@ -933,7 +952,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 
 static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
                                       unsigned if_index, uint8_t tx_rate,
-                                      uint8_t *data, unsigned length)
+                                      const uint8_t *data, unsigned length)
 {
        uint8_t set_buf_index[2] = { if_index, 0 };
        uint8_t hbuf_size, tmp[8];
@@ -1517,8 +1536,9 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
        intel_modeset_check_state(connector->dev);
 }
 
-static int intel_sdvo_mode_valid(struct drm_connector *connector,
-                                struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+                     struct drm_display_mode *mode)
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 
index 770bdd6ecd9fb96b46365bbadd97ba3011c0ef96..2e2d4eb4a00d190b24c03c1998b4e9f0cd9f06c5 100644 (file)
@@ -59,7 +59,7 @@ struct intel_sdvo_caps {
        unsigned int stall_support:1;
        unsigned int pad:1;
        u16 output_flags;
-} __attribute__((packed));
+} __packed;
 
 /* Note: SDVO detailed timing flags match EDID misc flags. */
 #define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
@@ -94,12 +94,12 @@ struct intel_sdvo_dtd {
                u8 v_sync_off_high;
                u8 reserved;
        } part2;
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_pixel_clock_range {
        u16 min;        /**< pixel clock, in 10kHz units */
        u16 max;        /**< pixel clock, in 10kHz units */
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_preferred_input_timing_args {
        u16 clock;
@@ -108,7 +108,7 @@ struct intel_sdvo_preferred_input_timing_args {
        u8      interlace:1;
        u8      scaled:1;
        u8      pad:6;
-} __attribute__((packed));
+} __packed;
 
 /* I2C registers for SDVO */
 #define SDVO_I2C_ARG_0                         0x07
@@ -162,7 +162,7 @@ struct intel_sdvo_get_trained_inputs_response {
        unsigned int input0_trained:1;
        unsigned int input1_trained:1;
        unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 /** Returns a struct intel_sdvo_output_flags of active outputs. */
 #define SDVO_CMD_GET_ACTIVE_OUTPUTS                    0x04
@@ -219,7 +219,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
        unsigned int ambient_light_interrupt:1;
        unsigned int hdmi_audio_encrypt_change:1;
        unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 /**
  * Selects which input is affected by future input commands.
@@ -232,7 +232,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
 struct intel_sdvo_set_target_input_args {
        unsigned int target_1:1;
        unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
 
 /**
  * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
@@ -370,7 +370,7 @@ struct intel_sdvo_tv_format {
        unsigned int hdtv_std_eia_7702a_480i_60:1;
        unsigned int hdtv_std_eia_7702a_480p_60:1;
        unsigned int pad:3;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_TV_FORMAT                         0x28
 
@@ -401,7 +401,7 @@ struct intel_sdvo_sdtv_resolution_request {
        unsigned int secam_l:1;
        unsigned int secam_60:1;
        unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_sdtv_resolution_reply {
        unsigned int res_320x200:1;
@@ -426,7 +426,7 @@ struct intel_sdvo_sdtv_resolution_reply {
        unsigned int res_1024x768:1;
        unsigned int res_1280x1024:1;
        unsigned int pad:5;
-} __attribute__((packed));
+} __packed;
 
 /* Get supported resolution with squire pixel aspect ratio that can be
    scaled for the requested HDTV format */
@@ -463,7 +463,7 @@ struct intel_sdvo_hdtv_resolution_request {
        unsigned int hdtv_std_eia_7702a_480i_60:1;
        unsigned int hdtv_std_eia_7702a_480p_60:1;
        unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 struct intel_sdvo_hdtv_resolution_reply {
        unsigned int res_640x480:1;
@@ -517,7 +517,7 @@ struct intel_sdvo_hdtv_resolution_reply {
 
        unsigned int res_1280x768:1;
        unsigned int pad5:7;
-} __attribute__((packed));
+} __packed;
 
 /* Get supported power state returns info for encoder and monitor, rely on
    last SetTargetInput and SetTargetOutput calls */
@@ -557,13 +557,13 @@ struct sdvo_panel_power_sequencing {
 
        unsigned int t4_high:2;
        unsigned int pad:6;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL               0x30
 struct sdvo_max_backlight_reply {
        u8 max_value;
        u8 default_value;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_BACKLIGHT_LEVEL                   0x31
 #define SDVO_CMD_SET_BACKLIGHT_LEVEL                   0x32
@@ -573,14 +573,14 @@ struct sdvo_get_ambient_light_reply {
        u16 trip_low;
        u16 trip_high;
        u16 value;
-} __attribute__((packed));
+} __packed;
 #define SDVO_CMD_SET_AMBIENT_LIGHT                     0x34
 struct sdvo_set_ambient_light_reply {
        u16 trip_low;
        u16 trip_high;
        unsigned int enable:1;
        unsigned int pad:7;
-} __attribute__((packed));
+} __packed;
 
 /* Set display power state */
 #define SDVO_CMD_SET_DISPLAY_POWER_STATE               0x7d
@@ -608,7 +608,7 @@ struct intel_sdvo_enhancements_reply {
        unsigned int dither:1;
        unsigned int tv_chroma_filter:1;
        unsigned int tv_luma_filter:1;
-} __attribute__((packed));
+} __packed;
 
 /* Picture enhancement limits below are dependent on the current TV format,
  * and thus need to be queried and set after it.
@@ -630,7 +630,7 @@ struct intel_sdvo_enhancements_reply {
 struct intel_sdvo_enhancement_limits_reply {
        u16 max_value;
        u16 default_value;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION            0x7f
 #define SDVO_CMD_SET_LVDS_PANEL_INFORMATION            0x80
@@ -671,7 +671,7 @@ struct intel_sdvo_enhancement_limits_reply {
 #define SDVO_CMD_SET_TV_LUMA_FILTER                    0x79
 struct intel_sdvo_enhancements_arg {
        u16 value;
-} __attribute__((packed));
+} __packed;
 
 #define SDVO_CMD_GET_DOT_CRAWL                         0x70
 #define SDVO_CMD_SET_DOT_CRAWL                         0x71
@@ -727,4 +727,4 @@ struct intel_sdvo_enhancements_arg {
 struct intel_sdvo_encode {
        u8 dvi_rev;
        u8 hdmi_rev;
-} __attribute__ ((packed));
+} __packed;
index 9944d8135e87f88215d5d07d54a5fd87fcf4dc60..0954f132726ea0ae15593364ef976168fc2c909f 100644 (file)
@@ -90,6 +90,22 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+       u32 val = 0;
+
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+                       PUNIT_OPCODE_REG_READ, reg, &val);
+
+       return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+                       PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 {
        u32 val = 0;
@@ -160,27 +176,18 @@ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
                        PUNIT_OPCODE_REG_WRITE, reg, &val);
 }
 
-static u32 vlv_get_phy_port(enum pipe pipe)
-{
-       u32 port = IOSF_PORT_DPIO;
-
-       WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B));
-
-       return port;
-}
-
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 {
        u32 val = 0;
 
-       vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+       vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
                        DPIO_OPCODE_REG_READ, reg, &val);
        return val;
 }
 
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+       vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
                        DPIO_OPCODE_REG_WRITE, reg, &val);
 }
 
@@ -242,3 +249,17 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                return;
        }
 }
+
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+       u32 val = 0;
+       vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+                                       DPIO_OPCODE_REG_READ, reg, &val);
+       return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+       vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
+                                       DPIO_OPCODE_REG_WRITE, reg, &val);
+}
index b9fabf826f7de71f224bd9b808780f81ba1c9027..716a3c9c0751c18927cb1b10633b6effff584e7b 100644 (file)
@@ -104,6 +104,12 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
                break;
        }
 
+       /*
+        * Enable gamma to match primary/cursor plane behaviour.
+        * FIXME should be user controllable via propertiesa.
+        */
+       sprctl |= SP_GAMMA_ENABLE;
+
        if (obj->tiling_mode != I915_TILING_NONE)
                sprctl |= SP_TILED;
 
@@ -135,8 +141,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
-       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
-                            sprsurf_offset);
+       I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
+                  sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -152,7 +158,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
        I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
                   ~SP_ENABLE);
        /* Activate double buffered register update */
-       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+       I915_WRITE(SPSURF(pipe, plane), 0);
        POSTING_READ(SPSURF(pipe, plane));
 
        intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
@@ -224,7 +230,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        u32 sprctl, sprscale = 0;
        unsigned long sprsurf_offset, linear_offset;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
        sprctl = I915_READ(SPRCTL(pipe));
 
@@ -257,6 +262,12 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                BUG();
        }
 
+       /*
+        * Enable gamma to match primary/cursor plane behaviour.
+        * FIXME should be user controllable via propertiesa.
+        */
+       sprctl |= SPRITE_GAMMA_ENABLE;
+
        if (obj->tiling_mode != I915_TILING_NONE)
                sprctl |= SPRITE_TILED;
 
@@ -279,21 +290,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        crtc_w--;
        crtc_h--;
 
-       /*
-        * IVB workaround: must disable low power watermarks for at least
-        * one frame before enabling scaling.  LP watermarks can be re-enabled
-        * when scaling is disabled.
-        */
-       if (crtc_w != src_w || crtc_h != src_h) {
-               dev_priv->sprite_scaling_enabled |= 1 << pipe;
-
-               if (!scaling_was_enabled) {
-                       intel_update_watermarks(crtc);
-                       intel_wait_for_vblank(dev, pipe);
-               }
+       if (crtc_w != src_w || crtc_h != src_h)
                sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
-       } else
-               dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
        I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -317,13 +315,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (intel_plane->can_scale)
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
-       I915_MODIFY_DISPBASE(SPRSURF(pipe),
-                            i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+       I915_WRITE(SPRSURF(pipe),
+                  i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
-
-       /* potentially re-enable LP watermarks */
-       if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-               intel_update_watermarks(crtc);
 }
 
 static void
@@ -333,23 +327,22 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int pipe = intel_plane->pipe;
-       bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
        I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
        /* Can't leave the scaler enabled... */
        if (intel_plane->can_scale)
                I915_WRITE(SPRSCALE(pipe), 0);
        /* Activate double buffered register update */
-       I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+       I915_WRITE(SPRSURF(pipe), 0);
        POSTING_READ(SPRSURF(pipe));
 
-       dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+       /*
+        * Avoid underruns when disabling the sprite.
+        * FIXME remove once watermark updates are done properly.
+        */
+       intel_wait_for_vblank(dev, pipe);
 
        intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
-
-       /* potentially re-enable LP watermarks */
-       if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-               intel_update_watermarks(crtc);
 }
 
 static int
@@ -453,6 +446,12 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                BUG();
        }
 
+       /*
+        * Enable gamma to match primary/cursor plane behaviour.
+        * FIXME should be user controllable via propertiesa.
+        */
+       dvscntr |= DVS_GAMMA_ENABLE;
+
        if (obj->tiling_mode != I915_TILING_NONE)
                dvscntr |= DVS_TILED;
 
@@ -470,7 +469,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        crtc_h--;
 
        dvsscale = 0;
-       if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+       if (crtc_w != src_w || crtc_h != src_h)
                dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
        I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -490,8 +489,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
-       I915_MODIFY_DISPBASE(DVSSURF(pipe),
-                            i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+       I915_WRITE(DVSSURF(pipe),
+                  i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
@@ -507,9 +506,15 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
        /* Disable the scaler */
        I915_WRITE(DVSSCALE(pipe), 0);
        /* Flush double buffered register updates */
-       I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+       I915_WRITE(DVSSURF(pipe), 0);
        POSTING_READ(DVSSURF(pipe));
 
+       /*
+        * Avoid underruns when disabling the sprite.
+        * FIXME remove once watermark updates are done properly.
+        */
+       intel_wait_for_vblank(dev, pipe);
+
        intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
 }
 
@@ -643,6 +648,15 @@ format_is_yuv(uint32_t format)
        }
 }
 
+static bool colorkey_enabled(struct intel_plane *intel_plane)
+{
+       struct drm_intel_sprite_colorkey key;
+
+       intel_plane->get_colorkey(&intel_plane->base, &key);
+
+       return key.flags != I915_SET_COLORKEY_NONE;
+}
+
 static int
 intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -828,7 +842,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
         * If the sprite is completely covering the primary plane,
         * we can disable the primary and save power.
         */
-       disable_primary = drm_rect_equals(&dst, &clip);
+       disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
        WARN_ON(disable_primary && !visible && intel_crtc->active);
 
        mutex_lock(&dev->struct_mutex);
index 25cbe073c388a3185d1a32afeb805993e43d12d3..87df68f5f504b5a2dd352f503d1af4dbf06c30ba 100644 (file)
@@ -64,7 +64,8 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
        __raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
 {
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
@@ -89,7 +90,8 @@ static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
        __raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
 {
        u32 forcewake_ack;
 
@@ -121,12 +123,12 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
        u32 gtfifodbg;
 
        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
-       if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
-            "MMIO read or write has been dropped %x\n", gtfifodbg))
-               __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+       if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+               __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
@@ -134,7 +136,8 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
        gen6_gt_check_fifodbg(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
 {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@ -147,12 +150,19 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
        int ret = 0;
 
+       /* On VLV, FIFO will be shared by both SW and HW.
+        * So, we need to read the FREE_ENTRIES everytime */
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               dev_priv->uncore.fifo_count =
+                       __raw_i915_read32(dev_priv, GTFIFOCTL) &
+                                               GT_FIFO_FREE_ENTRIES_MASK;
+
        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
-               u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+               u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
-                       fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+                       fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
@@ -171,38 +181,112 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
 
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+                                               int fw_engine)
 {
-       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+       /* Check for Render Engine */
+       if (FORCEWAKE_RENDER & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_VLV) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
-       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+               __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
-       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_VLV) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Render to ack.\n");
+       }
 
-       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
-                            FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+       /* Check for Media Engine */
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_MEDIA_VLV) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+               __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_MEDIA_VLV) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for media to ack.\n");
+       }
 
        /* WaRsForcewakeWaitTC0:vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
+
 }
 
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+                                       int fw_engine)
 {
-       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+       /* Check for Render Engine */
+       if (FORCEWAKE_RENDER & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                                       _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+
+       /* Check for Media Engine */
+       if (FORCEWAKE_MEDIA & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                               _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
+
+}
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv,
+                                               int fw_engine)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (FORCEWAKE_RENDER & fw_engine) {
+               if (dev_priv->uncore.fw_rendercount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_RENDER);
+       }
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               if (dev_priv->uncore.fw_mediacount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_MEDIA);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv,
+                                               int fw_engine)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       if (FORCEWAKE_RENDER & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+               if (--dev_priv->uncore.fw_rendercount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_RENDER);
+       }
+
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+               if (--dev_priv->uncore.fw_mediacount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_MEDIA);
+       }
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void gen6_force_wake_work(struct work_struct *work)
@@ -213,7 +297,7 @@ static void gen6_force_wake_work(struct work_struct *work)
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0)
-               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -248,6 +332,11 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
 
+       /* clear out old GT FIFO errors */
+       if (IS_GEN6(dev) || IS_GEN7(dev))
+               __raw_i915_write32(dev_priv, GTFIFODBG,
+                                  __raw_i915_read32(dev_priv, GTFIFODBG));
+
        intel_uncore_forcewake_reset(dev);
 }
 
@@ -256,8 +345,6 @@ void intel_uncore_sanitize(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_val;
 
-       intel_uncore_forcewake_reset(dev);
-
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
 
@@ -281,29 +368,40 @@ void intel_uncore_sanitize(struct drm_device *dev)
  * be called at the beginning of the sequence followed by a call to
  * gen6_gt_force_wake_put() at the end of the sequence.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 {
        unsigned long irqflags;
 
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;
 
+       intel_runtime_pm_get(dev_priv);
+
+       /* Redirect to VLV specific routine */
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               return vlv_force_wake_get(dev_priv, fw_engine);
+
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv);
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 /*
  * see gen6_gt_force_wake_get()
  */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
        unsigned long irqflags;
 
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
 
+       /* Redirect to VLV specific routine */
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               return vlv_force_wake_put(dev_priv, fw_engine);
+
+
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
@@ -312,6 +410,8 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
                                 1);
        }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 /* We give fast paths for the really cool registers */
@@ -346,6 +446,13 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
        }
 }
 
+static void
+assert_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+       WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+            "Device suspended\n");
+}
+
 #define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
@@ -379,16 +486,51 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        REG_READ_HEADER(x); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                       FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
                if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                       FORCEWAKE_ALL); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        REG_READ_FOOTER; \
 }
 
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       unsigned fwengine = 0; \
+       unsigned *fwcount; \
+       REG_READ_HEADER(x); \
+       if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
+               fwengine = FORCEWAKE_RENDER;            \
+               fwcount = &dev_priv->uncore.fw_rendercount;    \
+       }                                               \
+       else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {       \
+               fwengine = FORCEWAKE_MEDIA;             \
+               fwcount = &dev_priv->uncore.fw_mediacount;     \
+       }  \
+       if (fwengine != 0) {            \
+               if ((*fwcount)++ == 0) \
+                       (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
+                                                               fwengine); \
+               val = __raw_i915_read##x(dev_priv, reg); \
+               if (--(*fwcount) == 0) \
+                       (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
+                                                       fwengine); \
+       } else { \
+               val = __raw_i915_read##x(dev_priv, reg); \
+       } \
+       REG_READ_FOOTER; \
+}
+
+
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
 __gen6_read(8)
 __gen6_read(16)
 __gen6_read(32)
@@ -402,6 +544,7 @@ __gen4_read(16)
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __vlv_read
 #undef __gen6_read
 #undef __gen5_read
 #undef __gen4_read
@@ -413,12 +556,15 @@ __gen4_read(64)
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
+#define REG_WRITE_FOOTER \
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+
 #define __gen4_write(x) \
 static void \
 gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        REG_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       REG_WRITE_FOOTER; \
 }
 
 #define __gen5_write(x) \
@@ -427,7 +573,7 @@ gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
        REG_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       REG_WRITE_FOOTER; \
 }
 
 #define __gen6_write(x) \
@@ -438,11 +584,12 @@ gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
+       assert_device_not_suspended(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       REG_WRITE_FOOTER; \
 }
 
 #define __hsw_write(x) \
@@ -453,13 +600,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
+       assert_device_not_suspended(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_check(dev_priv, reg); \
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       REG_WRITE_FOOTER; \
 }
 
 static const u32 gen8_shadowed_regs[] = {
@@ -486,16 +634,18 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-       bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
+       bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
        if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                       FORCEWAKE_ALL); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                       FORCEWAKE_ALL); \
        } \
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       REG_WRITE_FOOTER; \
 }
 
 __gen8_write(8)
@@ -524,6 +674,7 @@ __gen4_write(64)
 #undef __gen6_write
 #undef __gen5_write
 #undef __gen4_write
+#undef REG_WRITE_FOOTER
 #undef REG_WRITE_HEADER
 
 void intel_uncore_init(struct drm_device *dev)
@@ -534,8 +685,8 @@ void intel_uncore_init(struct drm_device *dev)
                          gen6_force_wake_work);
 
        if (IS_VALLEYVIEW(dev)) {
-               dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
-               dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+               dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
@@ -552,9 +703,9 @@ void intel_uncore_init(struct drm_device *dev)
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
-               __gen6_gt_force_wake_mt_get(dev_priv);
+               __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-               __gen6_gt_force_wake_mt_put(dev_priv);
+               __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
                if (ecobus & FORCEWAKE_MT_ENABLE) {
@@ -601,10 +752,18 @@ void intel_uncore_init(struct drm_device *dev)
                        dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
                }
-               dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-               dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-               dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-               dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+
+               if (IS_VALLEYVIEW(dev)) {
+                       dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
+                       dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
+                       dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
+                       dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
+               } else {
+                       dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+                       dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+                       dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+                       dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+               }
                break;
        case 5:
                dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
@@ -646,7 +805,7 @@ static const struct register_whitelist {
        uint32_t size;
        uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
 } whitelist[] = {
-       { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+       { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
@@ -687,6 +846,43 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        return 0;
 }
 
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_reset_stats *args = data;
+       struct i915_ctx_hang_stats *hs;
+       int ret;
+
+       if (args->flags || args->pad)
+               return -EINVAL;
+
+       if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
+       if (IS_ERR(hs)) {
+               mutex_unlock(&dev->struct_mutex);
+               return PTR_ERR(hs);
+       }
+
+       if (capable(CAP_SYS_ADMIN))
+               args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       else
+               args->reset_count = 0;
+
+       args->batch_active = hs->batch_active;
+       args->batch_pending = hs->batch_pending;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
 static int i965_reset_complete(struct drm_device *dev)
 {
        u8 gdrst;
@@ -770,12 +966,12 @@ static int gen6_do_reset(struct drm_device *dev)
 
        /* If reset with a user forcewake, try to restore, otherwise turn it off */
        if (dev_priv->uncore.forcewake_count)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv);
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        else
-               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 
        /* Restore fifo count */
-       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
@@ -793,15 +989,6 @@ int intel_gpu_reset(struct drm_device *dev)
        }
 }
 
-void intel_uncore_clear_errors(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* XXX needs spinlock around caller's grouping */
-       if (HAS_FPGA_DBG_UNCLAIMED(dev))
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
 void intel_uncore_check_errors(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
index 087db33f6cff19f8cdaa32f390c6c4dd52a8c79d..c3bf059ba720569540145f225222e0f2b444827c 100644 (file)
@@ -1075,10 +1075,10 @@ static int mga_dma_get_buffers(struct drm_device *dev,
 
                buf->file_priv = file_priv;
 
-               if (DRM_COPY_TO_USER(&d->request_indices[i],
+               if (copy_to_user(&d->request_indices[i],
                                     &buf->idx, sizeof(buf->idx)))
                        return -EFAULT;
-               if (DRM_COPY_TO_USER(&d->request_sizes[i],
+               if (copy_to_user(&d->request_sizes[i],
                                     &buf->total, sizeof(buf->total)))
                        return -EFAULT;
 
index ca4bc54ea2146303f4cedf7bdd879b3c3bc812fd..fe453213600ab728e57385e40b272e4e8b8ebc49 100644 (file)
@@ -186,14 +186,14 @@ extern void mga_disable_vblank(struct drm_device *dev, int crtc);
 extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
 extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
 extern void mga_driver_irq_preinstall(struct drm_device *dev);
 extern int mga_driver_irq_postinstall(struct drm_device *dev);
 extern void mga_driver_irq_uninstall(struct drm_device *dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg);
 
-#define mga_flush_write_combine()      DRM_WRITEMEMORYBARRIER()
+#define mga_flush_write_combine()      wmb()
 
 #define MGA_READ8(reg)         DRM_READ8(dev_priv->mmio, (reg))
 #define MGA_READ(reg)          DRM_READ32(dev_priv->mmio, (reg))
index 709e90db8c4087dfcb782a51deab776271a7feb0..86b4bb80485200e1067c47574117fa1c46c17b8f 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <drm/drmP.h>
 #include <drm/mga_drm.h>
+#include "mga_drv.h"
 
 typedef struct drm32_mga_init {
        int func;
index 2b0ceb8dc11b8dce6894cf056998714603087a3d..1b071b8ff9dccec81e1d93376d0b6b09bc58521b 100644 (file)
@@ -47,7 +47,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
 }
 
 
-irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t mga_driver_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
                        MGA_WRITE(MGA_PRIMEND, prim_end);
 
                atomic_inc(&dev_priv->last_fence_retired);
-               DRM_WAKEUP(&dev_priv->fence_queue);
+               wake_up(&dev_priv->fence_queue);
                handled = 1;
        }
 
@@ -128,7 +128,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
         * by about a day rather than she wants to wait for years
         * using fences.
         */
-       DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+       DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
                    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
                      - *sequence) <= (1 << 23)));
 
@@ -151,7 +151,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
 {
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-       DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+       init_waitqueue_head(&dev_priv->fence_queue);
 
        /* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
         * in mga_enable_vblank.
index 37cc2fb4eadd9a033316f5951eddc81f8fb072e0..314685b7f41fc4acb21a2c9c0cad86ecbd1ccad2 100644 (file)
@@ -1029,7 +1029,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
                return -EINVAL;
        }
 
-       if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+       if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }
index 801731aeab61cddf3db56419dea57f5240ed5994..9f9780b7ddf0be7d3a82ba77205d21d29984b02f 100644 (file)
@@ -22,8 +22,10 @@ static void mga_hide_cursor(struct mga_device *mdev)
 {
        WREG8(MGA_CURPOSXL, 0);
        WREG8(MGA_CURPOSXH, 0);
-       mgag200_bo_unpin(mdev->cursor.pixels_1);
-       mgag200_bo_unpin(mdev->cursor.pixels_2);
+       if (mdev->cursor.pixels_1->pin_count)
+               mgag200_bo_unpin(mdev->cursor.pixels_1);
+       if (mdev->cursor.pixels_2->pin_count)
+               mgag200_bo_unpin(mdev->cursor.pixels_2);
 }
 
 int mga_crtc_cursor_set(struct drm_crtc *crtc,
@@ -32,7 +34,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
                        uint32_t width,
                        uint32_t height)
 {
-       struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
+       struct drm_device *dev = crtc->dev;
        struct mga_device *mdev = (struct mga_device *)dev->dev_private;
        struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
        struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
index 964f58cee5ea67b37f02e680b0f1ab7ebb3e5ed3..f9adc27ef32a05fdee5b66a23e35b7e6df21fd3d 100644 (file)
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
         * then the BO is being moved and we should
         * store up the damage until later.
         */
-       if (!in_interrupt())
+       if (!drm_can_sleep())
                ret = mgag200_bo_reserve(bo, true);
        if (ret) {
                if (ret != -EBUSY)
@@ -282,6 +282,11 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 {
        struct mga_fbdev *mfbdev;
        int ret;
+       int bpp_sel = 32;
+
+       /* prefer 16bpp on low end gpus with limited VRAM */
+       if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+               bpp_sel = 16;
 
        mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
        if (!mfbdev)
@@ -301,7 +306,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
        /* disable all the possible outputs/crtcs before entering KMS mode */
        drm_helper_disable_unused_functions(mdev->dev);
 
-       drm_fb_helper_initial_config(&mfbdev->helper, 32);
+       drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
 
        return 0;
 }
index b1120cb1db6d76b76fd38afd3279a3684e6b188b..26868e5c55b076352188d34962b928a87a64d1a9 100644 (file)
@@ -217,7 +217,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
        dev->mode_config.funcs = (void *)&mga_mode_funcs;
-       dev->mode_config.preferred_depth = 24;
+       if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+               dev->mode_config.preferred_depth = 16;
+       else
+               dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;
 
        r = mgag200_modeset_init(mdev);
@@ -310,7 +313,7 @@ int mgag200_dumb_create(struct drm_file *file,
        return 0;
 }
 
-void mgag200_bo_unref(struct mgag200_bo **bo)
+static void mgag200_bo_unref(struct mgag200_bo **bo)
 {
        struct ttm_buffer_object *tbo;
 
index ee6ed633b7b1c1bb34ac6b59ec85db52c69a4f27..b8583f275e80519dd46d922d1f5c1a0ed9c8bcbf 100644 (file)
@@ -691,7 +691,7 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
    CRTCEXT0 has to be programmed last to trigger an update and make the
    new addr variable take effect.
  */
-void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
        struct mga_device *mdev = crtc->dev->dev_private;
        u32 addr;
@@ -1398,7 +1398,7 @@ static void mga_encoder_commit(struct drm_encoder *encoder)
 {
 }
 
-void mga_encoder_destroy(struct drm_encoder *encoder)
+static void mga_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
        drm_encoder_cleanup(encoder);
@@ -1558,7 +1558,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
                                                  *connector)
 {
        int enc_id = connector->encoder_ids[0];
index 07b192fe15c6c6f9d0c23434280972616693ca48..adb5166a5dfdbe3a77e4e7154411e5162df1abd3 100644 (file)
@@ -80,7 +80,7 @@ static int mgag200_ttm_global_init(struct mga_device *ast)
        return 0;
 }
 
-void
+static void
 mgag200_ttm_global_release(struct mga_device *ast)
 {
        if (ast->ttm.mem_global_ref.release == NULL)
@@ -102,7 +102,7 @@ static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
        kfree(bo);
 }
 
-bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
 {
        if (bo->destroy == &mgag200_bo_ttm_destroy)
                return true;
@@ -208,7 +208,7 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
 };
 
 
-struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
 {
index f39ab7554fc992175630831acf28bbf223cd9f50..c69d1e07a3a67b397ac798e312df7b31b62e3d20 100644 (file)
@@ -2,8 +2,8 @@
 config DRM_MSM
        tristate "MSM DRM"
        depends on DRM
-       depends on ARCH_MSM
-       depends on ARCH_MSM8960
+       depends on MSM_IOMMU
+       depends on (ARCH_MSM && ARCH_MSM8960) || (ARM && COMPILE_TEST)
        select DRM_KMS_HELPER
        select SHMEM
        select TMPFS
index e5fa12b0d21eca645a2cc5efa8f74bef595ad2bb..4f977a593bea13334aebca905c87be43223b2e06 100644 (file)
@@ -12,18 +12,27 @@ msm-y := \
        hdmi/hdmi_i2c.o \
        hdmi/hdmi_phy_8960.o \
        hdmi/hdmi_phy_8x60.o \
-       mdp4/mdp4_crtc.o \
-       mdp4/mdp4_dtv_encoder.o \
-       mdp4/mdp4_format.o \
-       mdp4/mdp4_irq.o \
-       mdp4/mdp4_kms.o \
-       mdp4/mdp4_plane.o \
+       hdmi/hdmi_phy_8x74.o \
+       mdp/mdp_format.o \
+       mdp/mdp_kms.o \
+       mdp/mdp4/mdp4_crtc.o \
+       mdp/mdp4/mdp4_dtv_encoder.o \
+       mdp/mdp4/mdp4_irq.o \
+       mdp/mdp4/mdp4_kms.o \
+       mdp/mdp4/mdp4_plane.o \
+       mdp/mdp5/mdp5_crtc.o \
+       mdp/mdp5/mdp5_encoder.o \
+       mdp/mdp5/mdp5_irq.o \
+       mdp/mdp5/mdp5_kms.o \
+       mdp/mdp5/mdp5_plane.o \
+       mdp/mdp5/mdp5_smp.o \
        msm_drv.o \
        msm_fb.o \
        msm_gem.o \
        msm_gem_prime.o \
        msm_gem_submit.o \
        msm_gpu.o \
+       msm_iommu.o \
        msm_ringbuffer.o
 
 msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
index e036f6c1db9447180ddca1a518174ec692d94738..9c4255b980218c229ae5f40a7c89c8445435d43a 100644 (file)
@@ -4,7 +4,7 @@ In the current snapdragon SoC's, we have (at least) 3 different
 display controller blocks at play:
  + MDP3 - ?? seems to be what is on geeksphone peak device
  + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
- + MDSS - snapdragon 800
+ + MDP5 - snapdragon 800
 
 (I don't have a completely clear picture on which display controller
 maps to which part #)
@@ -46,6 +46,24 @@ and treat the MDP4 block's irq as "the" irq.  Even though the connectors
 may have their own irqs which they install themselves.  For this reason
 the display controller is the "master" device.
 
+For MDP5, the mapping is:
+
+  plane   -> PIPE{RGBn,VIGn}             \
+  crtc    -> LM (layer mixer)            |-> MDP "device"
+  encoder -> INTF                        /
+  connector -> HDMI/DSI/eDP/etc          --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc.  (Ie. the
+register interface is same, just different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
+
 Each connector probably ends up being a separate device, just for the
 logistics of finding/mapping io region, irq, etc.  Idealy we would
 have a better way than just stashing the platform device in a global
index 9588098741b5b7f76e379d715eb358bfa78d92f0..85d615e7d62fb75789a127fcc021c111a0baa74e 100644 (file)
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -202,6 +203,12 @@ enum a2xx_rb_copy_sample_select {
        SAMPLE_0123 = 6,
 };
 
+enum adreno_mmu_clnt_beh {
+       BEH_NEVR = 0,
+       BEH_TRAN_RNG = 1,
+       BEH_TRAN_FLT = 2,
+};
+
 enum sq_tex_clamp {
        SQ_TEX_WRAP = 0,
        SQ_TEX_MIRROR = 1,
@@ -238,6 +245,92 @@ enum sq_tex_filter {
 
 #define REG_A2XX_CP_PFP_UCODE_DATA                             0x000000c1
 
+#define REG_A2XX_MH_MMU_CONFIG                                 0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE                          0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE                   0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK            0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT           4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK            0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT           6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK           0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT          8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK           0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT          10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK           0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT          12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK           0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT          14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK           0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT          16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK          0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT         18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK          0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT         20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK            0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT           22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK            0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT           24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+       return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_A2XX_MH_MMU_VA_RANGE                               0x00000041
+
+#define REG_A2XX_MH_MMU_PT_BASE                                        0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT                             0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR                             0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE                             0x00000045
+
+#define REG_A2XX_MH_MMU_MPU_BASE                               0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END                                        0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL                                  0x00000394
+
 #define REG_A2XX_RBBM_PERFCOUNTER1_SELECT                      0x00000395
 
 #define REG_A2XX_RBBM_PERFCOUNTER1_LO                          0x00000397
@@ -276,20 +369,6 @@ enum sq_tex_filter {
 
 #define REG_A2XX_CP_PERFCOUNTER_HI                             0x00000447
 
-#define REG_A2XX_CP_ST_BASE                                    0x0000044d
-
-#define REG_A2XX_CP_ST_BUFSZ                                   0x0000044e
-
-#define REG_A2XX_CP_IB1_BASE                                   0x00000458
-
-#define REG_A2XX_CP_IB1_BUFSZ                                  0x00000459
-
-#define REG_A2XX_CP_IB2_BASE                                   0x0000045a
-
-#define REG_A2XX_CP_IB2_BUFSZ                                  0x0000045b
-
-#define REG_A2XX_CP_STAT                                       0x0000047f
-
 #define REG_A2XX_RBBM_STATUS                                   0x000005d0
 #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK                   0x0000001f
 #define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT                  0
@@ -808,6 +887,12 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
 
 #define REG_A2XX_SQ_VS_PROGRAM                                 0x000021f7
 
+#define REG_A2XX_VGT_EVENT_INITIATOR                           0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR                            0x000021fc
+
+#define REG_A2XX_VGT_IMMED_DATA                                        0x000021fd
+
 #define REG_A2XX_RB_DEPTHCONTROL                               0x00002200
 #define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE                    0x00000001
 #define A2XX_RB_DEPTHCONTROL_Z_ENABLE                          0x00000002
index d4afdf6575597f0dd4d06ed2eac5fc09b2cad076..a7be56163d2324f4a24ae7e4836168f95db967a2 100644 (file)
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -292,6 +293,8 @@ enum a3xx_tex_type {
 #define A3XX_RBBM_STATUS_GPU_BUSY_NOHC                         0x40000000
 #define A3XX_RBBM_STATUS_GPU_BUSY                              0x80000000
 
+#define REG_A3XX_RBBM_NQWAIT_UNTIL                             0x00000040
+
 #define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL                     0x00000033
 
 #define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL                   0x00000050
@@ -304,6 +307,8 @@ enum a3xx_tex_type {
 
 #define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3                 0x0000005a
 
+#define REG_A3XX_RBBM_INT_SET_CMD                              0x00000060
+
 #define REG_A3XX_RBBM_INT_CLEAR_CMD                            0x00000061
 
 #define REG_A3XX_RBBM_INT_0_MASK                               0x00000063
@@ -937,13 +942,13 @@ static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
        return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
 }
 
-#define REG_A3XX_UNKNOWN_20E8                                  0x000020e8
+#define REG_A3XX_RB_CLEAR_COLOR_DW0                            0x000020e8
 
-#define REG_A3XX_UNKNOWN_20E9                                  0x000020e9
+#define REG_A3XX_RB_CLEAR_COLOR_DW1                            0x000020e9
 
-#define REG_A3XX_UNKNOWN_20EA                                  0x000020ea
+#define REG_A3XX_RB_CLEAR_COLOR_DW2                            0x000020ea
 
-#define REG_A3XX_UNKNOWN_20EB                                  0x000020eb
+#define REG_A3XX_RB_CLEAR_COLOR_DW3                            0x000020eb
 
 #define REG_A3XX_RB_COPY_CONTROL                               0x000020ec
 #define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK                        0x00000003
@@ -1026,7 +1031,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 #define A3XX_RB_DEPTH_CONTROL_BF_ENABLE                                0x00000080
 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE                    0x80000000
 
-#define REG_A3XX_UNKNOWN_2101                                  0x00002101
+#define REG_A3XX_RB_DEPTH_CLEAR                                        0x00002101
 
 #define REG_A3XX_RB_DEPTH_INFO                                 0x00002102
 #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK                  0x00000001
@@ -1103,11 +1108,11 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
        return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
 }
 
-#define REG_A3XX_UNKNOWN_2105                                  0x00002105
+#define REG_A3XX_RB_STENCIL_CLEAR                              0x00002105
 
-#define REG_A3XX_UNKNOWN_2106                                  0x00002106
+#define REG_A3XX_RB_STENCIL_BUF_INFO                           0x00002106
 
-#define REG_A3XX_UNKNOWN_2107                                  0x00002107
+#define REG_A3XX_RB_STENCIL_BUF_PITCH                          0x00002107
 
 #define REG_A3XX_RB_STENCILREFMASK                             0x00002108
 #define A3XX_RB_STENCILREFMASK_STENCILREF__MASK                        0x000000ff
@@ -1149,20 +1154,31 @@ static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
        return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
 }
 
-#define REG_A3XX_PA_SC_WINDOW_OFFSET                           0x0000210e
-#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK                       0x0000ffff
-#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT                      0
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A3XX_RB_LRZ_VSC_CONTROL                            0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE                 0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET                              0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK                          0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT                         0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
 {
-       return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
+       return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
 }
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK                       0xffff0000
-#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT                      16
-static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK                          0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT                         16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
 {
-       return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+       return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
 }
 
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL                       0x00002110
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR                          0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN                                        0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX                                        0x00002115
+
 #define REG_A3XX_PC_VSTREAM_CONTROL                            0x000021e4
 
 #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL                    0x000021ea
@@ -1309,6 +1325,8 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
 
 #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG                    0x00002215
 
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG                    0x00002216
+
 #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG                    0x00002217
 
 #define REG_A3XX_HLSQ_CL_WG_OFFSET_REG                         0x0000221a
@@ -1491,12 +1509,13 @@ static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0
 
 #define REG_A3XX_SP_SP_CTRL_REG                                        0x000022c0
 #define A3XX_SP_SP_CTRL_REG_RESOLVE                            0x00010000
-#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK                    0x000c0000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK                    0x00040000
 #define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT                   18
 static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
 {
        return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
 }
+#define A3XX_SP_SP_CTRL_REG_BINNING                            0x00080000
 #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK                    0x00300000
 #define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT                   20
 static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
@@ -1669,7 +1688,7 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
 
 #define REG_A3XX_SP_VS_OBJ_START_REG                           0x000022d5
 
-#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG                                0x000022d6
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG                       0x000022d6
 
 #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG                                0x000022d7
 
@@ -1772,7 +1791,7 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
 
 #define REG_A3XX_SP_FS_OBJ_START_REG                           0x000022e3
 
-#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG                                0x000022e4
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG                       0x000022e4
 
 #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG                                0x000022e5
 
@@ -1943,6 +1962,9 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00
 
 static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
 
+#define REG_A3XX_VSC_BIN_CONTROL                               0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE                    0x00000001
+
 #define REG_A3XX_UNKNOWN_0C3D                                  0x00000c3d
 
 #define REG_A3XX_PC_PERFCOUNTER0_SELECT                                0x00000c48
@@ -1953,7 +1975,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
 
 #define REG_A3XX_PC_PERFCOUNTER3_SELECT                                0x00000c4b
 
-#define REG_A3XX_UNKNOWN_0C81                                  0x00000c81
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO                            0x00000c81
 
 #define REG_A3XX_GRAS_PERFCOUNTER0_SELECT                      0x00000c88
 
@@ -1975,22 +1997,24 @@ static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x000
 
 #define REG_A3XX_RB_GMEM_BASE_ADDR                             0x00000cc0
 
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR                    0x00000cc1
+
 #define REG_A3XX_RB_PERFCOUNTER0_SELECT                                0x00000cc6
 
 #define REG_A3XX_RB_PERFCOUNTER1_SELECT                                0x00000cc7
 
-#define REG_A3XX_RB_WINDOW_SIZE                                        0x00000ce0
-#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK                                0x00003fff
-#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT                       0
-static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION                     0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK             0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT            0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
 {
-       return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
+       return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
 }
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK                       0x0fffc000
-#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT                      14
-static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK            0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT           14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
 {
-       return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
+       return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
 }
 
 #define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT                      0x00000e00
@@ -2088,6 +2112,14 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
 
 #define REG_A3XX_TP_PERFCOUNTER5_SELECT                                0x00000f09
 
+#define REG_A3XX_VGT_CL_INITIATOR                              0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR                           0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR                            0x000021fc
+
+#define REG_A3XX_VGT_IMMED_DATA                                        0x000021fd
+
 #define REG_A3XX_TEX_SAMP_0                                    0x00000000
 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR                       0x00000002
 #define A3XX_TEX_SAMP_0_XY_MAG__MASK                           0x0000000c
@@ -2123,6 +2155,18 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS                          0x80000000
 
 #define REG_A3XX_TEX_SAMP_1                                    0x00000001
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK                          0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT                         12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+       return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK                          0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT                         22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+       return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
 
 #define REG_A3XX_TEX_CONST_0                                   0x00000000
 #define A3XX_TEX_CONST_0_TILED                                 0x00000001
index 035bd13dc8bdc3ab039bc2f1bab291cbf4c9bf17..461df93e825edf81e98d22683cfbf4fe7b4404aa 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_MSM_OCMEM
+#  include <mach/ocmem.h>
+#endif
+
 #include "a3xx_gpu.h"
 
 #define A3XX_INT0_MASK \
@@ -63,6 +67,7 @@ static void a3xx_me_init(struct msm_gpu *gpu)
 static int a3xx_hw_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
        uint32_t *ptr, len;
        int i, ret;
 
@@ -105,6 +110,21 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
 
+       } else if (adreno_is_a330v2(adreno_gpu)) {
+               /*
+                * Most of the VBIF registers on 8974v2 have the correct
+                * values at power on, so we won't modify those if we don't
+                * need to
+                */
+               /* Enable 1k sort: */
+               gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+               gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+               /* Enable WR-REQ: */
+               gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+               gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+               /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+               gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
        } else if (adreno_is_a330(adreno_gpu)) {
                /* Set up 16 deep read/write request queues: */
                gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
@@ -121,10 +141,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
                /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
                gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
                /* Set up AOOO: */
-               gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
-               gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
+               gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+               gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
                /* Enable 1K sort: */
-               gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
+               gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
                gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
                /* Disable VBIF clock gating. This is to enable AXI running
                 * higher frequency than GPU:
@@ -162,14 +182,23 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
 
        /* Enable Clock gating: */
-       gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
-
-       /* Set the OCMEM base address for A330 */
-//TODO:
-//     if (adreno_is_a330(adreno_gpu)) {
-//             gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
-//                     (unsigned int)(a3xx_gpu->ocmem_base >> 14));
-//     }
+       if (adreno_is_a320(adreno_gpu))
+               gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+       else if (adreno_is_a330v2(adreno_gpu))
+               gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+       else if (adreno_is_a330(adreno_gpu))
+               gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
+
+       if (adreno_is_a330v2(adreno_gpu))
+               gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+       else if (adreno_is_a330(adreno_gpu))
+               gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+       /* Set the OCMEM base address for A330, etc */
+       if (a3xx_gpu->ocmem_hdl) {
+               gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+                       (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+       }
 
        /* Turn on performance counters: */
        gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
@@ -219,7 +248,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
        /* Load PM4: */
        ptr = (uint32_t *)(adreno_gpu->pm4->data);
        len = adreno_gpu->pm4->size / 4;
-       DBG("loading PM4 ucode version: %u", ptr[0]);
+       DBG("loading PM4 ucode version: %x", ptr[1]);
 
        gpu_write(gpu, REG_AXXX_CP_DEBUG,
                        AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
@@ -231,19 +260,26 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
        /* Load PFP: */
        ptr = (uint32_t *)(adreno_gpu->pfp->data);
        len = adreno_gpu->pfp->size / 4;
-       DBG("loading PFP ucode version: %u", ptr[0]);
+       DBG("loading PFP ucode version: %x", ptr[5]);
 
        gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
        for (i = 1; i < len; i++)
                gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
 
        /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
-       if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
+       if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
                gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
                                AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
                                AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
                                AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
-
+       } else if (adreno_is_a330(adreno_gpu)) {
+               /* NOTE: this (value taken from downstream android driver)
+                * includes some bits outside of the known bitfields.  But
+                * A330 has this "MERCIU queue" thing too, which might
+                * explain a new bitfield or reshuffling:
+                */
+               gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+       }
 
        /* clear ME_HALT to start micro engine */
        gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
@@ -253,6 +289,14 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
        return 0;
 }
 
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+       gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+       gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+       gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+       adreno_recover(gpu);
+}
+
 static void a3xx_destroy(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -261,6 +305,12 @@ static void a3xx_destroy(struct msm_gpu *gpu)
        DBG("%s", gpu->name);
 
        adreno_gpu_cleanup(adreno_gpu);
+
+#ifdef CONFIG_MSM_OCMEM
+       if (a3xx_gpu->ocmem_base)
+               ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+#endif
+
        put_device(&a3xx_gpu->pdev->dev);
        kfree(a3xx_gpu);
 }
@@ -371,7 +421,7 @@ static const struct adreno_gpu_funcs funcs = {
                .hw_init = a3xx_hw_init,
                .pm_suspend = msm_gpu_pm_suspend,
                .pm_resume = msm_gpu_pm_resume,
-               .recover = adreno_recover,
+               .recover = a3xx_recover,
                .last_fence = adreno_last_fence,
                .submit = adreno_submit,
                .flush = adreno_flush,
@@ -387,6 +437,7 @@ static const struct adreno_gpu_funcs funcs = {
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 {
        struct a3xx_gpu *a3xx_gpu = NULL;
+       struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
        struct platform_device *pdev = a3xx_pdev;
        struct adreno_platform_config *config;
@@ -406,7 +457,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
                goto fail;
        }
 
-       gpu = &a3xx_gpu->base.base;
+       adreno_gpu = &a3xx_gpu->base;
+       gpu = &adreno_gpu->base;
 
        get_device(&pdev->dev);
        a3xx_gpu->pdev = pdev;
@@ -414,16 +466,46 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        gpu->fast_rate = config->fast_rate;
        gpu->slow_rate = config->slow_rate;
        gpu->bus_freq  = config->bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+       gpu->bus_scale_table = config->bus_scale_table;
+#endif
 
        DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
                        gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
 
-       ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
-                       &funcs, config->rev);
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, config->rev);
        if (ret)
                goto fail;
 
-       return &a3xx_gpu->base.base;
+       /* if needed, allocate gmem: */
+       if (adreno_is_a330(adreno_gpu)) {
+#ifdef CONFIG_MSM_OCMEM
+               /* TODO this is different/missing upstream: */
+               struct ocmem_buf *ocmem_hdl =
+                               ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
+
+               a3xx_gpu->ocmem_hdl = ocmem_hdl;
+               a3xx_gpu->ocmem_base = ocmem_hdl->addr;
+               adreno_gpu->gmem = ocmem_hdl->len;
+               DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
+                               a3xx_gpu->ocmem_base);
+#endif
+       }
+
+       if (!gpu->mmu) {
+               /* TODO we think it is possible to configure the GPU to
+                * restrict access to VRAM carveout.  But the required
+                * registers are unknown.  For now just bail out and
+                * limp along with just modesetting.  If it turns out
+                * to not be possible to restrict access, then we must
+                * implement a cmdstream validator.
+                */
+               dev_err(dev->dev, "No memory protection without IOMMU\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       return gpu;
 
 fail:
        if (a3xx_gpu)
@@ -436,19 +518,59 @@ fail:
  * The a3xx device:
  */
 
+#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
+#  include <mach/kgsl.h>
+#endif
+
 static int a3xx_probe(struct platform_device *pdev)
 {
        static struct adreno_platform_config config = {};
 #ifdef CONFIG_OF
-       /* TODO */
+       struct device_node *child, *node = pdev->dev.of_node;
+       u32 val;
+       int ret;
+
+       ret = of_property_read_u32(node, "qcom,chipid", &val);
+       if (ret) {
+               dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
+               return ret;
+       }
+
+       config.rev = ADRENO_REV((val >> 24) & 0xff,
+                       (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
+
+       /* find clock rates: */
+       config.fast_rate = 0;
+       config.slow_rate = ~0;
+       for_each_child_of_node(node, child) {
+               if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
+                       struct device_node *pwrlvl;
+                       for_each_child_of_node(child, pwrlvl) {
+                               ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+                               if (ret) {
+                                       dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
+                                       return ret;
+                               }
+                               config.fast_rate = max(config.fast_rate, val);
+                               config.slow_rate = min(config.slow_rate, val);
+                       }
+               }
+       }
+
+       if (!config.fast_rate) {
+               dev_err(&pdev->dev, "could not find clk rates\n");
+               return -ENXIO;
+       }
+
 #else
+       struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
        uint32_t version = socinfo_get_version();
        if (cpu_is_apq8064ab()) {
                config.fast_rate = 450000000;
                config.slow_rate = 27000000;
                config.bus_freq  = 4;
                config.rev = ADRENO_REV(3, 2, 1, 0);
-       } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
+       } else if (cpu_is_apq8064()) {
                config.fast_rate = 400000000;
                config.slow_rate = 27000000;
                config.bus_freq  = 4;
@@ -461,6 +583,16 @@ static int a3xx_probe(struct platform_device *pdev)
                else
                        config.rev = ADRENO_REV(3, 2, 0, 0);
 
+       } else if (cpu_is_msm8960ab()) {
+               config.fast_rate = 400000000;
+               config.slow_rate = 320000000;
+               config.bus_freq  = 4;
+
+               if (SOCINFO_VERSION_MINOR(version) == 0)
+                       config.rev = ADRENO_REV(3, 2, 1, 0);
+               else
+                       config.rev = ADRENO_REV(3, 2, 1, 1);
+
        } else if (cpu_is_msm8930()) {
                config.fast_rate = 400000000;
                config.slow_rate = 27000000;
@@ -473,6 +605,9 @@ static int a3xx_probe(struct platform_device *pdev)
                        config.rev = ADRENO_REV(3, 0, 5, 0);
 
        }
+#  ifdef CONFIG_MSM_BUS_SCALING
+       config.bus_scale_table = pdata->bus_scale_table;
+#  endif
 #endif
        pdev->dev.platform_data = &config;
        a3xx_pdev = pdev;
@@ -485,10 +620,19 @@ static int a3xx_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id dt_match[] = {
+       { .compatible = "qcom,kgsl-3d0" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
 static struct platform_driver a3xx_driver = {
        .probe = a3xx_probe,
        .remove = a3xx_remove,
-       .driver.name = "kgsl-3d0",
+       .driver = {
+               .name = "kgsl-3d0",
+               .of_match_table = dt_match,
+       },
 };
 
 void __init a3xx_register(void)
index 32c398c2d00a7103a64a0e76f248b0ce4a712db0..bb9a8ca0507b3cdf4a16be9d59383459d6ff4c13 100644 (file)
 struct a3xx_gpu {
        struct adreno_gpu base;
        struct platform_device *pdev;
+
+       /* if OCMEM is used for GMEM: */
+       uint32_t ocmem_base;
+       void *ocmem_hdl;
 };
 #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
 
index 33dcc606c7c556a55a8e463d87bf56a0368c6363..d6e6ce2d1abde84742d02a0e950911fb1a401f8d 100644 (file)
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -115,96 +116,6 @@ enum adreno_rb_depth_format {
        DEPTHX_24_8 = 1,
 };
 
-enum adreno_mmu_clnt_beh {
-       BEH_NEVR = 0,
-       BEH_TRAN_RNG = 1,
-       BEH_TRAN_FLT = 2,
-};
-
-#define REG_AXXX_MH_MMU_CONFIG                                 0x00000040
-#define AXXX_MH_MMU_CONFIG_MMU_ENABLE                          0x00000001
-#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE                   0x00000002
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK            0x00000030
-#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT           4
-static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK            0x000000c0
-#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT           6
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK           0x00000300
-#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT          8
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK           0x00000c00
-#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT          10
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK           0x00003000
-#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT          12
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK           0x0000c000
-#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT          14
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK           0x00030000
-#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT          16
-static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK          0x000c0000
-#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT         18
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK          0x00300000
-#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT         20
-static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK            0x00c00000
-#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT           22
-static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
-}
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK            0x03000000
-#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT           24
-static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
-{
-       return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
-}
-
-#define REG_AXXX_MH_MMU_VA_RANGE                               0x00000041
-
-#define REG_AXXX_MH_MMU_PT_BASE                                        0x00000042
-
-#define REG_AXXX_MH_MMU_PAGE_FAULT                             0x00000043
-
-#define REG_AXXX_MH_MMU_TRAN_ERROR                             0x00000044
-
-#define REG_AXXX_MH_MMU_INVALIDATE                             0x00000045
-
-#define REG_AXXX_MH_MMU_MPU_BASE                               0x00000046
-
-#define REG_AXXX_MH_MMU_MPU_END                                        0x00000047
-
 #define REG_AXXX_CP_RB_BASE                                    0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL                                    0x000001c1
@@ -275,6 +186,18 @@ static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
 }
 
 #define REG_AXXX_CP_MEQ_THRESHOLDS                             0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK                   0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT                  16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+       return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK                   0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT                  24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+       return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
 
 #define REG_AXXX_CP_CSQ_AVAIL                                  0x000001d7
 #define AXXX_CP_CSQ_AVAIL_RING__MASK                           0x0000007f
@@ -402,6 +325,36 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
        return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
 }
 
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS                         0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT                                        0x00000443
+
+#define REG_AXXX_CP_ST_BASE                                    0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ                                   0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT                                   0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT                               0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO                                        0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI                                        0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO                              0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI                              0x00000457
+
+#define REG_AXXX_CP_IB1_BASE                                   0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ                                  0x00000459
+
+#define REG_AXXX_CP_IB2_BASE                                   0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ                                  0x0000045b
+
+#define REG_AXXX_CP_STAT                                       0x0000047f
+
 #define REG_AXXX_CP_SCRATCH_REG0                               0x00000578
 
 #define REG_AXXX_CP_SCRATCH_REG1                               0x00000579
@@ -418,6 +371,26 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
 
 #define REG_AXXX_CP_SCRATCH_REG7                               0x0000057f
 
+#define REG_AXXX_CP_ME_VS_EVENT_SRC                            0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR                           0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA                           0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM                       0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM                       0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC                            0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR                           0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA                           0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM                       0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM                       0x00000609
+
 #define REG_AXXX_CP_ME_CF_EVENT_SRC                            0x0000060a
 
 #define REG_AXXX_CP_ME_CF_EVENT_ADDR                           0x0000060b
@@ -428,5 +401,11 @@ static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
 
 #define REG_AXXX_CP_ME_NRT_DATA                                        0x0000060e
 
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC                       0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR                      0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA                      0x00000614
+
 
 #endif /* ADRENO_COMMON_XML */
index a0b9d8a95b16c17ad6b11ae1b6da662e80484b04..d321099abdd45ac17b37bc32ae8399ce95bc6f32 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "adreno_gpu.h"
 #include "msm_gem.h"
+#include "msm_mmu.h"
 
 struct adreno_info {
        struct adreno_rev rev;
@@ -44,7 +45,7 @@ static const struct adreno_info gpulist[] = {
                .pfpfw = "a300_pfp.fw",
                .gmem  = SZ_512K,
        }, {
-               .rev   = ADRENO_REV(3, 3, 0, 0),
+               .rev   = ADRENO_REV(3, 3, 0, ANY_ID),
                .revn  = 330,
                .name  = "A330",
                .pm4fw = "a330_pm4.fw",
@@ -53,6 +54,11 @@ static const struct adreno_info gpulist[] = {
        },
 };
 
+MODULE_FIRMWARE("a300_pm4.fw");
+MODULE_FIRMWARE("a300_pfp.fw");
+MODULE_FIRMWARE("a330_pm4.fw");
+MODULE_FIRMWARE("a330_pfp.fw");
+
 #define RB_SIZE    SZ_32K
 #define RB_BLKSIZE 16
 
@@ -65,7 +71,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                *value = adreno_gpu->info->revn;
                return 0;
        case MSM_PARAM_GMEM_SIZE:
-               *value = adreno_gpu->info->gmem;
+               *value = adreno_gpu->gmem;
                return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
@@ -86,7 +92,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
                        /* size is log2(quad-words): */
                        AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
-                       AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
+                       AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
 
        /* Setup ringbuffer address: */
        gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
@@ -286,6 +292,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
                struct adreno_rev rev)
 {
+       struct msm_mmu *mmu;
        int i, ret;
 
        /* identify gpu: */
@@ -311,6 +318,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                        rev.core, rev.major, rev.minor, rev.patchid);
 
        gpu->funcs = funcs;
+       gpu->gmem = gpu->info->gmem;
        gpu->rev = rev;
 
        ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
@@ -333,10 +341,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        if (ret)
                return ret;
 
-       ret = msm_iommu_attach(drm, gpu->base.iommu,
-                       iommu_ports, ARRAY_SIZE(iommu_ports));
-       if (ret)
-               return ret;
+       mmu = gpu->base.mmu;
+       if (mmu) {
+               ret = mmu->funcs->attach(mmu, iommu_ports,
+                               ARRAY_SIZE(iommu_ports));
+               if (ret)
+                       return ret;
+       }
 
        gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
                        MSM_BO_UNCACHED);
index f73abfba7c22eb03b4aa0c7f1e962ade219edf1f..ca11ea4da165082fd3ab2f2ecae981d5cd7543ad 100644 (file)
@@ -51,6 +51,7 @@ struct adreno_gpu {
        struct msm_gpu base;
        struct adreno_rev rev;
        const struct adreno_info *info;
+       uint32_t gmem;  /* actual gmem size */
        uint32_t revn;  /* numeric revision name */
        const struct adreno_gpu_funcs *funcs;
 
@@ -70,6 +71,9 @@ struct adreno_gpu {
 struct adreno_platform_config {
        struct adreno_rev rev;
        uint32_t fast_rate, slow_rate, bus_freq;
+#ifdef CONFIG_MSM_BUS_SCALING
+       struct msm_bus_scale_pdata *bus_scale_table;
+#endif
 };
 
 #define ADRENO_IDLE_TIMEOUT (20 * 1000)
@@ -94,6 +98,11 @@ static inline bool adreno_is_a330(struct adreno_gpu *gpu)
        return gpu->revn == 330;
 }
 
+static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
+{
+       return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
index 259ad709b0cc0467d1a4e6a55435e63ea4cb3415..ae992c71703f1ccc5151b1cf72b904c89e1cd8ec 100644 (file)
@@ -8,12 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml              (    327 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml           (  31003 bytes, from 2013-09-19 18:50:16)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml       (   8983 bytes, from 2013-07-24 01:38:36)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml          (   9759 bytes, from 2013-09-10 00:52:33)
-- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml           (  51983 bytes, from 2013-09-10 00:52:32)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32814 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (   8900 bytes, from 2013-10-22 23:57:49)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  10574 bytes, from 2013-11-13 05:44:45)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  53644 bytes, from 2013-11-30 15:07:33)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (   8344 bytes, from 2013-11-30 14:49:47)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -66,13 +67,15 @@ enum vgt_event_type {
 
 enum pc_di_primtype {
        DI_PT_NONE = 0,
-       DI_PT_POINTLIST = 1,
+       DI_PT_POINTLIST_A2XX = 1,
        DI_PT_LINELIST = 2,
        DI_PT_LINESTRIP = 3,
        DI_PT_TRILIST = 4,
        DI_PT_TRIFAN = 5,
        DI_PT_TRISTRIP = 6,
+       DI_PT_LINELOOP = 7,
        DI_PT_RECTLIST = 8,
+       DI_PT_POINTLIST_A3XX = 9,
        DI_PT_QUADLIST = 13,
        DI_PT_QUADSTRIP = 14,
        DI_PT_POLYGON = 15,
@@ -119,7 +122,7 @@ enum adreno_pm4_type3_packets {
        CP_WAIT_FOR_IDLE = 38,
        CP_WAIT_REG_MEM = 60,
        CP_WAIT_REG_EQ = 82,
-       CP_WAT_REG_GTE = 83,
+       CP_WAIT_REG_GTE = 83,
        CP_WAIT_UNTIL_READ = 92,
        CP_WAIT_IB_PFD_COMPLETE = 93,
        CP_REG_RMW = 33,
@@ -151,7 +154,6 @@ enum adreno_pm4_type3_packets {
        CP_CONTEXT_UPDATE = 94,
        CP_INTERRUPT = 64,
        CP_IM_STORE = 44,
-       CP_SET_BIN_BASE_OFFSET = 75,
        CP_SET_DRAW_INIT_FLAGS = 75,
        CP_SET_PROTECTED_MODE = 95,
        CP_LOAD_STATE = 48,
@@ -159,6 +161,16 @@ enum adreno_pm4_type3_packets {
        CP_COND_INDIRECT_BUFFER_PFD = 50,
        CP_INDIRECT_BUFFER_PFE = 63,
        CP_SET_BIN = 76,
+       CP_TEST_TWO_MEMS = 113,
+       CP_WAIT_FOR_ME = 19,
+       IN_IB_PREFETCH_END = 23,
+       IN_SUBBLK_PREFETCH = 31,
+       IN_INSTR_PREFETCH = 32,
+       IN_INSTR_MATCH = 71,
+       IN_CONST_PREFETCH = 73,
+       IN_INCR_UPDT_STATE = 85,
+       IN_INCR_UPDT_CONST = 86,
+       IN_INCR_UPDT_INSTR = 87,
 };
 
 enum adreno_state_block {
index 6d4c62bf70dc482cf9f38ea1739046cb19732943..87be647e3825f0ad96491f5df5b8cf271158f848 100644 (file)
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
index d1df38bf574783bf98c39253315e3810527d1377..747a6ef4211f71748e759343213ce7ecdc5fb335 100644 (file)
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
index 0030a111302dae44c6a4ecc0e21d5aa9e7364032..48e03acf19bf5bdf61659c96acd4444e6c716c73 100644 (file)
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
index 50d11df35b21f1cac9ac15d41f649015a664cfbf..6f1588aa9071f8db8642dcb8b32e46642381b38d 100644 (file)
@@ -41,7 +41,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
                        power_on ? "Enable" : "Disable", ctrl);
 }
 
-static irqreturn_t hdmi_irq(int irq, void *dev_id)
+irqreturn_t hdmi_irq(int irq, void *dev_id)
 {
        struct hdmi *hdmi = dev_id;
 
@@ -71,13 +71,13 @@ void hdmi_destroy(struct kref *kref)
 }
 
 /* initialize connector */
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 {
        struct hdmi *hdmi = NULL;
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = hdmi_pdev;
        struct hdmi_platform_config *config;
-       int ret;
+       int i, ret;
 
        if (!pdev) {
                dev_err(dev->dev, "no hdmi device\n");
@@ -99,6 +99,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
 
        hdmi->dev = dev;
        hdmi->pdev = pdev;
+       hdmi->config = config;
        hdmi->encoder = encoder;
 
        /* not sure about which phy maps to which msm.. probably I miss some */
@@ -114,44 +115,70 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                goto fail;
        }
 
-       hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
+       hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
        if (IS_ERR(hdmi->mmio)) {
                ret = PTR_ERR(hdmi->mmio);
                goto fail;
        }
 
-       hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
-       if (IS_ERR(hdmi->mvs))
-               hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
-       if (IS_ERR(hdmi->mvs)) {
-               ret = PTR_ERR(hdmi->mvs);
-               dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
-               goto fail;
+       BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs));
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               struct regulator *reg;
+
+               reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+               if (IS_ERR(reg)) {
+                       ret = PTR_ERR(reg);
+                       dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+                       goto fail;
+               }
+
+               hdmi->hpd_regs[i] = reg;
        }
 
-       hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
-       if (IS_ERR(hdmi->mpp0))
-               hdmi->mpp0 = NULL;
+       BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs));
+       for (i = 0; i < config->pwr_reg_cnt; i++) {
+               struct regulator *reg;
 
-       hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
-       if (IS_ERR(hdmi->clk)) {
-               ret = PTR_ERR(hdmi->clk);
-               dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
-               goto fail;
+               reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+               if (IS_ERR(reg)) {
+                       ret = PTR_ERR(reg);
+                       dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
+                                       config->pwr_reg_names[i], ret);
+                       goto fail;
+               }
+
+               hdmi->pwr_regs[i] = reg;
        }
 
-       hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
-       if (IS_ERR(hdmi->m_pclk)) {
-               ret = PTR_ERR(hdmi->m_pclk);
-               dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
-               goto fail;
+       BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks));
+       for (i = 0; i < config->hpd_clk_cnt; i++) {
+               struct clk *clk;
+
+               clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+               if (IS_ERR(clk)) {
+                       ret = PTR_ERR(clk);
+                       dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n",
+                                       config->hpd_clk_names[i], ret);
+                       goto fail;
+               }
+
+               hdmi->hpd_clks[i] = clk;
        }
 
-       hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
-       if (IS_ERR(hdmi->s_pclk)) {
-               ret = PTR_ERR(hdmi->s_pclk);
-               dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
-               goto fail;
+       BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks));
+       for (i = 0; i < config->pwr_clk_cnt; i++) {
+               struct clk *clk;
+
+               clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+               if (IS_ERR(clk)) {
+                       ret = PTR_ERR(clk);
+                       dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n",
+                                       config->pwr_clk_names[i], ret);
+                       goto fail;
+               }
+
+               hdmi->pwr_clks[i] = clk;
        }
 
        hdmi->i2c = hdmi_i2c_init(hdmi);
@@ -178,20 +205,22 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
                goto fail;
        }
 
-       hdmi->irq = platform_get_irq(pdev, 0);
-       if (hdmi->irq < 0) {
-               ret = hdmi->irq;
-               dev_err(dev->dev, "failed to get irq: %d\n", ret);
-               goto fail;
-       }
+       if (!config->shared_irq) {
+               hdmi->irq = platform_get_irq(pdev, 0);
+               if (hdmi->irq < 0) {
+                       ret = hdmi->irq;
+                       dev_err(dev->dev, "failed to get irq: %d\n", ret);
+                       goto fail;
+               }
 
-       ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
-                       NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-                       "hdmi_isr", hdmi);
-       if (ret < 0) {
-               dev_err(dev->dev, "failed to request IRQ%u: %d\n",
-                               hdmi->irq, ret);
-               goto fail;
+               ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+                               NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                               "hdmi_isr", hdmi);
+               if (ret < 0) {
+                       dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+                                       hdmi->irq, ret);
+                       goto fail;
+               }
        }
 
        encoder->bridge = hdmi->bridge;
@@ -199,7 +228,7 @@ int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
        priv->bridges[priv->num_bridges++]       = hdmi->bridge;
        priv->connectors[priv->num_connectors++] = hdmi->connector;
 
-       return 0;
+       return hdmi;
 
 fail:
        if (hdmi) {
@@ -211,37 +240,100 @@ fail:
                hdmi_destroy(&hdmi->refcount);
        }
 
-       return ret;
+       return ERR_PTR(ret);
 }
 
 /*
  * The hdmi device:
  */
 
+#include <linux/of_gpio.h>
+
 static int hdmi_dev_probe(struct platform_device *pdev)
 {
        static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
-       /* TODO */
+       struct device_node *of_node = pdev->dev.of_node;
+
+       int get_gpio(const char *name)
+       {
+               int gpio = of_get_named_gpio(of_node, name, 0);
+               if (gpio < 0) {
+                       dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
+                                       name, gpio);
+                       gpio = -1;
+               }
+               return gpio;
+       }
+
+       /* TODO actually use DT.. */
+       static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+       static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+       static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+       static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+
+       config.phy_init      = hdmi_phy_8x74_init;
+       config.mmio_name     = "core_physical";
+       config.hpd_reg_names = hpd_reg_names;
+       config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+       config.pwr_reg_names = pwr_reg_names;
+       config.pwr_reg_cnt   = ARRAY_SIZE(pwr_reg_names);
+       config.hpd_clk_names = hpd_clk_names;
+       config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
+       config.pwr_clk_names = pwr_clk_names;
+       config.pwr_clk_cnt   = ARRAY_SIZE(pwr_clk_names);
+       config.ddc_clk_gpio  = get_gpio("qcom,hdmi-tx-ddc-clk");
+       config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
+       config.hpd_gpio      = get_gpio("qcom,hdmi-tx-hpd");
+       config.mux_en_gpio   = get_gpio("qcom,hdmi-tx-mux-en");
+       config.mux_sel_gpio  = get_gpio("qcom,hdmi-tx-mux-sel");
+       config.shared_irq    = true;
+
 #else
+       static const char *hpd_clk_names[] = {
+                       "core_clk", "master_iface_clk", "slave_iface_clk",
+       };
        if (cpu_is_apq8064()) {
+               static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
                config.phy_init      = hdmi_phy_8960_init;
+               config.mmio_name     = "hdmi_msm_hdmi_addr";
+               config.hpd_reg_names = hpd_reg_names;
+               config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+               config.hpd_clk_names = hpd_clk_names;
+               config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
                config.ddc_clk_gpio  = 70;
                config.ddc_data_gpio = 71;
                config.hpd_gpio      = 72;
-               config.pmic_gpio     = 13 + NR_GPIO_IRQS;
-       } else if (cpu_is_msm8960()) {
+               config.mux_en_gpio   = -1;
+               config.mux_sel_gpio  = 13 + NR_GPIO_IRQS;
+       } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
+               static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
                config.phy_init      = hdmi_phy_8960_init;
+               config.mmio_name     = "hdmi_msm_hdmi_addr";
+               config.hpd_reg_names = hpd_reg_names;
+               config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+               config.hpd_clk_names = hpd_clk_names;
+               config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
                config.ddc_clk_gpio  = 100;
                config.ddc_data_gpio = 101;
                config.hpd_gpio      = 102;
-               config.pmic_gpio     = -1;
+               config.mux_en_gpio   = -1;
+               config.mux_sel_gpio  = -1;
        } else if (cpu_is_msm8x60()) {
+               static const char *hpd_reg_names[] = {
+                               "8901_hdmi_mvs", "8901_mpp0"
+               };
                config.phy_init      = hdmi_phy_8x60_init;
+               config.mmio_name     = "hdmi_msm_hdmi_addr";
+               config.hpd_reg_names = hpd_reg_names;
+               config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
+               config.hpd_clk_names = hpd_clk_names;
+               config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
                config.ddc_clk_gpio  = 170;
                config.ddc_data_gpio = 171;
                config.hpd_gpio      = 172;
-               config.pmic_gpio     = -1;
+               config.mux_en_gpio   = -1;
+               config.mux_sel_gpio  = -1;
        }
 #endif
        pdev->dev.platform_data = &config;
@@ -255,10 +347,19 @@ static int hdmi_dev_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id dt_match[] = {
+       { .compatible = "qcom,hdmi-tx" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
 static struct platform_driver hdmi_driver = {
        .probe = hdmi_dev_probe,
        .remove = hdmi_dev_remove,
-       .driver.name = "hdmi_msm",
+       .driver = {
+               .name = "hdmi_msm",
+               .of_match_table = dt_match,
+       },
 };
 
 void __init hdmi_register(void)
index 2c2ec566394c7ac314c9bf3e97b70071bfce4d48..41b29add70b1be7a19b480d5a84ba25aeb79518b 100644 (file)
@@ -28,6 +28,7 @@
 
 
 struct hdmi_phy;
+struct hdmi_platform_config;
 
 struct hdmi {
        struct kref refcount;
@@ -35,14 +36,14 @@ struct hdmi {
        struct drm_device *dev;
        struct platform_device *pdev;
 
-       void __iomem *mmio;
+       const struct hdmi_platform_config *config;
 
-       struct regulator *mvs;        /* HDMI_5V */
-       struct regulator *mpp0;       /* External 5V */
+       void __iomem *mmio;
 
-       struct clk *clk;
-       struct clk *m_pclk;
-       struct clk *s_pclk;
+       struct regulator *hpd_regs[2];
+       struct regulator *pwr_regs[2];
+       struct clk *hpd_clks[3];
+       struct clk *pwr_clks[2];
 
        struct hdmi_phy *phy;
        struct i2c_adapter *i2c;
@@ -60,7 +61,29 @@ struct hdmi {
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
        struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
-       int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
+       const char *mmio_name;
+
+       /* regulators that need to be on for hpd: */
+       const char **hpd_reg_names;
+       int hpd_reg_cnt;
+
+       /* regulators that need to be on for screen pwr: */
+       const char **pwr_reg_names;
+       int pwr_reg_cnt;
+
+       /* clks that need to be on for hpd: */
+       const char **hpd_clk_names;
+       int hpd_clk_cnt;
+
+       /* clks that need to be on for screen pwr (ie pixel clk): */
+       const char **pwr_clk_names;
+       int pwr_clk_cnt;
+
+       /* gpio's: */
+       int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+
+       /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
+       bool shared_irq;
 };
 
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
@@ -106,6 +129,7 @@ struct hdmi_phy {
 
 struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
 struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
 
 /*
  * hdmi bridge:
index 4e939f82918c68daee08cc95253eeb25f31c24b8..e2636582cfd753bc2443659ee615db428530e2dc 100644 (file)
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -212,6 +214,20 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state
 #define REG_HDMI_HDCP_RESET                                    0x00000130
 #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE                   0x00000001
 
+#define REG_HDMI_VENSPEC_INFO0                                 0x0000016c
+
+#define REG_HDMI_VENSPEC_INFO1                                 0x00000170
+
+#define REG_HDMI_VENSPEC_INFO2                                 0x00000174
+
+#define REG_HDMI_VENSPEC_INFO3                                 0x00000178
+
+#define REG_HDMI_VENSPEC_INFO4                                 0x0000017c
+
+#define REG_HDMI_VENSPEC_INFO5                                 0x00000180
+
+#define REG_HDMI_VENSPEC_INFO6                                 0x00000184
+
 #define REG_HDMI_AUDIO_CFG                                     0x000001d0
 #define HDMI_AUDIO_CFG_ENGINE_ENABLE                           0x00000001
 #define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK                    0x000000f0
@@ -235,6 +251,9 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
        return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
 }
 
+#define REG_HDMI_DDC_ARBITRATION                               0x00000210
+#define HDMI_DDC_ARBITRATION_HW_ARBITRATION                    0x00000010
+
 #define REG_HDMI_DDC_INT_CTRL                                  0x00000214
 #define HDMI_DDC_INT_CTRL_SW_DONE_INT                          0x00000001
 #define HDMI_DDC_INT_CTRL_SW_DONE_ACK                          0x00000002
@@ -340,6 +359,20 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
        return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
 }
 
+#define REG_HDMI_CEC_STATUS                                    0x00000298
+
+#define REG_HDMI_CEC_INT                                       0x0000029c
+
+#define REG_HDMI_CEC_ADDR                                      0x000002a0
+
+#define REG_HDMI_CEC_TIME                                      0x000002a4
+
+#define REG_HDMI_CEC_REFTIMER                                  0x000002a8
+
+#define REG_HDMI_CEC_RD_DATA                                   0x000002ac
+
+#define REG_HDMI_CEC_RD_FILTER                                 0x000002b0
+
 #define REG_HDMI_ACTIVE_HSYNC                                  0x000002b4
 #define HDMI_ACTIVE_HSYNC_START__MASK                          0x00000fff
 #define HDMI_ACTIVE_HSYNC_START__SHIFT                         0
@@ -410,17 +443,33 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
 #define HDMI_FRAME_CTRL_HSYNC_LOW                              0x20000000
 #define HDMI_FRAME_CTRL_INTERLACED_EN                          0x80000000
 
+#define REG_HDMI_AUD_INT                                       0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT                         0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK                                0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT                          0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK                         0x00000008
+
 #define REG_HDMI_PHY_CTRL                                      0x000002d4
 #define HDMI_PHY_CTRL_SW_RESET_PLL                             0x00000001
 #define HDMI_PHY_CTRL_SW_RESET_PLL_LOW                         0x00000002
 #define HDMI_PHY_CTRL_SW_RESET                                 0x00000004
 #define HDMI_PHY_CTRL_SW_RESET_LOW                             0x00000008
 
-#define REG_HDMI_AUD_INT                                       0x000002cc
-#define HDMI_AUD_INT_AUD_FIFO_URUN_INT                         0x00000001
-#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK                                0x00000002
-#define HDMI_AUD_INT_AUD_SAM_DROP_INT                          0x00000004
-#define HDMI_AUD_INT_AUD_SAM_DROP_MASK                         0x00000008
+#define REG_HDMI_CEC_WR_RANGE                                  0x000002dc
+
+#define REG_HDMI_CEC_RD_RANGE                                  0x000002e0
+
+#define REG_HDMI_VERSION                                       0x000002e4
+
+#define REG_HDMI_CEC_COMPL_CTL                                 0x00000360
+
+#define REG_HDMI_CEC_RD_START_RANGE                            0x00000364
+
+#define REG_HDMI_CEC_RD_TOTAL_RANGE                            0x00000368
+
+#define REG_HDMI_CEC_RD_ERR_RESP_LO                            0x0000036c
+
+#define REG_HDMI_CEC_WR_CHECK_CONFIG                           0x00000370
 
 #define REG_HDMI_8x60_PHY_REG0                                 0x00000300
 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK                        0x0000001c
@@ -504,5 +553,23 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
 
 #define REG_HDMI_8960_PHY_REG12                                        0x00000430
 
+#define REG_HDMI_8x74_ANA_CFG0                                 0x00000000
+
+#define REG_HDMI_8x74_ANA_CFG1                                 0x00000004
+
+#define REG_HDMI_8x74_PD_CTRL0                                 0x00000010
+
+#define REG_HDMI_8x74_PD_CTRL1                                 0x00000014
+
+#define REG_HDMI_8x74_BIST_CFG0                                        0x00000034
+
+#define REG_HDMI_8x74_BIST_PATN0                               0x0000003c
+
+#define REG_HDMI_8x74_BIST_PATN1                               0x00000040
+
+#define REG_HDMI_8x74_BIST_PATN2                               0x00000044
+
+#define REG_HDMI_8x74_BIST_PATN3                               0x00000048
+
 
 #endif /* HDMI_XML */
index 5a8ee3473cf5e0e229ec5c94743e4f7749950ea7..7d10e55403c61da733c1e60c7922799063eef79f 100644 (file)
@@ -21,6 +21,7 @@ struct hdmi_bridge {
        struct drm_bridge base;
 
        struct hdmi *hdmi;
+       bool power_on;
 
        unsigned long int pixclock;
 };
@@ -34,6 +35,65 @@ static void hdmi_bridge_destroy(struct drm_bridge *bridge)
        kfree(hdmi_bridge);
 }
 
+static void power_on(struct drm_bridge *bridge)
+{
+       struct drm_device *dev = bridge->dev;
+       struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+       struct hdmi *hdmi = hdmi_bridge->hdmi;
+       const struct hdmi_platform_config *config = hdmi->config;
+       int i, ret;
+
+       for (i = 0; i < config->pwr_reg_cnt; i++) {
+               ret = regulator_enable(hdmi->pwr_regs[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+                                       config->pwr_reg_names[i], ret);
+               }
+       }
+
+       if (config->pwr_clk_cnt > 0) {
+               DBG("pixclock: %lu", hdmi_bridge->pixclock);
+               ret = clk_set_rate(hdmi->pwr_clks[0], hdmi_bridge->pixclock);
+               if (ret) {
+                       dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+                                       config->pwr_clk_names[0], ret);
+               }
+       }
+
+       for (i = 0; i < config->pwr_clk_cnt; i++) {
+               ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+                                       config->pwr_clk_names[i], ret);
+               }
+       }
+}
+
+static void power_off(struct drm_bridge *bridge)
+{
+       struct drm_device *dev = bridge->dev;
+       struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+       struct hdmi *hdmi = hdmi_bridge->hdmi;
+       const struct hdmi_platform_config *config = hdmi->config;
+       int i, ret;
+
+       /* TODO do we need to wait for final vblank somewhere before
+        * cutting the clocks?
+        */
+       mdelay(16 + 4);
+
+       for (i = 0; i < config->pwr_clk_cnt; i++)
+               clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+       for (i = 0; i < config->pwr_reg_cnt; i++) {
+               ret = regulator_disable(hdmi->pwr_regs[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+                                       config->pwr_reg_names[i], ret);
+               }
+       }
+}
+
 static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
 {
        struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -41,6 +101,12 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
        struct hdmi_phy *phy = hdmi->phy;
 
        DBG("power up");
+
+       if (!hdmi_bridge->power_on) {
+               power_on(bridge);
+               hdmi_bridge->power_on = true;
+       }
+
        phy->funcs->powerup(phy, hdmi_bridge->pixclock);
        hdmi_set_mode(hdmi, true);
 }
@@ -62,6 +128,11 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
        DBG("power down");
        hdmi_set_mode(hdmi, false);
        phy->funcs->powerdown(phy);
+
+       if (hdmi_bridge->power_on) {
+               power_off(bridge);
+               hdmi_bridge->power_on = false;
+       }
 }
 
 static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
index 823eee521a31b523e04e7470ab01612cdeb0887e..7dedfdd120759d51c7bd5a940bb01e874b87f579 100644 (file)
 
 #include <linux/gpio.h>
 
+#include "msm_kms.h"
 #include "hdmi.h"
 
 struct hdmi_connector {
        struct drm_connector base;
        struct hdmi *hdmi;
+       struct work_struct hpd_work;
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
        struct drm_device *dev = hdmi->dev;
-       struct hdmi_platform_config *config =
-                       hdmi->pdev->dev.platform_data;
+       const struct hdmi_platform_config *config = hdmi->config;
        int ret;
 
        if (on) {
@@ -39,26 +40,43 @@ static int gpio_config(struct hdmi *hdmi, bool on)
                                "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
                        goto error1;
                }
+               gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+
                ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
                if (ret) {
                        dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
                                "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
                        goto error2;
                }
+               gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+
                ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
                if (ret) {
                        dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
                                "HDMI_HPD", config->hpd_gpio, ret);
                        goto error3;
                }
-               if (config->pmic_gpio != -1) {
-                       ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
+               gpio_direction_input(config->hpd_gpio);
+               gpio_set_value_cansleep(config->hpd_gpio, 1);
+
+               if (config->mux_en_gpio != -1) {
+                       ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
                        if (ret) {
                                dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
-                                       "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
+                                       "HDMI_MUX_EN", config->mux_en_gpio, ret);
                                goto error4;
                        }
-                       gpio_set_value_cansleep(config->pmic_gpio, 0);
+                       gpio_set_value_cansleep(config->mux_en_gpio, 1);
+               }
+
+               if (config->mux_sel_gpio != -1) {
+                       ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
+                       if (ret) {
+                               dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+                                       "HDMI_MUX_SEL", config->mux_sel_gpio, ret);
+                               goto error5;
+                       }
+                       gpio_set_value_cansleep(config->mux_sel_gpio, 0);
                }
                DBG("gpio on");
        } else {
@@ -66,15 +84,23 @@ static int gpio_config(struct hdmi *hdmi, bool on)
                gpio_free(config->ddc_data_gpio);
                gpio_free(config->hpd_gpio);
 
-               if (config->pmic_gpio != -1) {
-                       gpio_set_value_cansleep(config->pmic_gpio, 1);
-                       gpio_free(config->pmic_gpio);
+               if (config->mux_en_gpio != -1) {
+                       gpio_set_value_cansleep(config->mux_en_gpio, 0);
+                       gpio_free(config->mux_en_gpio);
+               }
+
+               if (config->mux_sel_gpio != -1) {
+                       gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+                       gpio_free(config->mux_sel_gpio);
                }
                DBG("gpio off");
        }
 
        return 0;
 
+error5:
+       if (config->mux_en_gpio != -1)
+               gpio_free(config->mux_en_gpio);
 error4:
        gpio_free(config->hpd_gpio);
 error3:
@@ -88,10 +114,11 @@ error1:
 static int hpd_enable(struct hdmi_connector *hdmi_connector)
 {
        struct hdmi *hdmi = hdmi_connector->hdmi;
+       const struct hdmi_platform_config *config = hdmi->config;
        struct drm_device *dev = hdmi_connector->base.dev;
        struct hdmi_phy *phy = hdmi->phy;
        uint32_t hpd_ctrl;
-       int ret;
+       int i, ret;
 
        ret = gpio_config(hdmi, true);
        if (ret) {
@@ -99,31 +126,22 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
                goto fail;
        }
 
-       ret = clk_prepare_enable(hdmi->clk);
-       if (ret) {
-               dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
-               goto fail;
-       }
-
-       ret = clk_prepare_enable(hdmi->m_pclk);
-       if (ret) {
-               dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
-               goto fail;
-       }
-
-       ret = clk_prepare_enable(hdmi->s_pclk);
-       if (ret) {
-               dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
-               goto fail;
+       for (i = 0; i < config->hpd_clk_cnt; i++) {
+               ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
+                                       config->hpd_clk_names[i], ret);
+                       goto fail;
+               }
        }
 
-       if (hdmi->mpp0)
-               ret = regulator_enable(hdmi->mpp0);
-       if (!ret)
-               ret = regulator_enable(hdmi->mvs);
-       if (ret) {
-               dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
-               goto fail;
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_enable(hdmi->hpd_regs[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+                       goto fail;
+               }
        }
 
        hdmi_set_mode(hdmi, false);
@@ -156,26 +174,26 @@ fail:
 static int hdp_disable(struct hdmi_connector *hdmi_connector)
 {
        struct hdmi *hdmi = hdmi_connector->hdmi;
+       const struct hdmi_platform_config *config = hdmi->config;
        struct drm_device *dev = hdmi_connector->base.dev;
-       int ret = 0;
+       int i, ret = 0;
 
        /* Disable HPD interrupt */
        hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
 
        hdmi_set_mode(hdmi, false);
 
-       if (hdmi->mpp0)
-               ret = regulator_disable(hdmi->mpp0);
-       if (!ret)
-               ret = regulator_disable(hdmi->mvs);
-       if (ret) {
-               dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
-               goto fail;
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_disable(hdmi->hpd_regs[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+                       goto fail;
+               }
        }
 
-       clk_disable_unprepare(hdmi->clk);
-       clk_disable_unprepare(hdmi->m_pclk);
-       clk_disable_unprepare(hdmi->s_pclk);
+       for (i = 0; i < config->hpd_clk_cnt; i++)
+               clk_disable_unprepare(hdmi->hpd_clks[i]);
 
        ret = gpio_config(hdmi, false);
        if (ret) {
@@ -189,9 +207,19 @@ fail:
        return ret;
 }
 
+/* Deferred HPD work: report the hotplug event to the DRM core.
+ * hdmi_connector_irq() queues this on priv->wq rather than calling
+ * drm_helper_hpd_irq_event() directly from the interrupt path.
+ */
+static void
+hotplug_work(struct work_struct *work)
+{
+       struct hdmi_connector *hdmi_connector =
+               container_of(work, struct hdmi_connector, hpd_work);
+       struct drm_connector *connector = &hdmi_connector->base;
+       drm_helper_hpd_irq_event(connector->dev);
+}
+
 void hdmi_connector_irq(struct drm_connector *connector)
 {
        struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+       struct msm_drm_private *priv = connector->dev->dev_private;
        struct hdmi *hdmi = hdmi_connector->hdmi;
        uint32_t hpd_int_status, hpd_int_ctrl;
 
@@ -209,13 +237,13 @@ void hdmi_connector_irq(struct drm_connector *connector)
                hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
                                hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
 
-               drm_helper_hpd_irq_event(connector->dev);
-
                /* detect disconnect if we are connected or visa versa: */
                hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
                if (!detected)
                        hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
                hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+               queue_work(priv->wq, &hdmi_connector->hpd_work);
        }
 }
 
@@ -224,6 +252,7 @@ static enum drm_connector_status hdmi_connector_detect(
 {
        struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
        struct hdmi *hdmi = hdmi_connector->hdmi;
+       const struct hdmi_platform_config *config = hdmi->config;
        uint32_t hpd_int_status;
        int retry = 20;
 
@@ -233,6 +262,14 @@ static enum drm_connector_status hdmi_connector_detect(
         * let that trick us into thinking the monitor is gone:
         */
        while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
+               /* hdmi debounce logic seems to get stuck sometimes,
+                * read directly the gpio to get a second opinion:
+                */
+               if (gpio_get_value(config->hpd_gpio)) {
+                       DBG("gpio tells us we are connected!");
+                       hpd_int_status |= HDMI_HPD_INT_STATUS_CABLE_DETECTED;
+                       break;
+               }
                mdelay(10);
                hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
                DBG("status=%08x", hpd_int_status);
@@ -285,6 +322,8 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+       struct hdmi *hdmi = hdmi_connector->hdmi;
+       const struct hdmi_platform_config *config = hdmi->config;
        struct msm_drm_private *priv = connector->dev->dev_private;
        struct msm_kms *kms = priv->kms;
        long actual, requested;
@@ -293,6 +332,13 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
        actual = kms->funcs->round_pixclk(kms,
                        requested, hdmi_connector->hdmi->encoder);
 
+       /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+        * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+        * instead):
+        */
+       if (config->pwr_clk_cnt > 0)
+               actual = clk_round_rate(hdmi->pwr_clks[0], actual);
+
        DBG("requested=%ld, actual=%ld", requested, actual);
 
        if (actual != requested)
@@ -335,6 +381,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
        }
 
        hdmi_connector->hdmi = hdmi_reference(hdmi);
+       INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
 
        connector = &hdmi_connector->base;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
new file mode 100644 (file)
index 0000000..59fa6cd
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x74 {
+       struct hdmi_phy base;
+       struct hdmi *hdmi;
+       void __iomem *mmio;
+};
+#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
+
+
+/* Write @data to a PHY register; @reg is an offset into the PHY's own
+ * mmio region (phy->mmio), not the hdmi core mmio. */
+static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
+{
+       msm_writel(data, phy->mmio + reg);
+}
+
+//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
+//{
+//     return msm_readl(phy->mmio + reg);
+//}
+
+/* Release the PHY instance allocated by hdmi_phy_8x74_init().
+ * NOTE(review): no explicit unmap of phy->mmio here — presumably
+ * msm_ioremap() is device-managed; confirm against its definition.
+ */
+static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
+{
+       struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+       kfree(phy_8x74);
+}
+
+/* Pulse the PHY and PHY-PLL software resets.
+ *
+ * The SW_RESET_LOW / SW_RESET_PLL_LOW bits of HDMI_PHY_CTRL say whether
+ * the corresponding reset is active-low; each reset is asserted in the
+ * appropriate polarity, held for 100ms, then deasserted.
+ */
+static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
+{
+       struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+       struct hdmi *hdmi = phy_8x74->hdmi;
+       unsigned int val;
+
+       /* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
+
+       val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+       /* assert both resets, honouring the active-low flags: */
+       if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+               /* pull low */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET);
+       } else {
+               /* pull high */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET);
+       }
+
+       if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+               /* pull low */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+       } else {
+               /* pull high */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET_PLL);
+       }
+
+       /* hold the resets asserted before releasing them: */
+       msleep(100);
+
+       /* deassert both resets: */
+       if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+               /* pull high */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET);
+       } else {
+               /* pull low */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET);
+       }
+
+       if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+               /* pull high */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET_PLL);
+       } else {
+               /* pull low */
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+       }
+}
+
+/* Power up the PHY: program the ANA_CFG analog settings, zero the BIST
+ * pattern registers, and write PD_CTRL1.  The register values are fixed
+ * magic numbers for 8x74; @pixclock is unused by this implementation.
+ */
+static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
+               unsigned long int pixclock)
+{
+       struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+
+       phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0,   0x1b);
+       phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1,   0xf2);
+       phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0,  0x0);
+       phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
+       phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
+       phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
+       phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
+       phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1,   0x20);
+}
+
+/* Power down the PHY by writing 0x7f to PD_CTRL0. */
+static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
+{
+       struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
+       phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+}
+
+/* hdmi_phy ops vtable for the 8x74 PHY back-end. */
+static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
+               .destroy = hdmi_phy_8x74_destroy,
+               .reset = hdmi_phy_8x74_reset,
+               .powerup = hdmi_phy_8x74_powerup,
+               .powerdown = hdmi_phy_8x74_powerdown,
+};
+
+/* Construct the 8x74 HDMI PHY back-end.
+ *
+ * Allocates the phy instance and maps its dedicated "phy_physical"
+ * mmio region (on 8x74 the PHY registers are a separate region from
+ * the hdmi core mmio).  Returns the embedded struct hdmi_phy, or
+ * ERR_PTR() on failure; on success the caller owns the phy and
+ * releases it via phy->funcs->destroy().
+ */
+struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
+{
+       struct hdmi_phy_8x74 *phy_8x74;
+       struct hdmi_phy *phy = NULL;
+       int ret;
+
+       phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
+       if (!phy_8x74) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       phy = &phy_8x74->base;
+
+       phy->funcs = &hdmi_phy_8x74_funcs;
+
+       phy_8x74->hdmi = hdmi;
+
+       /* for 8x74, the phy mmio is mapped separately: */
+       phy_8x74->mmio = msm_ioremap(hdmi->pdev,
+                       "phy_physical", "HDMI_8x74");
+       if (IS_ERR(phy_8x74->mmio)) {
+               ret = PTR_ERR(phy_8x74->mmio);
+               goto fail;
+       }
+
+       return phy;
+
+fail:
+       /* phy is NULL if the kzalloc itself failed */
+       if (phy)
+               hdmi_phy_8x74_destroy(phy);
+       return ERR_PTR(ret);
+}
index dbde4f6339b9f82b257c2d3ba24cc534d066760e..d591567173c405226eea5fc9d06b4591887f29d1 100644 (file)
@@ -8,14 +8,16 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
new file mode 100644 (file)
index 0000000..416a26e
--- /dev/null
@@ -0,0 +1,1033 @@
+#ifndef MDP4_XML
+#define MDP4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp4_pipe {
+       VG1 = 0,
+       VG2 = 1,
+       RGB1 = 2,
+       RGB2 = 3,
+       RGB3 = 4,
+       VG3 = 5,
+       VG4 = 6,
+};
+
+enum mdp4_mixer {
+       MIXER0 = 0,
+       MIXER1 = 1,
+       MIXER2 = 2,
+};
+
+enum mdp4_intf {
+       INTF_LCDC_DTV = 0,
+       INTF_DSI_VIDEO = 1,
+       INTF_DSI_CMD = 2,
+       INTF_EBI2_TV = 3,
+};
+
+enum mdp4_cursor_format {
+       CURSOR_ARGB = 1,
+       CURSOR_XRGB = 2,
+};
+
+enum mdp4_dma {
+       DMA_P = 0,
+       DMA_S = 1,
+       DMA_E = 2,
+};
+
+#define MDP4_IRQ_OVERLAY0_DONE                                 0x00000001
+#define MDP4_IRQ_OVERLAY1_DONE                                 0x00000002
+#define MDP4_IRQ_DMA_S_DONE                                    0x00000004
+#define MDP4_IRQ_DMA_E_DONE                                    0x00000008
+#define MDP4_IRQ_DMA_P_DONE                                    0x00000010
+#define MDP4_IRQ_VG1_HISTOGRAM                                 0x00000020
+#define MDP4_IRQ_VG2_HISTOGRAM                                 0x00000040
+#define MDP4_IRQ_PRIMARY_VSYNC                                 0x00000080
+#define MDP4_IRQ_PRIMARY_INTF_UDERRUN                          0x00000100
+#define MDP4_IRQ_EXTERNAL_VSYNC                                        0x00000200
+#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN                         0x00000400
+#define MDP4_IRQ_PRIMARY_RDPTR                                 0x00000800
+#define MDP4_IRQ_DMA_P_HISTOGRAM                               0x00020000
+#define MDP4_IRQ_DMA_S_HISTOGRAM                               0x04000000
+#define MDP4_IRQ_OVERLAY2_DONE                                 0x40000000
+#define REG_MDP4_VERSION                                       0x00000000
+#define MDP4_VERSION_MINOR__MASK                               0x00ff0000
+#define MDP4_VERSION_MINOR__SHIFT                              16
+static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
+{
+       return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
+}
+#define MDP4_VERSION_MAJOR__MASK                               0xff000000
+#define MDP4_VERSION_MAJOR__SHIFT                              24
+static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
+{
+       return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP4_OVLP0_KICK                                    0x00000004
+
+#define REG_MDP4_OVLP1_KICK                                    0x00000008
+
+#define REG_MDP4_OVLP2_KICK                                    0x000000d0
+
+#define REG_MDP4_DMA_P_KICK                                    0x0000000c
+
+#define REG_MDP4_DMA_S_KICK                                    0x00000010
+
+#define REG_MDP4_DMA_E_KICK                                    0x00000014
+
+#define REG_MDP4_DISP_STATUS                                   0x00000018
+
+#define REG_MDP4_DISP_INTF_SEL                                 0x00000038
+#define MDP4_DISP_INTF_SEL_PRIM__MASK                          0x00000003
+#define MDP4_DISP_INTF_SEL_PRIM__SHIFT                         0
+static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
+{
+       return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
+}
+#define MDP4_DISP_INTF_SEL_SEC__MASK                           0x0000000c
+#define MDP4_DISP_INTF_SEL_SEC__SHIFT                          2
+static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
+{
+       return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
+}
+#define MDP4_DISP_INTF_SEL_EXT__MASK                           0x00000030
+#define MDP4_DISP_INTF_SEL_EXT__SHIFT                          4
+static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
+{
+       return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
+}
+#define MDP4_DISP_INTF_SEL_DSI_VIDEO                           0x00000040
+#define MDP4_DISP_INTF_SEL_DSI_CMD                             0x00000080
+
+#define REG_MDP4_RESET_STATUS                                  0x0000003c
+
+#define REG_MDP4_READ_CNFG                                     0x0000004c
+
+#define REG_MDP4_INTR_ENABLE                                   0x00000050
+
+#define REG_MDP4_INTR_STATUS                                   0x00000054
+
+#define REG_MDP4_INTR_CLEAR                                    0x00000058
+
+#define REG_MDP4_EBI2_LCD0                                     0x00000060
+
+#define REG_MDP4_EBI2_LCD1                                     0x00000064
+
+#define REG_MDP4_PORTMAP_MODE                                  0x00000070
+
+#define REG_MDP4_CS_CONTROLLER0                                        0x000000c0
+
+#define REG_MDP4_CS_CONTROLLER1                                        0x000000c4
+
+#define REG_MDP4_LAYERMIXER2_IN_CFG                            0x000100f0
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK                    0x00000007
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT                   0
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1                   0x00000008
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK                    0x00000070
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT                   4
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1                   0x00000080
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK                    0x00000700
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT                   8
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1                   0x00000800
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK                    0x00007000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT                   12
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1                   0x00008000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK                    0x00070000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT                   16
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1                   0x00080000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK                    0x00700000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT                   20
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1                   0x00800000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK                    0x07000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT                   24
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1                   0x08000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK                    0x70000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT                   28
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1                   0x80000000
+
+#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD               0x000100fc
+
+#define REG_MDP4_LAYERMIXER_IN_CFG                             0x00010100
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK                     0x00000007
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT                    0
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1                    0x00000008
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK                     0x00000070
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT                    4
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1                    0x00000080
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK                     0x00000700
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT                    8
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1                    0x00000800
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK                     0x00007000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT                    12
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1                    0x00008000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK                     0x00070000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT                    16
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1                    0x00080000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK                     0x00700000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT                    20
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1                    0x00800000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK                     0x07000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT                    24
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1                    0x08000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK                     0x70000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT                    28
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1                    0x80000000
+
+#define REG_MDP4_VG2_SRC_FORMAT                                        0x00030050
+
+#define REG_MDP4_VG2_CONST_COLOR                               0x00031008
+
+#define REG_MDP4_OVERLAY_FLUSH                                 0x00018000
+#define MDP4_OVERLAY_FLUSH_OVLP0                               0x00000001
+#define MDP4_OVERLAY_FLUSH_OVLP1                               0x00000002
+#define MDP4_OVERLAY_FLUSH_VG1                                 0x00000004
+#define MDP4_OVERLAY_FLUSH_VG2                                 0x00000008
+#define MDP4_OVERLAY_FLUSH_RGB1                                        0x00000010
+#define MDP4_OVERLAY_FLUSH_RGB2                                        0x00000020
+
+static inline uint32_t __offset_OVLP(uint32_t idx)
+{
+       switch (idx) {
+               case 0: return 0x00010000;
+               case 1: return 0x00018000;
+               case 2: return 0x00088000;
+               default: return INVALID_IDX(idx);
+       }
+}
+static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
+#define MDP4_OVLP_SIZE_HEIGHT__MASK                            0xffff0000
+#define MDP4_OVLP_SIZE_HEIGHT__SHIFT                           16
+static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
+}
+#define MDP4_OVLP_SIZE_WIDTH__MASK                             0x0000ffff
+#define MDP4_OVLP_SIZE_WIDTH__SHIFT                            0
+static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
+
+/*
+ * STAGE sub-block inside an OVLP instance (blend stages 0..3).  Offsets
+ * are irregular (stage 3 is 0x1c below the 0x20 stride the others imply),
+ * so they are enumerated explicitly.
+ */
+static inline uint32_t __offset_STAGE(uint32_t idx)
+{
+       switch (idx) {
+               case 0: return 0x00000104;
+               case 1: return 0x00000124;
+               case 2: return 0x00000144;
+               case 3: return 0x00000160;
+               default: return INVALID_IDX(idx);
+       }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+/* STAGE_OP: FG alpha select in bits [1:0], BG alpha select in bits [5:4],
+ * plus per-side invert/modulate/transparency enable bits. */
+static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK                      0x00000003
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT                     0
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
+{
+       return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA                                0x00000004
+#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA                                0x00000008
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK                      0x00000030
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT                     4
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
+{
+       return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA                                0x00000040
+#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA                                0x00000080
+#define MDP4_OVLP_STAGE_OP_FG_TRANSP                           0x00000100
+#define MDP4_OVLP_STAGE_OP_BG_TRANSP                           0x00000200
+
+/* Per-stage alpha and transparency-range registers (offsets within stage). */
+static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+/*
+ * STAGE_CO3 sub-block: per-stage "constant alpha" select, again at
+ * irregular per-stage offsets within the OVLP instance.
+ */
+static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
+{
+       switch (idx) {
+               case 0: return 0x00001004;
+               case 1: return 0x00001404;
+               case 2: return 0x00001804;
+               case 3: return 0x00001b84;
+               default: return INVALID_IDX(idx);
+       }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA                       0x00000001
+
+/* OVLP-wide transparency range registers. */
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
+
+
+/* OVLP CSC coefficient tables: i1 indexes 32-bit entries (0x4 stride).
+ * MV = matrix values, PRE/POST_BV = bias vectors, PRE/POST_LV = limit
+ * vectors; the _VAL variants are generated aliases of the array base. */
+static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+/* Fixed-address DMA engine op-mode registers and the LUTN table array
+ * (each LUT i0 spans 0x400 bytes; entries i1 are 32-bit, 0x4 apart). */
+#define REG_MDP4_DMA_P_OP_MODE                                 0x00090070
+
+static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+#define REG_MDP4_DMA_S_OP_MODE                                 0x000a0028
+
+/* DMA_E quantization registers: 32-bit entries at 0x4 stride. */
+static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
+
+/*
+ * DMA engine register block, keyed by enum mdp4_dma (P = primary,
+ * S = secondary, E = external); each engine has a distinct fixed base.
+ */
+static inline uint32_t __offset_DMA(enum mdp4_dma idx)
+{
+       switch (idx) {
+               case DMA_P: return 0x00090000;
+               case DMA_S: return 0x000a0000;
+               case DMA_E: return 0x000b0000;
+               default: return INVALID_IDX(idx);
+       }
+}
+static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+
+/* DMA_CONFIG: 2-bit per-component bits-per-channel fields (G [1:0],
+ * B [3:2], R [5:4]), MSB pack-align bit, and the component pack order
+ * byte in bits [15:8]. */
+static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+#define MDP4_DMA_CONFIG_G_BPC__MASK                            0x00000003
+#define MDP4_DMA_CONFIG_G_BPC__SHIFT                           0
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_B_BPC__MASK                            0x0000000c
+#define MDP4_DMA_CONFIG_B_BPC__SHIFT                           2
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_R_BPC__MASK                            0x00000030
+#define MDP4_DMA_CONFIG_R_BPC__SHIFT                           4
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB                         0x00000080
+#define MDP4_DMA_CONFIG_PACK__MASK                             0x0000ff00
+#define MDP4_DMA_CONFIG_PACK__SHIFT                            8
+static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
+{
+       return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
+}
+#define MDP4_DMA_CONFIG_DEFLKR_EN                              0x01000000
+/* NOTE(review): DITHER_EN shares bit 0x01000000 with DEFLKR_EN above —
+ * possibly an intentional alias in the generated register database;
+ * confirm against the MDP4 hardware documentation before relying on it. */
+#define MDP4_DMA_CONFIG_DITHER_EN                              0x01000000
+
+/* DMA source/destination geometry: SIZE registers pack HEIGHT [31:16]
+ * and WIDTH [15:0]; BASE/STRIDE describe the scanout buffer. */
+static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
+#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK                         0xffff0000
+#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT                                16
+static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_SRC_SIZE_WIDTH__MASK                          0x0000ffff
+#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT                         0
+static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
+#define MDP4_DMA_DST_SIZE_HEIGHT__MASK                         0xffff0000
+#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT                                16
+static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_DST_SIZE_WIDTH__MASK                          0x0000ffff
+#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT                         0
+static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
+}
+
+/* Hardware cursor: 7-bit width/height fields (max 127 per the masks),
+ * image base address, and signed-free 16-bit x/y position. */
+static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK                       0x0000007f
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT                      0
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
+}
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK                      0x007f0000
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT                     16
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_POS_X__MASK                            0x0000ffff
+#define MDP4_DMA_CURSOR_POS_X__SHIFT                           0
+static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
+{
+       return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
+}
+#define MDP4_DMA_CURSOR_POS_Y__MASK                            0xffff0000
+#define MDP4_DMA_CURSOR_POS_Y__SHIFT                           16
+static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
+{
+       return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
+}
+
+/* Cursor blending: enable bit, 2-bit pixel-format select ([2:1],
+ * enum mdp4_cursor_format), transparency-key enable, and the blend
+ * parameter / transparency range registers that follow. */
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN                 0x00000001
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK              0x00000006
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT             1
+static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
+{
+       return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
+}
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN                 0x00000008
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
+
+
+/* Per-DMA CSC coefficient tables, same layout as the OVLP CSC arrays:
+ * i1 indexes 32-bit entries at 0x4 stride; _VAL is a generated alias. */
+static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+/*
+ * PIPE (source surface) register block: unlike OVLP/DMA, pipes are
+ * uniformly spaced — base 0x00020000 with a 0x10000 stride per
+ * enum mdp4_pipe instance, so plain arithmetic replaces the switch.
+ */
+static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+
+/* Source/destination geometry: SIZE and XY registers all pack the
+ * vertical component in [31:16] and horizontal in [15:0]. */
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK                                0xffff0000
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT                       16
+static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK                         0x0000ffff
+#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT                                0
+static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_XY_Y__MASK                               0xffff0000
+#define MDP4_PIPE_SRC_XY_Y__SHIFT                              16
+static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP4_PIPE_SRC_XY_X__MASK                               0x0000ffff
+#define MDP4_PIPE_SRC_XY_X__SHIFT                              0
+static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK                                0xffff0000
+#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT                       16
+static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_DST_SIZE_WIDTH__MASK                         0x0000ffff
+#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT                                0
+static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+#define MDP4_PIPE_DST_XY_Y__MASK                               0xffff0000
+#define MDP4_PIPE_DST_XY_Y__SHIFT                              16
+static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
+}
+#define MDP4_PIPE_DST_XY_X__MASK                               0x0000ffff
+#define MDP4_PIPE_DST_XY_X__SHIFT                              0
+static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
+}
+
+/* Per-plane buffer base addresses (P0..P2) and pairwise-packed strides:
+ * STRIDE_A carries plane 0 [15:0] / plane 1 [31:16], STRIDE_B carries
+ * plane 2 [15:0] / plane 3 [31:16]. */
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK                                0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT                       0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK                                0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT                       16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK                                0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT                       0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK                                0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT                       16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK                      0xffff0000
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT                     16
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK                       0x0000ffff
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT                      0
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
+}
+
+/* SRC_FORMAT: per-component bits-per-channel (2-bit fields G [1:0],
+ * B [3:2], R [5:4], A [7:6]), alpha enable, cycles-per-pixel [10:9],
+ * rotation, unpack count [14:13], tight/MSB-aligned unpack, solid fill. */
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK                       0x00000003
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT                      0
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK                       0x0000000c
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT                      2
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK                       0x00000030
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT                      4
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK                       0x000000c0
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT                      6
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+       return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE                      0x00000100
+#define MDP4_PIPE_SRC_FORMAT_CPP__MASK                         0x00000600
+#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT                                9
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ROTATED_90                                0x00001000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK                        0x00006000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT               13
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT                      0x00020000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB                  0x00040000
+#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL                                0x00400000
+
+/* SRC_UNPACK: four component-unpack element selectors, one byte lane
+ * each (ELEM0 [7:0] .. ELEM3 [31:24]). */
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK                       0x000000ff
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT                      0
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK                       0x0000ff00
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT                      8
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK                       0x00ff0000
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT                      16
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK                       0xff000000
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT                      24
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+/* OP_MODE: single-bit enables for scaling, YCbCr conversion, CSC,
+ * flips, dithering, IGC LUT and de-interlacing. */
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+#define MDP4_PIPE_OP_MODE_SCALEX_EN                            0x00000001
+#define MDP4_PIPE_OP_MODE_SCALEY_EN                            0x00000002
+#define MDP4_PIPE_OP_MODE_SRC_YCBCR                            0x00000200
+#define MDP4_PIPE_OP_MODE_DST_YCBCR                            0x00000400
+#define MDP4_PIPE_OP_MODE_CSC_EN                               0x00000800
+#define MDP4_PIPE_OP_MODE_FLIP_LR                              0x00002000
+#define MDP4_PIPE_OP_MODE_FLIP_UD                              0x00004000
+#define MDP4_PIPE_OP_MODE_DITHER_EN                            0x00008000
+#define MDP4_PIPE_OP_MODE_IGC_LUT_EN                           0x00010000
+#define MDP4_PIPE_OP_MODE_DEINT_EN                             0x00040000
+#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF                                0x00080000
+
+/* Scaler phase steps, fetch config, solid-fill color, and the per-pipe
+ * CSC coefficient tables (i1-indexed 32-bit entries, 0x4 stride, same
+ * MV/BV/LV layout as the OVLP and DMA CSC blocks). */
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+/*
+ * LCDC interface timing block at base 0x000c0000: hsync/vsync timing,
+ * display and active-region windows, border/underflow colors and
+ * signal polarity.  Paired start/end and pulse/period fields pack the
+ * low half into [15:0] and the high half into [31:16] (ACTIVE_HCTL
+ * uses 15-bit fields with bit 31 as the active-start-x enable).
+ */
+#define REG_MDP4_LCDC                                          0x000c0000
+
+#define REG_MDP4_LCDC_ENABLE                                   0x000c0000
+
+#define REG_MDP4_LCDC_HSYNC_CTRL                               0x000c0004
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK                      0x0000ffff
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT                     0
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK                      0xffff0000
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT                     16
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_LCDC_VSYNC_PERIOD                             0x000c0008
+
+#define REG_MDP4_LCDC_VSYNC_LEN                                        0x000c000c
+
+#define REG_MDP4_LCDC_DISPLAY_HCTRL                            0x000c0010
+#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK                    0x0000ffff
+#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT                   0
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK                      0xffff0000
+#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT                     16
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_LCDC_DISPLAY_VSTART                           0x000c0014
+
+#define REG_MDP4_LCDC_DISPLAY_VEND                             0x000c0018
+
+#define REG_MDP4_LCDC_ACTIVE_HCTL                              0x000c001c
+#define MDP4_LCDC_ACTIVE_HCTL_START__MASK                      0x00007fff
+#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT                     0
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_END__MASK                                0x7fff0000
+#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT                       16
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X                   0x80000000
+
+#define REG_MDP4_LCDC_ACTIVE_VSTART                            0x000c0020
+
+#define REG_MDP4_LCDC_ACTIVE_VEND                              0x000c0024
+
+#define REG_MDP4_LCDC_BORDER_CLR                               0x000c0028
+
+#define REG_MDP4_LCDC_UNDERFLOW_CLR                            0x000c002c
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK                    0x00ffffff
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT                   0
+static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+       return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY                        0x80000000
+
+#define REG_MDP4_LCDC_HSYNC_SKEW                               0x000c0030
+
+#define REG_MDP4_LCDC_TEST_CNTL                                        0x000c0034
+
+#define REG_MDP4_LCDC_CTRL_POLARITY                            0x000c0038
+#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW                      0x00000001
+#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW                      0x00000002
+#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW                    0x00000004
+
+/*
+ * DTV interface timing block at base 0x000d0000 — same register/field
+ * pattern as the LCDC block, though several register offsets within the
+ * block differ (e.g. DISPLAY_HCTRL at +0x18, ACTIVE_HCTL at +0x2c).
+ */
+#define REG_MDP4_DTV                                           0x000d0000
+
+#define REG_MDP4_DTV_ENABLE                                    0x000d0000
+
+#define REG_MDP4_DTV_HSYNC_CTRL                                        0x000d0004
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK                       0x0000ffff
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT                      0
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+       return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK                       0xffff0000
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT                      16
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+       return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DTV_VSYNC_PERIOD                              0x000d0008
+
+#define REG_MDP4_DTV_VSYNC_LEN                                 0x000d000c
+
+#define REG_MDP4_DTV_DISPLAY_HCTRL                             0x000d0018
+#define MDP4_DTV_DISPLAY_HCTRL_START__MASK                     0x0000ffff
+#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT                    0
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
+{
+       return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DTV_DISPLAY_HCTRL_END__MASK                       0xffff0000
+#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT                      16
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
+{
+       return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DTV_DISPLAY_VSTART                            0x000d001c
+
+#define REG_MDP4_DTV_DISPLAY_VEND                              0x000d0020
+
+#define REG_MDP4_DTV_ACTIVE_HCTL                               0x000d002c
+#define MDP4_DTV_ACTIVE_HCTL_START__MASK                       0x00007fff
+#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT                      0
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
+{
+       return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_END__MASK                         0x7fff0000
+#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT                                16
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
+{
+       return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X                    0x80000000
+
+#define REG_MDP4_DTV_ACTIVE_VSTART                             0x000d0030
+
+#define REG_MDP4_DTV_ACTIVE_VEND                               0x000d0038
+
+#define REG_MDP4_DTV_BORDER_CLR                                        0x000d0040
+
+#define REG_MDP4_DTV_UNDERFLOW_CLR                             0x000d0044
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK                     0x00ffffff
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT                    0
+static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+       return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY                 0x80000000
+
+#define REG_MDP4_DTV_HSYNC_SKEW                                        0x000d0048
+
+#define REG_MDP4_DTV_TEST_CNTL                                 0x000d004c
+
+#define REG_MDP4_DTV_CTRL_POLARITY                             0x000d0050
+#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW                       0x00000001
+#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW                       0x00000002
+#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW                     0x00000004
+
+#define REG_MDP4_DSI                                           0x000e0000
+
+#define REG_MDP4_DSI_ENABLE                                    0x000e0000
+
+#define REG_MDP4_DSI_HSYNC_CTRL                                        0x000e0004
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK                       0x0000ffff
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT                      0
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+       return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK                       0xffff0000
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT                      16
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+       return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DSI_VSYNC_PERIOD                              0x000e0008
+
+#define REG_MDP4_DSI_VSYNC_LEN                                 0x000e000c
+
+#define REG_MDP4_DSI_DISPLAY_HCTRL                             0x000e0010
+#define MDP4_DSI_DISPLAY_HCTRL_START__MASK                     0x0000ffff
+#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT                    0
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
+{
+       return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DSI_DISPLAY_HCTRL_END__MASK                       0xffff0000
+#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT                      16
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
+{
+       return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DSI_DISPLAY_VSTART                            0x000e0014
+
+#define REG_MDP4_DSI_DISPLAY_VEND                              0x000e0018
+
+#define REG_MDP4_DSI_ACTIVE_HCTL                               0x000e001c
+#define MDP4_DSI_ACTIVE_HCTL_START__MASK                       0x00007fff
+#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT                      0
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
+{
+       return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_END__MASK                         0x7fff0000
+#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT                                16
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
+{
+       return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X                    0x80000000
+
+#define REG_MDP4_DSI_ACTIVE_VSTART                             0x000e0020
+
+#define REG_MDP4_DSI_ACTIVE_VEND                               0x000e0024
+
+#define REG_MDP4_DSI_BORDER_CLR                                        0x000e0028
+
+#define REG_MDP4_DSI_UNDERFLOW_CLR                             0x000e002c
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK                     0x00ffffff
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT                    0
+static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+       return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY                 0x80000000
+
+#define REG_MDP4_DSI_HSYNC_SKEW                                        0x000e0030
+
+#define REG_MDP4_DSI_TEST_CNTL                                 0x000e0034
+
+#define REG_MDP4_DSI_CTRL_POLARITY                             0x000e0038
+#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW                       0x00000001
+#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW                       0x00000002
+#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW                     0x00000004
+
+
+#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
new file mode 100644 (file)
index 0000000..1964f4f
--- /dev/null
@@ -0,0 +1,753 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+/* Per-CRTC state for the MDP4 KMS driver; embeds drm_crtc as 'base'. */
+struct mdp4_crtc {
+       struct drm_crtc base;
+       /* debug name, e.g. "DMA_P:0" (see mdp4_crtc_init()) */
+       char name[8];
+       /* primary plane for this crtc */
+       struct drm_plane *plane;
+       /* all planes currently attached, indexed by enum mdp4_pipe */
+       struct drm_plane *planes[8];
+       int id;
+       /* overlay (OVLP) engine index used for blending */
+       int ovlp;
+       enum mdp4_dma dma;
+       bool enabled;
+
+       /* which mixer/encoder we route output to: */
+       int mixer;
+
+       /* cursor state; 'lock' guards all fields, taken from IRQ context
+        * in update_cursor() so users must use irqsave variants:
+        */
+       struct {
+               spinlock_t lock;
+               bool stale;
+               uint32_t width, height;
+
+               /* next cursor to scan-out: */
+               uint32_t next_iova;
+               struct drm_gem_object *next_bo;
+
+               /* current cursor being scanned out: */
+               struct drm_gem_object *scanout_bo;
+       } cursor;
+
+
+       /* if there is a pending flip, these will be non-null: */
+       struct drm_pending_vblank_event *event;
+       struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP   0x2
+       atomic_t pending;
+
+       /* the fb that we currently hold a scanout ref to: */
+       struct drm_framebuffer *fb;
+
+       /* for unref'ing framebuffers after scanout completes: */
+       struct drm_flip_work unref_fb_work;
+
+       /* for unref'ing cursor bo's after scanout completes: */
+       struct drm_flip_work unref_cursor_work;
+
+       /* vblank and error irq descriptors registered with the mdp core */
+       struct mdp_irq vblank;
+       struct mdp_irq err;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+/* fetch the mdp4_kms instance for the device owning this crtc */
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+       struct msm_drm_private *priv = crtc->dev->dev_private;
+       return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+/* Swap in a new scanout fb: take a ref on new_fb, queue the previous fb
+ * for deferred unref.  For the non-async (blocking) path, enable vblank
+ * so the queued unref work gets committed.
+ */
+static void update_fb(struct drm_crtc *crtc, bool async,
+               struct drm_framebuffer *new_fb)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct drm_framebuffer *old_fb = mdp4_crtc->fb;
+
+       if (old_fb)
+               drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+
+       /* grab reference to incoming scanout fb: */
+       drm_framebuffer_reference(new_fb);
+       mdp4_crtc->base.fb = new_fb;
+       mdp4_crtc->fb = new_fb;
+
+       if (!async) {
+               /* enable vblank to pick up the old_fb */
+               mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+       }
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_pending_vblank_event *event;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       event = mdp4_crtc->event;
+       if (event) {
+               /* if regular vblank case (!file) or if cancel-flip from
+                * preclose on file that requested flip, then send the
+                * event:
+                */
+               if (!file || (event->base.file_priv == file)) {
+                       mdp4_crtc->event = NULL;
+                       drm_send_vblank_event(dev, mdp4_crtc->id, event);
+               }
+       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/* Kick the overlay flush register so hardware latches the new config
+ * for every pipe attached to this crtc plus its ovlp engine.
+ */
+static void crtc_flush(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       uint32_t i, flush = 0;
+
+       for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+               struct drm_plane *plane = mdp4_crtc->planes[i];
+               if (plane) {
+                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+                       flush |= pipe2flush(pipe_id);
+               }
+       }
+       flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+       DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+       mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+/* Record pending work (PENDING_FLIP / PENDING_CURSOR) and enable the
+ * vblank irq so mdp4_crtc_vblank_irq() services it.
+ */
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+       atomic_or(pending, &mdp4_crtc->pending);
+       mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+/* Fence callback fired once rendering to the new fb has completed:
+ * program the plane scanout and flush, then wait for vblank to send
+ * the flip-completion event.
+ */
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+       struct mdp4_crtc *mdp4_crtc =
+               container_of(cb, struct mdp4_crtc, pageflip_cb);
+       struct drm_crtc *crtc = &mdp4_crtc->base;
+       struct drm_framebuffer *fb = crtc->fb;
+
+       if (!fb)
+               return;
+
+       mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
+       crtc_flush(crtc);
+
+       /* enable vblank to complete flip: */
+       request_pending(crtc, PENDING_FLIP);
+}
+
+/* flip-work handler: drop the scanout ref on an fb that is no longer
+ * being scanned out (runs from workqueue, so we can take mode_config
+ * mutex)
+ */
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+       struct mdp4_crtc *mdp4_crtc =
+               container_of(work, struct mdp4_crtc, unref_fb_work);
+       struct drm_device *dev = mdp4_crtc->base.dev;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_framebuffer_unreference(val);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+/* flip-work handler: drop the iova + obj refs on a cursor bo taken in
+ * update_cursor() once it is no longer scanned out
+ */
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+       struct mdp4_crtc *mdp4_crtc =
+               container_of(work, struct mdp4_crtc, unref_cursor_work);
+       struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
+
+       msm_gem_put_iova(val, mdp4_kms->id);
+       drm_gem_object_unreference_unlocked(val);
+}
+
+/* teardown counterpart of mdp4_crtc_init(); also destroys the primary
+ * plane and drains both flip-work queues
+ */
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+       mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
+
+       drm_crtc_cleanup(crtc);
+       drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
+       drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+
+       kfree(mdp4_crtc);
+}
+
+/* DPMS: on enable, grab an mdp clock ref and register the error irq;
+ * reverse order on disable.  Idempotent via the 'enabled' flag.
+ */
+static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+       DBG("%s: mode=%d", mdp4_crtc->name, mode);
+
+       if (enabled != mdp4_crtc->enabled) {
+               if (enabled) {
+                       mdp4_enable(mdp4_kms);
+                       mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
+               } else {
+                       mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+                       mdp4_disable(mdp4_kms);
+               }
+               mdp4_crtc->enabled = enabled;
+       }
+}
+
+/* no mode adjustment needed; accept the mode as-is */
+static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/* Program the layer-mixer and ovlp blend stages for all attached planes.
+ * Pipes are statically assigned to mixer stages (z-order) via 'idxs';
+ * RGB pipes sit at the base stage, VG pipes stack above.  Per-stage
+ * alpha blending is enabled when the plane's fb format has alpha.
+ */
+static void blend_setup(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       int i, ovlp = mdp4_crtc->ovlp;
+       uint32_t mixer_cfg = 0;
+       static const enum mdp_mixer_stage_id stages[] = {
+                       STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+       };
+       /* statically (for now) map planes to mixer stage (z-order): */
+       static const int idxs[] = {
+                       [VG1]  = 1,
+                       [VG2]  = 2,
+                       [RGB1] = 0,
+                       [RGB2] = 0,
+                       [RGB3] = 0,
+                       [VG3]  = 3,
+                       [VG4]  = 4,
+
+       };
+       /* alpha[i] => stage i+1 wants per-pixel alpha blending */
+       bool alpha[4]= { false, false, false, false };
+
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+       /* TODO single register for all CRTCs, so this won't work properly
+        * when multiple CRTCs are active..
+        */
+       for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+               struct drm_plane *plane = mdp4_crtc->planes[i];
+               if (plane) {
+                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+                       int idx = idxs[pipe_id];
+                       if (idx > 0) {
+                               const struct mdp_format *format =
+                                       to_mdp_format(msm_framebuffer_format(plane->fb));
+                               alpha[idx-1] = format->alpha_enable;
+                       }
+                       mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
+               }
+       }
+
+       /* this shouldn't happen.. and seems to cause underflow: */
+       WARN_ON(!mixer_cfg);
+
+       for (i = 0; i < 4; i++) {
+               uint32_t op;
+
+               if (alpha[i]) {
+                       op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
+                                       MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
+                                       MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
+               } else {
+                       /* opaque: constant full fg alpha, constant bg alpha */
+                       op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+                                       MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
+               }
+
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+       }
+
+       mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+/* crtc helper .mode_set: program DMA source/destination sizes, the ovlp
+ * geometry and strides for the new mode, then route the primary plane's
+ * fb to fill the full display.  Returns 0 or the plane mode_set error.
+ */
+static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode,
+               int x, int y,
+               struct drm_framebuffer *old_fb)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       enum mdp4_dma dma = mdp4_crtc->dma;
+       int ret, ovlp = mdp4_crtc->ovlp;
+
+       /* hardware is programmed from the adjusted mode */
+       mode = adjusted_mode;
+
+       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       mdp4_crtc->name, mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+                       MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+                       MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+       /* take data from pipe: */
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
+                       crtc->fb->pitches[0]);
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+                       MDP4_DMA_DST_SIZE_WIDTH(0) |
+                       MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+                       MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+                       MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
+                       crtc->fb->pitches[0]);
+
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
+       update_fb(crtc, false, crtc->fb);
+
+       ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       x << 16, y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16);
+       if (ret) {
+               dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+                               mdp4_crtc->name, ret);
+               return ret;
+       }
+
+       /* NOTE(review): DMA_E-only quantization writes -- presumably
+        * external (HDMI) interface color setup; confirm against HW docs.
+        */
+       if (dma == DMA_E) {
+               mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+               mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+               mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+       }
+
+       return 0;
+}
+
+/* crtc helper .prepare: blank the crtc before a mode set */
+static void mdp4_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       DBG("%s", mdp4_crtc->name);
+       /* make sure we hold a ref to mdp clks while setting up mode: */
+       mdp4_enable(get_kms(crtc));
+       mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+/* crtc helper .commit: un-blank and flush after a mode set */
+static void mdp4_crtc_commit(struct drm_crtc *crtc)
+{
+       mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       crtc_flush(crtc);
+       /* drop the ref to mdp clk's that we got in prepare: */
+       mdp4_disable(get_kms(crtc));
+}
+
+/* crtc helper .mode_set_base: pan/scan to a new fb offset without a
+ * full mode set
+ */
+static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+               struct drm_framebuffer *old_fb)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct drm_plane *plane = mdp4_crtc->plane;
+       struct drm_display_mode *mode = &crtc->mode;
+
+       update_fb(crtc, false, crtc->fb);
+
+       return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       x << 16, y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16);
+}
+
+/* no gamma LUT support; helper requires the hook to exist */
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* Async page flip: record the completion event, swap in the new fb, and
+ * defer programming scanout until the fb's GEM bo becomes inactive
+ * (pageflip_cb fires once rendering completes).
+ */
+static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
+               struct drm_framebuffer *new_fb,
+               struct drm_pending_vblank_event *event,
+               uint32_t page_flip_flags)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_gem_object *obj;
+       unsigned long flags;
+
+       /* NOTE(review): 'event' is read here without dev->event_lock,
+        * while complete_flip() clears it under the lock -- looks racy
+        * against a concurrently-completing flip; verify.
+        */
+       if (mdp4_crtc->event) {
+               dev_err(dev->dev, "already pending flip!\n");
+               return -EBUSY;
+       }
+
+       obj = msm_framebuffer_bo(new_fb, 0);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       mdp4_crtc->event = event;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       update_fb(crtc, true, new_fb);
+
+       return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+}
+
+/* no crtc properties supported yet */
+static int mdp4_crtc_set_property(struct drm_crtc *crtc,
+               struct drm_property *property, uint64_t val)
+{
+       // XXX
+       return -EINVAL;
+}
+
+/* hardware cursor dimensions (max) */
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+/* called from IRQ to update cursor related registers (if needed).  The
+ * cursor registers, other than x/y position, appear not to be double
+ * buffered, and changing them other than from vblank seems to trigger
+ * underflow.
+ */
+static void update_cursor(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       enum mdp4_dma dma = mdp4_crtc->dma;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+       if (mdp4_crtc->cursor.stale) {
+               struct mdp4_kms *mdp4_kms = get_kms(crtc);
+               struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
+               struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
+               uint32_t iova = mdp4_crtc->cursor.next_iova;
+
+               if (next_bo) {
+                       /* take a obj ref + iova ref when we start scanning out: */
+                       drm_gem_object_reference(next_bo);
+                       msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+
+                       /* enable cursor: */
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
+                                       MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
+                                       MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
+                                       MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
+               } else {
+                       /* disable cursor: */
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+               }
+
+               /* and drop the iova ref + obj rev when done scanning out: */
+               if (prev_bo)
+                       drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
+
+               mdp4_crtc->cursor.scanout_bo = next_bo;
+               mdp4_crtc->cursor.stale = false;
+       }
+       spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+}
+
+/* Set (handle != 0) or clear (handle == 0) the hardware cursor image.
+ * Only stages the new bo/iova under cursor.lock; the actual register
+ * programming happens at vblank in update_cursor().
+ */
+static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
+               struct drm_file *file_priv, uint32_t handle,
+               uint32_t width, uint32_t height)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_gem_object *cursor_bo, *old_bo;
+       unsigned long flags;
+       uint32_t iova;
+       int ret;
+
+       if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+               dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+               return -EINVAL;
+       }
+
+       if (handle) {
+               /* lookup takes a reference that we hand over to the
+                * stale-cursor state below (or drop on failure)
+                */
+               cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
+               if (!cursor_bo)
+                       return -ENOENT;
+       } else {
+               cursor_bo = NULL;
+       }
+
+       if (cursor_bo) {
+               ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+               if (ret)
+                       goto fail;
+       } else {
+               iova = 0;
+       }
+
+       spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+       old_bo = mdp4_crtc->cursor.next_bo;
+       mdp4_crtc->cursor.next_bo   = cursor_bo;
+       mdp4_crtc->cursor.next_iova = iova;
+       mdp4_crtc->cursor.width     = width;
+       mdp4_crtc->cursor.height    = height;
+       mdp4_crtc->cursor.stale     = true;
+       spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+       if (old_bo) {
+               /* drop our previous reference: */
+               msm_gem_put_iova(old_bo, mdp4_kms->id);
+               drm_gem_object_unreference_unlocked(old_bo);
+       }
+
+       request_pending(crtc, PENDING_CURSOR);
+
+       return 0;
+
+fail:
+       drm_gem_object_unreference_unlocked(cursor_bo);
+       return ret;
+}
+
+/* move cursor; position registers are double-buffered so this is safe
+ * to write directly
+ */
+static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       enum mdp4_dma dma = mdp4_crtc->dma;
+
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+                       MDP4_DMA_CURSOR_POS_X(x) |
+                       MDP4_DMA_CURSOR_POS_Y(y));
+
+       return 0;
+}
+
+/* drm core crtc callbacks */
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = mdp4_crtc_destroy,
+       .page_flip = mdp4_crtc_page_flip,
+       .set_property = mdp4_crtc_set_property,
+       .cursor_set = mdp4_crtc_cursor_set,
+       .cursor_move = mdp4_crtc_cursor_move,
+};
+
+/* crtc helper (modeset) callbacks */
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+       .dpms = mdp4_crtc_dpms,
+       .mode_fixup = mdp4_crtc_mode_fixup,
+       .mode_set = mdp4_crtc_mode_set,
+       .prepare = mdp4_crtc_prepare,
+       .commit = mdp4_crtc_commit,
+       .mode_set_base = mdp4_crtc_mode_set_base,
+       .load_lut = mdp4_crtc_load_lut,
+};
+
+/* vblank irq: one-shot -- unregister ourselves, then service whatever
+ * work was queued via request_pending() (flip completion, cursor swap)
+ * and commit the corresponding deferred-unref flip-works.
+ */
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
+       struct drm_crtc *crtc = &mdp4_crtc->base;
+       struct msm_drm_private *priv = crtc->dev->dev_private;
+       unsigned pending;
+
+       mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+
+       pending = atomic_xchg(&mdp4_crtc->pending, 0);
+
+       if (pending & PENDING_FLIP) {
+               complete_flip(crtc, NULL);
+               drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+       }
+
+       if (pending & PENDING_CURSOR) {
+               update_cursor(crtc);
+               drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+       }
+}
+
+/* error irq (eg. underflow): log and re-flush to recover */
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
+       struct drm_crtc *crtc = &mdp4_crtc->base;
+       DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
+       crtc_flush(crtc);
+}
+
+/* expose this crtc's vblank irq mask to the kms core */
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       return mdp4_crtc->vblank.irqmask;
+}
+
+/* preclose path: cancel a flip requested by 'file' if still pending */
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+       DBG("cancel: %p", file);
+       complete_flip(crtc, file);
+}
+
+/* set dma config, ie. the format the encoder wants. */
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+       struct mdp4_kms *mdp4_kms = get_kms(crtc);
+       uint32_t intf_sel;
+
+       /* read-modify-write: preserve the other DMA channels' routing */
+       intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+       switch (mdp4_crtc->dma) {
+       case DMA_P:
+               intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+               intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+               break;
+       case DMA_S:
+               intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+               intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+               break;
+       case DMA_E:
+               intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+               intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+               break;
+       }
+
+       /* DSI video vs command mode are mutually exclusive bits; both
+        * route through mixer 0, LCDC/DTV through mixer 1:
+        */
+       if (intf == INTF_DSI_VIDEO) {
+               intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
+               intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
+               mdp4_crtc->mixer = 0;
+       } else if (intf == INTF_DSI_CMD) {
+               intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
+               intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
+               mdp4_crtc->mixer = 0;
+       } else if (intf == INTF_LCDC_DTV){
+               mdp4_crtc->mixer = 1;
+       }
+
+       /* mixer assignment changed, so re-program blend stages: */
+       blend_setup(crtc);
+
+       DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+       mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+/* attach/detach a plane to this crtc's pipe slot and re-blend; flush
+ * immediately unless it is the primary plane (which flushes via the
+ * normal modeset/flip paths)
+ */
+static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
+               struct drm_plane *plane)
+{
+       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+       BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
+
+       if (mdp4_crtc->planes[pipe_id] == plane)
+               return;
+
+       mdp4_crtc->planes[pipe_id] = plane;
+       blend_setup(crtc);
+       if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
+               crtc_flush(crtc);
+}
+
+void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+       set_attach(crtc, mdp4_plane_pipe(plane), plane);
+}
+
+void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+       set_attach(crtc, mdp4_plane_pipe(plane), NULL);
+}
+
+/* indexed by enum mdp4_dma, used for the debug name */
+static const char *dma_names[] = {
+               "DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+               struct drm_plane *plane, int id, int ovlp_id,
+               enum mdp4_dma dma_id)
+{
+       struct drm_crtc *crtc = NULL;
+       struct mdp4_crtc *mdp4_crtc;
+       int ret;
+
+       mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+       if (!mdp4_crtc) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       crtc = &mdp4_crtc->base;
+
+       mdp4_crtc->plane = plane;
+
+       mdp4_crtc->ovlp = ovlp_id;
+       mdp4_crtc->dma = dma_id;
+
+       mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
+       mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
+
+       mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
+       mdp4_crtc->err.irq = mdp4_crtc_err_irq;
+
+       snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+                       dma_names[dma_id], ovlp_id);
+
+       spin_lock_init(&mdp4_crtc->cursor.lock);
+
+       ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
+                       "unref fb", unref_fb_worker);
+       if (ret)
+               goto fail;
+
+       ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+                       "unref cursor", unref_cursor_worker);
+
+       INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
+
+       drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
+       drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+       mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+
+       return crtc;
+
+fail:
+       if (crtc)
+               mdp4_crtc_destroy(crtc);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
new file mode 100644 (file)
index 0000000..067ed03
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+/* DTV (external interface / HDMI path) encoder state: the drm_encoder
+ * plus the clocks it manages across dpms on/off, the cached pixel clock
+ * from the last mode_set, and the bus-scaling client handle (bsc).
+ */
+struct mdp4_dtv_encoder {
+       struct drm_encoder base;
+       struct clk *src_clk;
+       struct clk *hdmi_clk;
+       struct clk *mdp_clk;
+       unsigned long int pixclock;
+       bool enabled;
+       uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+/* Convenience: fetch the mdp4_kms from an encoder's drm_device. */
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+       struct msm_drm_private *priv = encoder->dev->dev_private;
+       return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+/* not ironically named at all.. no, really.. */
+/* Register a bus-scaling client from the "dtv.0" platform data; a missing
+ * pdata is logged but non-fatal (bsc stays 0 and bs_set() is a no-op).
+ */
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+       struct drm_device *dev = mdp4_dtv_encoder->base.dev;
+       struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
+
+       if (!dtv_pdata) {
+               dev_err(dev->dev, "could not find dtv pdata\n");
+               return;
+       }
+
+       if (dtv_pdata->bus_scale_table) {
+               mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
+                               dtv_pdata->bus_scale_table);
+               DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
+               DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
+               if (dtv_pdata->lcdc_power_save)
+                       dtv_pdata->lcdc_power_save(1);
+       }
+}
+
+/* Unregister the bus-scaling client (idempotent: bsc reset to 0). */
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+       if (mdp4_dtv_encoder->bsc) {
+               msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
+               mdp4_dtv_encoder->bsc = 0;
+       }
+}
+
+/* Request bus-scaling level 'idx' if a client is registered. */
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
+{
+       if (mdp4_dtv_encoder->bsc) {
+               DBG("set bus scaling: %d", idx);
+               msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
+       }
+}
+#else
+/* No-op stubs when bus scaling support is not configured. */
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
+#endif
+
+/* Tear down the encoder: drop the bus-scaling client, unregister from
+ * the drm core, and free the state (clocks are devm-managed elsewhere).
+ */
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+       bs_fini(mdp4_dtv_encoder);
+       drm_encoder_cleanup(encoder);
+       kfree(mdp4_dtv_encoder);
+}
+
+/* drm core encoder vtable (only destroy is needed here). */
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+       .destroy = mdp4_dtv_encoder_destroy,
+};
+
+/* DPMS on/off for the DTV interface.
+ *
+ * On enable: bump bus scaling, program src_clk to the pixclock cached by
+ * mode_set, enable the three clocks, then set DTV_ENABLE.  On disable:
+ * clear DTV_ENABLE, wait one external vsync so the disable latches before
+ * the vsync source goes away (see comment below), then drop clocks and
+ * bus scaling.  All clock errors are logged but not fatal.
+ *
+ * Fixes: "failed to enabled mdp_clk" typo, and the unchecked return of
+ * clk_prepare_enable(src_clk) (its two siblings were already checked).
+ */
+static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+       struct mdp4_kms *mdp4_kms = get_kms(encoder);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+       DBG("mode=%d", mode);
+
+       if (enabled == mdp4_dtv_encoder->enabled)
+               return;
+
+       if (enabled) {
+               unsigned long pc = mdp4_dtv_encoder->pixclock;
+               int ret;
+
+               bs_set(mdp4_dtv_encoder, 1);
+
+               DBG("setting src_clk=%lu", pc);
+
+               ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+               if (ret)
+                       dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
+               ret = clk_prepare_enable(mdp4_dtv_encoder->src_clk);
+               if (ret)
+                       dev_err(dev->dev, "failed to enable src_clk: %d\n", ret);
+               ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+               if (ret)
+                       dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+               ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+               if (ret)
+                       dev_err(dev->dev, "failed to enable mdp_clk: %d\n", ret);
+
+               mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+       } else {
+               mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+               /*
+                * Wait for a vsync so we know the ENABLE=0 latched before
+                * the (connector) source of the vsync's gets disabled,
+                * otherwise we end up in a funny state if we re-enable
+                * before the disable latches, which results that some of
+                * the settings changes for the new modeset (like new
+                * scanout buffer) don't latch properly..
+                */
+               mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
+
+               clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
+               clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+               clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+               bs_set(mdp4_dtv_encoder, 0);
+       }
+
+       mdp4_dtv_encoder->enabled = enabled;
+}
+
+/* No mode adjustment needed for DTV; accept the mode as-is. */
+static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+/* Program the DTV timing registers from the adjusted mode and cache the
+ * pixel clock (kHz -> Hz) for the next dpms-on.  The vertical values are
+ * expressed in pixel counts (lines * htotal), offset by the hsync skew.
+ */
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+       struct mdp4_kms *mdp4_kms = get_kms(encoder);
+       uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+       uint32_t display_v_start, display_v_end;
+       uint32_t hsync_start_x, hsync_end_x;
+
+       /* only the adjusted mode matters from here on: */
+       mode = adjusted_mode;
+
+       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+
+       /* mode->clock is in kHz; remember Hz for dpms-on clk_set_rate(): */
+       mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+       DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+       ctrl_pol = 0;
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+       /* probably need to get DATA_EN polarity from panel.. */
+
+       dtv_hsync_skew = 0;  /* get this from panel? */
+
+       /* active region in pixels, measured from start of hsync: */
+       hsync_start_x = (mode->htotal - mode->hsync_start);
+       hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+       vsync_period = mode->vtotal * mode->htotal;
+       vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+       display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+       display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+                       MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+                       MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+                       MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+                       MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+                       MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+                       MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+       /* active h/v windows disabled (zeroed): */
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+                       MDP4_DTV_ACTIVE_HCTL_START(0) |
+                       MDP4_DTV_ACTIVE_HCTL_END(0));
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+}
+
+/* Helper-funcs .prepare: force the interface off before a modeset. */
+static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
+{
+       mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/* Helper-funcs .commit: configure the crtc's DMA for 8bpc RGB output,
+ * route it to the LCDC/DTV interface, then switch the encoder on.
+ */
+static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
+{
+       mdp4_crtc_set_config(encoder->crtc,
+                       MDP4_DMA_CONFIG_R_BPC(BPC8) |
+                       MDP4_DMA_CONFIG_G_BPC(BPC8) |
+                       MDP4_DMA_CONFIG_B_BPC(BPC8) |
+                       MDP4_DMA_CONFIG_PACK(0x21));
+       mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
+       mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+/* drm crtc-helper encoder callbacks for the DTV encoder. */
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+       .dpms = mdp4_dtv_encoder_dpms,
+       .mode_fixup = mdp4_dtv_encoder_mode_fixup,
+       .mode_set = mdp4_dtv_encoder_mode_set,
+       .prepare = mdp4_dtv_encoder_prepare,
+       .commit = mdp4_dtv_encoder_commit,
+};
+
+/* Round a requested pixel clock to what src_clk can actually provide. */
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+       return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+}
+
+/* initialize encoder
+ *
+ * Allocates the DTV encoder, registers it as a TMDS encoder with the drm
+ * core, grabs the three required (devm-managed) clocks, and sets up bus
+ * scaling.  Returns the encoder or ERR_PTR(); on error any partially
+ * constructed encoder is destroyed.
+ */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+       struct drm_encoder *encoder = NULL;
+       struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+       int ret;
+
+       mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+       if (!mdp4_dtv_encoder) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       encoder = &mdp4_dtv_encoder->base;
+
+       drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+       /* all three clocks are mandatory; failure aborts init: */
+       mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
+       if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
+               dev_err(dev->dev, "failed to get src_clk\n");
+               ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
+               goto fail;
+       }
+
+       mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+       if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+               dev_err(dev->dev, "failed to get hdmi_clk\n");
+               ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+               goto fail;
+       }
+
+       mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+       if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+               dev_err(dev->dev, "failed to get mdp_clk\n");
+               ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+               goto fail;
+       }
+
+       bs_init(mdp4_dtv_encoder);
+
+       return encoder;
+
+fail:
+       if (encoder)
+               mdp4_dtv_encoder_destroy(encoder);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
new file mode 100644 (file)
index 0000000..c740ccd
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+/* mdp_kms hook: write the aggregate irq enable mask to the hardware. */
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+       mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+/* Catch-all handler for error irqs (interface underruns); just logs. */
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* Before irq install: ack any stale pending interrupts. */
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+/* After irq install: register the shared error handler for primary and
+ * external interface underrun interrupts.
+ */
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+       struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+       struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+       error_handler->irq = mdp4_irq_error_handler;
+       error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+                       MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+       mdp_irq_register(mdp_kms, error_handler);
+
+       return 0;
+}
+
+/* On irq uninstall: mask all MDP4 interrupts. */
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+/* Top-level irq handler: read-and-ack the status register, forward
+ * per-crtc vblank events to the drm core, then dispatch the remaining
+ * bits to registered mdp_irq handlers.
+ */
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+       struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+       struct drm_device *dev = mdp4_kms->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       unsigned int id;
+       uint32_t status;
+
+       status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+       mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+       VERB("status=%08x", status);
+
+       for (id = 0; id < priv->num_crtcs; id++)
+               if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+                       drm_handle_vblank(dev, id);
+
+       mdp_dispatch_irqs(mdp_kms, status);
+
+       return IRQ_HANDLED;
+}
+
+/* Unmask the vblank irq bit for the given crtc's DMA channel. */
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+       mdp_update_vblank_mask(to_mdp_kms(kms),
+                       mdp4_crtc_vblank(crtc), true);
+       return 0;
+}
+
+/* Mask the vblank irq bit for the given crtc's DMA channel. */
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+       mdp_update_vblank_mask(to_mdp_kms(kms),
+                       mdp4_crtc_vblank(crtc), false);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
new file mode 100644 (file)
index 0000000..272e707
--- /dev/null
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp4_kms.h"
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
+
+/* One-time hardware init: verify the MDP major version is 4, apply
+ * revision-specific regulator voltages and register defaults, and program
+ * DMA/pipe fetch configs based on the core clock rate.  Runs with a
+ * pm_runtime reference held; returns 0 or a negative errno.
+ */
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       struct drm_device *dev = mdp4_kms->dev;
+       uint32_t version, major, minor, dmap_cfg, vg_cfg;
+       unsigned long clk;
+       int ret = 0;
+
+       pm_runtime_get_sync(dev->dev);
+
+       /* clocks must be on just long enough to read the version reg: */
+       mdp4_enable(mdp4_kms);
+       version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+       mdp4_disable(mdp4_kms);
+
+       major = FIELD(version, MDP4_VERSION_MAJOR);
+       minor = FIELD(version, MDP4_VERSION_MINOR);
+
+       DBG("found MDP4 version v%d.%d", major, minor);
+
+       if (major != 4) {
+               dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+                               major, minor);
+               ret = -ENXIO;
+               goto out;
+       }
+
+       mdp4_kms->rev = minor;
+
+       /* rev-specific DSI PLL supply voltages (only if regulator found): */
+       if (mdp4_kms->dsi_pll_vdda) {
+               if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
+                       ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
+                                       1200000, 1200000);
+                       if (ret) {
+                               dev_err(dev->dev,
+                                       "failed to set dsi_pll_vdda voltage: %d\n", ret);
+                               goto out;
+                       }
+               }
+       }
+
+       if (mdp4_kms->dsi_pll_vddio) {
+               if (mdp4_kms->rev == 2) {
+                       ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
+                                       1800000, 1800000);
+                       if (ret) {
+                               dev_err(dev->dev,
+                                       "failed to set dsi_pll_vddio voltage: %d\n", ret);
+                               goto out;
+                       }
+               }
+       }
+
+       if (mdp4_kms->rev > 1) {
+               mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+               mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+       }
+
+       mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+       /* max read pending cmd config, 3 pending requests: */
+       mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+       /* fetch burst sizes depend on revision and core clock speed: */
+       clk = clk_get_rate(mdp4_kms->clk);
+
+       if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+               dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
+               vg_cfg = 0x47;       /* 16 bytes-burs x 8 req */
+       } else {
+               dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
+               vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
+       }
+
+       DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+       if (mdp4_kms->rev >= 2)
+               mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+
+       /* disable CSC matrix / YUV by default: */
+       mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+       mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+       mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+       if (mdp4_kms->rev > 1)
+               mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+out:
+       pm_runtime_put_sync(dev->dev);
+
+       return ret;
+}
+
+/* kms hook: delegate pixclk rounding to the (single) DTV encoder. */
+static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
+               struct drm_encoder *encoder)
+{
+       /* if we had >1 encoder, we'd need something more clever: */
+       return mdp4_dtv_round_pixclk(encoder, rate);
+}
+
+/* On drm file close: cancel any page-flips this file still has pending
+ * on any crtc, so completion events aren't delivered to a dead client.
+ */
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
+       unsigned i;
+
+       for (i = 0; i < priv->num_crtcs; i++)
+               mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+/* Free the kms state (clocks/regulators/mmio are devm-managed). */
+static void mdp4_destroy(struct msm_kms *kms)
+{
+       struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       kfree(mdp4_kms);
+}
+
+/* mdp_kms vtable wiring the generic kms ops to the mdp4 implementations. */
+static const struct mdp_kms_funcs kms_funcs = {
+       .base = {
+               .hw_init         = mdp4_hw_init,
+               .irq_preinstall  = mdp4_irq_preinstall,
+               .irq_postinstall = mdp4_irq_postinstall,
+               .irq_uninstall   = mdp4_irq_uninstall,
+               .irq             = mdp4_irq,
+               .enable_vblank   = mdp4_enable_vblank,
+               .disable_vblank  = mdp4_disable_vblank,
+               .get_format      = mdp_get_format,
+               .round_pixclk    = mdp4_round_pixclk,
+               .preclose        = mdp4_preclose,
+               .destroy         = mdp4_destroy,
+       },
+       .set_irqmask         = mdp4_set_irqmask,
+};
+
+/* Gate the MDP4 clocks (pclk is optional and may be NULL). */
+int mdp4_disable(struct mdp4_kms *mdp4_kms)
+{
+       DBG("");
+
+       clk_disable_unprepare(mdp4_kms->clk);
+       if (mdp4_kms->pclk)
+               clk_disable_unprepare(mdp4_kms->pclk);
+       clk_disable_unprepare(mdp4_kms->lut_clk);
+
+       return 0;
+}
+
+/* Ungate the MDP4 clocks (pclk is optional and may be NULL). */
+int mdp4_enable(struct mdp4_kms *mdp4_kms)
+{
+       DBG("");
+
+       clk_prepare_enable(mdp4_kms->clk);
+       if (mdp4_kms->pclk)
+               clk_prepare_enable(mdp4_kms->pclk);
+       clk_prepare_enable(mdp4_kms->lut_clk);
+
+       return 0;
+}
+
+/* Construct the fixed display pipeline: two public VG planes, one
+ * private RGB1 plane feeding a crtc on DMA_E/overlay 1, the DTV encoder,
+ * and the HDMI bridge/connector on top of it.  Returns 0 or -errno;
+ * partially constructed objects are left for drm mode_config teardown.
+ */
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+       struct drm_device *dev = mdp4_kms->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct hdmi *hdmi;
+       int ret;
+
+       /*
+        *  NOTE: this is a bit simplistic until we add support
+        * for more than just RGB1->DMA_E->DTV->HDMI
+        */
+
+       /* construct non-private planes: */
+       plane = mdp4_plane_init(dev, VG1, false);
+       if (IS_ERR(plane)) {
+               dev_err(dev->dev, "failed to construct plane for VG1\n");
+               ret = PTR_ERR(plane);
+               goto fail;
+       }
+       priv->planes[priv->num_planes++] = plane;
+
+       plane = mdp4_plane_init(dev, VG2, false);
+       if (IS_ERR(plane)) {
+               dev_err(dev->dev, "failed to construct plane for VG2\n");
+               ret = PTR_ERR(plane);
+               goto fail;
+       }
+       priv->planes[priv->num_planes++] = plane;
+
+       /* the CRTCs get constructed with a private plane: */
+       plane = mdp4_plane_init(dev, RGB1, true);
+       if (IS_ERR(plane)) {
+               dev_err(dev->dev, "failed to construct plane for RGB1\n");
+               ret = PTR_ERR(plane);
+               goto fail;
+       }
+
+       crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
+       if (IS_ERR(crtc)) {
+               dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
+               ret = PTR_ERR(crtc);
+               goto fail;
+       }
+       priv->crtcs[priv->num_crtcs++] = crtc;
+
+       encoder = mdp4_dtv_encoder_init(dev);
+       if (IS_ERR(encoder)) {
+               dev_err(dev->dev, "failed to construct DTV encoder\n");
+               ret = PTR_ERR(encoder);
+               goto fail;
+       }
+       encoder->possible_crtcs = 0x1;     /* DTV can be hooked to DMA_E */
+       priv->encoders[priv->num_encoders++] = encoder;
+
+       hdmi = hdmi_init(dev, encoder);
+       if (IS_ERR(hdmi)) {
+               ret = PTR_ERR(hdmi);
+               dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       return ret;
+}
+
+/* IOMMU context-bank port names the MDP scanout engine attaches to. */
+static const char *iommu_ports[] = {
+               "mdp_port0_cb0", "mdp_port1_cb0",
+};
+
+/* Probe-time construction of the MDP4 kms: map registers, grab (optional)
+ * regulators and (mostly mandatory) clocks, quiesce any state left by the
+ * bootloader, attach the IOMMU (or fall back to contiguous buffers), and
+ * build the modeset pipeline.  Returns the kms or ERR_PTR().
+ */
+struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+{
+       struct platform_device *pdev = dev->platformdev;
+       struct mdp4_platform_config *config = mdp4_get_config(pdev);
+       struct mdp4_kms *mdp4_kms;
+       struct msm_kms *kms = NULL;
+       struct msm_mmu *mmu;
+       int ret;
+
+       mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+       if (!mdp4_kms) {
+               dev_err(dev->dev, "failed to allocate kms\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+       kms = &mdp4_kms->base.base;
+
+       mdp4_kms->dev = dev;
+
+       mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
+       if (IS_ERR(mdp4_kms->mmio)) {
+               ret = PTR_ERR(mdp4_kms->mmio);
+               goto fail;
+       }
+
+       /* regulators are optional: a failed get collapses to NULL and the
+        * corresponding setup in hw_init is simply skipped:
+        */
+       mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+       if (IS_ERR(mdp4_kms->dsi_pll_vdda))
+               mdp4_kms->dsi_pll_vdda = NULL;
+
+       mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+       if (IS_ERR(mdp4_kms->dsi_pll_vddio))
+               mdp4_kms->dsi_pll_vddio = NULL;
+
+       mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+       if (IS_ERR(mdp4_kms->vdd))
+               mdp4_kms->vdd = NULL;
+
+       if (mdp4_kms->vdd) {
+               ret = regulator_enable(mdp4_kms->vdd);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+                       goto fail;
+               }
+       }
+
+       mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+       if (IS_ERR(mdp4_kms->clk)) {
+               dev_err(dev->dev, "failed to get core_clk\n");
+               ret = PTR_ERR(mdp4_kms->clk);
+               goto fail;
+       }
+
+       /* iface clock is optional: */
+       mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+       if (IS_ERR(mdp4_kms->pclk))
+               mdp4_kms->pclk = NULL;
+
+       // XXX if (rev >= MDP_REV_42) { ???
+       mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+       if (IS_ERR(mdp4_kms->lut_clk)) {
+               dev_err(dev->dev, "failed to get lut_clk\n");
+               ret = PTR_ERR(mdp4_kms->lut_clk);
+               goto fail;
+       }
+
+       clk_set_rate(mdp4_kms->clk, config->max_clk);
+       clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+
+       /* make sure things are off before attaching iommu (bootloader could
+        * have left things on, in which case we'll start getting faults if
+        * we don't disable):
+        */
+       mdp4_enable(mdp4_kms);
+       mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+       mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+       mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+       mdp4_disable(mdp4_kms);
+       /* ~one frame at 60Hz, to let the disables latch: */
+       mdelay(16);
+
+       if (config->iommu) {
+               mmu = msm_iommu_new(dev, config->iommu);
+               if (IS_ERR(mmu)) {
+                       ret = PTR_ERR(mmu);
+                       goto fail;
+               }
+               ret = mmu->funcs->attach(mmu, iommu_ports,
+                               ARRAY_SIZE(iommu_ports));
+               if (ret)
+                       goto fail;
+       } else {
+               dev_info(dev->dev, "no iommu, fallback to phys "
+                               "contig buffers for scanout\n");
+               mmu = NULL;
+       }
+
+       mdp4_kms->id = msm_register_mmu(dev, mmu);
+       if (mdp4_kms->id < 0) {
+               ret = mdp4_kms->id;
+               dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
+               goto fail;
+       }
+
+       ret = modeset_init(mdp4_kms);
+       if (ret) {
+               dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+               goto fail;
+       }
+
+       return kms;
+
+fail:
+       if (kms)
+               mdp4_destroy(kms);
+       return ERR_PTR(ret);
+}
+
+/* Build platform config: DT support is TODO; the legacy board-file path
+ * picks the max core clock by SoC and fetches the display iommu domain.
+ * Returns a pointer to static (single-instance) storage.
+ */
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
+{
+       static struct mdp4_platform_config config = {};
+#ifdef CONFIG_OF
+       /* TODO */
+#else
+       if (cpu_is_apq8064())
+               config.max_clk = 266667000;
+       else
+               config.max_clk = 200000000;
+
+       config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+#endif
+       return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
new file mode 100644 (file)
index 0000000..66a4d31
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp4.xml.h"
+
+/* Per-device MDP4 kms state, embedding the generic mdp_kms base. */
+struct mdp4_kms {
+       struct mdp_kms base;
+
+       struct drm_device *dev;
+
+       /* hw minor revision, read from REG_MDP4_VERSION at hw_init: */
+       int rev;
+
+       /* mapper-id used to request GEM buffer mapped for scanout: */
+       int id;
+
+       void __iomem *mmio;
+
+       /* optional supplies; NULL when not provided by the platform: */
+       struct regulator *dsi_pll_vdda;
+       struct regulator *dsi_pll_vddio;
+       struct regulator *vdd;
+
+       /* core/iface/lut clocks; pclk (iface) is optional and may be NULL: */
+       struct clk *clk;
+       struct clk *pclk;
+       struct clk *lut_clk;
+
+       /* catch-all handler for interface underrun irqs: */
+       struct mdp_irq error_handler;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp4_platform_config {
+       /* display iommu domain; NULL means phys-contig fallback: */
+       struct iommu_domain *iommu;
+       /* max core/lut clock rate in Hz: */
+       uint32_t max_clk;
+};
+
+/* Write a 32-bit MDP4 register (offset relative to the mmio base). */
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+       msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+/* Read a 32-bit MDP4 register (offset relative to the mmio base). */
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+       return msm_readl(mdp4_kms->mmio + reg);
+}
+
+/* Map a pipe to its OVERLAY_FLUSH bit (0 for pipes with no flush bit).
+ *
+ * Fix: the RGB2 case returned MDP4_OVERLAY_FLUSH_RGB1 (copy-paste from
+ * the line above), so flushing pipe RGB2 flushed RGB1 instead.
+ */
+static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
+{
+       switch (pipe) {
+       case VG1:      return MDP4_OVERLAY_FLUSH_VG1;
+       case VG2:      return MDP4_OVERLAY_FLUSH_VG2;
+       case RGB1:     return MDP4_OVERLAY_FLUSH_RGB1;
+       case RGB2:     return MDP4_OVERLAY_FLUSH_RGB2;
+       default:       return 0;
+       }
+}
+
+/* Map an overlay index to its OVERLAY_FLUSH bit (0 if out of range). */
+static inline uint32_t ovlp2flush(int ovlp)
+{
+       switch (ovlp) {
+       case 0:        return MDP4_OVERLAY_FLUSH_OVLP0;
+       case 1:        return MDP4_OVERLAY_FLUSH_OVLP1;
+       default:       return 0;
+       }
+}
+
+/* Map a DMA channel to its "done" irq bit (used for vblank). */
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+       switch (dma) {
+       case DMA_P:    return MDP4_IRQ_DMA_P_DONE;
+       case DMA_S:    return MDP4_IRQ_DMA_S_DONE;
+       case DMA_E:    return MDP4_IRQ_DMA_E_DONE;
+       default:       return 0;
+       }
+}
+
+/* Map a DMA channel to its interface-underrun error irq bit. */
+static inline uint32_t dma2err(enum mdp4_dma dma)
+{
+       switch (dma) {
+       case DMA_P:    return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
+       case DMA_S:    return 0;  // ???
+       case DMA_E:    return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+       default:       return 0;
+       }
+}
+
+/* Build the LAYERMIXER_IN_CFG field for one pipe: the stage it is
+ * blended at, plus the MIXER1 bit when it feeds mixer 1.
+ *
+ * NOTE(review): WARN_ON() takes a condition; passing a string literal
+ * warns unconditionally here (non-NULL pointer).  WARN(1, "invalid pipe")
+ * would be the conventional spelling — confirm before changing.
+ */
+static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
+               enum mdp_mixer_stage_id stage)
+{
+       uint32_t mixer_cfg = 0;
+
+       switch (pipe) {
+       case VG1:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+               break;
+       case VG2:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+               break;
+       case RGB1:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+               break;
+       case RGB2:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+               break;
+       case RGB3:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+               break;
+       case VG3:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+               break;
+       case VG4:
+               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
+                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+               break;
+       default:
+               WARN_ON("invalid pipe");
+               break;
+       }
+
+       return mixer_cfg;
+}
+
int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);

/* irq handling entry points, plugged into the kms irq machinery: */
void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp4_irq_preinstall(struct msm_kms *kms);
int mdp4_irq_postinstall(struct msm_kms *kms);
void mdp4_irq_uninstall(struct msm_kms *kms);
irqreturn_t mdp4_irq(struct msm_kms *kms);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
/* Enumerate the pixel formats supported by the given pipe: fills
 * pixel_formats with up to max_formats entries and returns the count.
 * Currently pipe-independent (delegates to mdp_get_formats()); pipe_id
 * will matter once YUV support lands, per the TODO below.
 */
static inline
uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
		uint32_t max_formats)
{
	/* TODO when we have YUV, we need to filter supported formats
	 * based on pipe_id..
	 */
	return mdp_get_formats(pixel_formats, max_formats);
}
+
/* plane (overlay pipe) entry points: */
void mdp4_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj);
void mdp4_plane_set_scanout(struct drm_plane *plane,
		struct drm_framebuffer *fb);
int mdp4_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
		enum mdp4_pipe pipe_id, bool private_plane);

/* crtc entry points: */
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id);

/* dtv encoder entry points: */
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
/* bus_find_device() match callback: true when the device's name equals
 * the string passed via 'data'.
 */
static inline int match_dev_name(struct device *dev, void *data)
{
	return strcmp(dev_name(dev), data) == 0;
}
+/* bus scaling data is associated with extra pointless platform devices,
+ * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
+ * to find their pdata to make the bus-scaling stuff work.
+ */
+static inline void *mdp4_find_pdata(const char *devname)
+{
+       struct device *dev;
+       dev = bus_find_device(&platform_bus_type, NULL,
+                       (void *)devname, match_dev_name);
+       return dev ? dev->platform_data : NULL;
+}
+#endif
+
+#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
new file mode 100644 (file)
index 0000000..2406027
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+
/* Per-plane (overlay) state for the MDP4 KMS driver. */
struct mdp4_plane {
	struct drm_plane base;
	const char *name;	/* pipe name, used in debug logging */

	enum mdp4_pipe pipe;	/* hw pipe backing this plane */

	uint32_t nformats;	/* number of valid entries in formats[] */
	uint32_t formats[32];	/* supported fourcc pixel formats */

	bool enabled;
};
#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+       struct msm_drm_private *priv = plane->dev->dev_private;
+       return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static int mdp4_plane_update(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h)
+{
+       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+       mdp4_plane->enabled = true;
+
+       if (plane->fb)
+               drm_framebuffer_unreference(plane->fb);
+
+       drm_framebuffer_reference(fb);
+
+       return mdp4_plane_mode_set(plane, crtc, fb,
+                       crtc_x, crtc_y, crtc_w, crtc_h,
+                       src_x, src_y, src_w, src_h);
+}
+
+static int mdp4_plane_disable(struct drm_plane *plane)
+{
+       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+       DBG("%s: disable", mdp4_plane->name);
+       if (plane->crtc)
+               mdp4_crtc_detach(plane->crtc, plane);
+       return 0;
+}
+
/* drm_plane_funcs::destroy: detach, unregister and free the plane. */
static void mdp4_plane_destroy(struct drm_plane *plane)
{
	struct mdp4_plane *p = to_mdp4_plane(plane);

	mdp4_plane_disable(plane);
	drm_plane_cleanup(plane);
	kfree(p);
}
+
/* helper to install properties which are common to planes and crtcs */
void mdp4_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	// XXX stub: no common properties are wired up yet
}
+
/* drm_plane_funcs::set_property hook; no custom properties supported
 * yet, so every request is rejected with -EINVAL.
 * NOTE(review): not declared in mdp4_kms.h and only referenced via the
 * funcs table below — looks like it could be static; confirm there are
 * no external users before changing linkage.
 */
int mdp4_plane_set_property(struct drm_plane *plane,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
+
/* vfunc table handed to the drm core for this plane */
static const struct drm_plane_funcs mdp4_plane_funcs = {
		.update_plane = mdp4_plane_update,
		.disable_plane = mdp4_plane_disable,
		.destroy = mdp4_plane_destroy,
		.set_property = mdp4_plane_set_property,
};
+
/* Program the pipe's scanout source: the per-plane strides and the base
 * address (iova) of the fb's backing GEM buffer, then record fb as the
 * plane's current framebuffer.  Caller is responsible for holding a
 * reference on fb.
 */
void mdp4_plane_set_scanout(struct drm_plane *plane,
		struct drm_framebuffer *fb)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
	struct mdp4_kms *mdp4_kms = get_kms(plane);
	enum mdp4_pipe pipe = mdp4_plane->pipe;
	uint32_t iova;

	/* strides for (up to) four planes, split across two registers: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
			MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
			MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	/* NOTE(review): msm_gem_get_iova()'s return value is ignored, and
	 * iova is uninitialized on failure — presumably the buffer is
	 * always pinned by this point; confirm.
	 */
	msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);

	plane->fb = fb;
}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT     0x20000000
+
/* Program the hw pipe for the given src (fb) -> dst (crtc) mapping:
 * source/destination geometry, scanout addresses, pixel format and
 * unpack order, then attach the plane to the crtc.  Scaler phase steps
 * are left at the default (see TODOs).  Always returns 0.
 */
int mdp4_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
	struct mdp4_kms *mdp4_kms = get_kms(plane);
	enum mdp4_pipe pipe = mdp4_plane->pipe;
	const struct mdp_format *format;
	uint32_t op_mode = 0;
	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;

	/* src values are in Q16 fixed point, convert to integer: */
	src_x = src_x >> 16;
	src_y = src_y >> 16;
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
			fb->base.id, src_x, src_y, src_w, src_h,
			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

	/* enable the scaler on each axis where src and dst sizes differ: */
	if (src_w != crtc_w) {
		op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
		/* TODO calc phasex_step */
	}

	if (src_h != crtc_h) {
		op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
		/* TODO calc phasey_step */
	}

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
			MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
			MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
			MDP4_PIPE_SRC_XY_X(src_x) |
			MDP4_PIPE_SRC_XY_Y(src_y));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
			MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
			MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));

	/* NOTE(review): DST_XY is built with the MDP4_PIPE_SRC_XY_* field
	 * macros; presumably the X/Y field layout is identical — confirm
	 * against MDP4_PIPE_DST_XY_* in the generated register header.
	 */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
			MDP4_PIPE_SRC_XY_X(crtc_x) |
			MDP4_PIPE_SRC_XY_Y(crtc_y));

	mdp4_plane_set_scanout(plane, fb);

	format = to_mdp_format(msm_framebuffer_format(fb));

	/* per-component bit depths, alpha, bytes-per-pixel and unpack info: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
			MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
			MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
			MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
			MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
			COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
			MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
			MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
			COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));

	/* component ordering within a pixel: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
			MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
			MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
			MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
			MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);

	/* TODO detach from old crtc (if we had more than one) */
	mdp4_crtc_attach(crtc, plane);

	return 0;
}
+
/* indexed by enum mdp4_pipe (see mdp4_plane_init) */
static const char *pipe_names[] = {
		"VG1", "VG2",
		"RGB1", "RGB2", "RGB3",
		"VG3", "VG4",
};
+
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+       return mdp4_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+               enum mdp4_pipe pipe_id, bool private_plane)
+{
+       struct drm_plane *plane = NULL;
+       struct mdp4_plane *mdp4_plane;
+       int ret;
+
+       mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+       if (!mdp4_plane) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       plane = &mdp4_plane->base;
+
+       mdp4_plane->pipe = pipe_id;
+       mdp4_plane->name = pipe_names[pipe_id];
+
+       mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
+                       ARRAY_SIZE(mdp4_plane->formats));
+
+       drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+                       mdp4_plane->formats, mdp4_plane->nformats,
+                       private_plane);
+
+       mdp4_plane_install_properties(plane, &plane->base);
+
+       return plane;
+
+fail:
+       if (plane)
+               mdp4_plane_destroy(plane);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
new file mode 100644 (file)
index 0000000..0aa5151
--- /dev/null
@@ -0,0 +1,1036 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf {
+       INTF_DSI = 1,
+       INTF_HDMI = 3,
+       INTF_LCDC = 5,
+       INTF_eDP = 9,
+};
+
+enum mdp5_intfnum {
+       NO_INTF = 0,
+       INTF0 = 1,
+       INTF1 = 2,
+       INTF2 = 3,
+       INTF3 = 4,
+};
+
+enum mdp5_pipe {
+       SSPP_VIG0 = 0,
+       SSPP_VIG1 = 1,
+       SSPP_VIG2 = 2,
+       SSPP_RGB0 = 3,
+       SSPP_RGB1 = 4,
+       SSPP_RGB2 = 5,
+       SSPP_DMA0 = 6,
+       SSPP_DMA1 = 7,
+};
+
+enum mdp5_ctl_mode {
+       MODE_NONE = 0,
+       MODE_ROT0 = 1,
+       MODE_ROT1 = 2,
+       MODE_WB0 = 3,
+       MODE_WB1 = 4,
+       MODE_WFD = 5,
+};
+
+enum mdp5_pack_3d {
+       PACK_3D_FRAME_INT = 0,
+       PACK_3D_H_ROW_INT = 1,
+       PACK_3D_V_ROW_INT = 2,
+       PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_chroma_samp_type {
+       CHROMA_RGB = 0,
+       CHROMA_H2V1 = 1,
+       CHROMA_H1V2 = 2,
+       CHROMA_420 = 3,
+};
+
+enum mdp5_scale_filter {
+       SCALE_FILTER_NEAREST = 0,
+       SCALE_FILTER_BIL = 1,
+       SCALE_FILTER_PCMN = 2,
+       SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+       BWC_LOSSLESS = 0,
+       BWC_Q_HIGH = 1,
+       BWC_Q_MED = 2,
+};
+
+enum mdp5_client_id {
+       CID_UNUSED = 0,
+       CID_VIG0_Y = 1,
+       CID_VIG0_CR = 2,
+       CID_VIG0_CB = 3,
+       CID_VIG1_Y = 4,
+       CID_VIG1_CR = 5,
+       CID_VIG1_CB = 6,
+       CID_VIG2_Y = 7,
+       CID_VIG2_CR = 8,
+       CID_VIG2_CB = 9,
+       CID_DMA0_Y = 10,
+       CID_DMA0_CR = 11,
+       CID_DMA0_CB = 12,
+       CID_DMA1_Y = 13,
+       CID_DMA1_CR = 14,
+       CID_DMA1_CB = 15,
+       CID_RGB0 = 16,
+       CID_RGB1 = 17,
+       CID_RGB2 = 18,
+       CID_MAX = 19,
+};
+
+enum mdp5_igc_type {
+       IGC_VIG = 0,
+       IGC_RGB = 1,
+       IGC_DMA = 2,
+       IGC_DSPP = 3,
+};
+
+#define MDP5_IRQ_INTF0_WB_ROT_COMP                             0x00000001
+#define MDP5_IRQ_INTF1_WB_ROT_COMP                             0x00000002
+#define MDP5_IRQ_INTF2_WB_ROT_COMP                             0x00000004
+#define MDP5_IRQ_INTF3_WB_ROT_COMP                             0x00000008
+#define MDP5_IRQ_INTF0_WB_WFD                                  0x00000010
+#define MDP5_IRQ_INTF1_WB_WFD                                  0x00000020
+#define MDP5_IRQ_INTF2_WB_WFD                                  0x00000040
+#define MDP5_IRQ_INTF3_WB_WFD                                  0x00000080
+#define MDP5_IRQ_INTF0_PING_PONG_COMP                          0x00000100
+#define MDP5_IRQ_INTF1_PING_PONG_COMP                          0x00000200
+#define MDP5_IRQ_INTF2_PING_PONG_COMP                          0x00000400
+#define MDP5_IRQ_INTF3_PING_PONG_COMP                          0x00000800
+#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR                                0x00001000
+#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR                                0x00002000
+#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR                                0x00004000
+#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR                                0x00008000
+#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR                                0x00010000
+#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR                                0x00020000
+#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR                                0x00040000
+#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR                                0x00080000
+#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF                      0x00100000
+#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF                      0x00200000
+#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF                      0x00400000
+#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF                      0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN                               0x01000000
+#define MDP5_IRQ_INTF0_VSYNC                                   0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN                               0x04000000
+#define MDP5_IRQ_INTF1_VSYNC                                   0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN                               0x10000000
+#define MDP5_IRQ_INTF2_VSYNC                                   0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN                               0x40000000
+#define MDP5_IRQ_INTF3_VSYNC                                   0x80000000
+#define REG_MDP5_HW_VERSION                                    0x00000000
+
+#define REG_MDP5_HW_INTR_STATUS                                        0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_MDP                           0x00000001
+#define MDP5_HW_INTR_STATUS_INTR_DSI0                          0x00000010
+#define MDP5_HW_INTR_STATUS_INTR_DSI1                          0x00000020
+#define MDP5_HW_INTR_STATUS_INTR_HDMI                          0x00000100
+#define MDP5_HW_INTR_STATUS_INTR_EDP                           0x00001000
+
+#define REG_MDP5_MDP_VERSION                                   0x00000100
+#define MDP5_MDP_VERSION_MINOR__MASK                           0x00ff0000
+#define MDP5_MDP_VERSION_MINOR__SHIFT                          16
+static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+{
+       return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_VERSION_MAJOR__MASK                           0xf0000000
+#define MDP5_MDP_VERSION_MAJOR__SHIFT                          28
+static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+{
+       return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL                                 0x00000104
+#define MDP5_DISP_INTF_SEL_INTF0__MASK                         0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT                                0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+{
+       return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK                         0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT                                8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+{
+       return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK                         0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT                                16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+{
+       return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK                         0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT                                24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+{
+       return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
+
+#define REG_MDP5_INTR_EN                                       0x00000110
+
+#define REG_MDP5_INTR_STATUS                                   0x00000114
+
+#define REG_MDP5_INTR_CLEAR                                    0x00000118
+
+#define REG_MDP5_HIST_INTR_EN                                  0x0000011c
+
+#define REG_MDP5_HIST_INTR_STATUS                              0x00000120
+
+#define REG_MDP5_HIST_INTR_CLEAR                               0x00000124
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK                     0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT                    0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+{
+       return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK                     0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT                    8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+{
+       return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK                     0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT                    16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+{
+       return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK                     0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT                    0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+{
+       return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK                     0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT                    8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+{
+       return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK                     0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT                    16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+{
+       return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+       switch (idx) {
+               case IGC_VIG: return 0x00000300;
+               case IGC_RGB: return 0x00000310;
+               case IGC_DMA: return 0x00000320;
+               case IGC_DSPP: return 0x00000400;
+               default: return INVALID_IDX(idx);
+       }
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK                             0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT                            0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+       return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE                          0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0                                0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1                                0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2                                0x40000000
+
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK                          0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT                         0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK                          0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT                         3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK                          0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT                         6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK                          0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT                         9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK                          0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT                         12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK                          0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT                         15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK                          0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT                         18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK                          0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT                         21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+{
+       return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR                                0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT                          0x02000000
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+#define MDP5_CTL_OP_MODE__MASK                                 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT                                        0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+       return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+/* NOTE(review): this header appears machine-generated from a register XML
+ * description (uniform __MASK/__SHIFT pairs, MDP5_XML guard) — confirm the
+ * generator source before hand-editing.  The MDP5_*() inline helpers pack a
+ * value into the named bitfield: shift into position, then mask to the
+ * field width.  REG_MDP5_CTL_*(i0): i0 selects the CTL instance; each
+ * instance occupies a 0x100 register stride. */
+#define MDP5_CTL_OP_INTF_NUM__MASK                             0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT                            4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+       return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE                                   0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE                             0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK                              0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT                             20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+       return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+/* CTL_FLUSH: one bit per named sub-block (VIG/RGB pipes, layer mixers,
+ * DMA, DSPP, CTL itself).  Note the bit layout is sparse — e.g. there is
+ * a gap between LM2 (bit 8) and DMA0 (bit 11). */
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+#define MDP5_CTL_FLUSH_VIG0                                    0x00000001
+#define MDP5_CTL_FLUSH_VIG1                                    0x00000002
+#define MDP5_CTL_FLUSH_VIG2                                    0x00000004
+#define MDP5_CTL_FLUSH_RGB0                                    0x00000008
+#define MDP5_CTL_FLUSH_RGB1                                    0x00000010
+#define MDP5_CTL_FLUSH_RGB2                                    0x00000020
+#define MDP5_CTL_FLUSH_LM0                                     0x00000040
+#define MDP5_CTL_FLUSH_LM1                                     0x00000080
+#define MDP5_CTL_FLUSH_LM2                                     0x00000100
+#define MDP5_CTL_FLUSH_DMA0                                    0x00000800
+#define MDP5_CTL_FLUSH_DMA1                                    0x00001000
+#define MDP5_CTL_FLUSH_DSPP0                                   0x00002000
+#define MDP5_CTL_FLUSH_DSPP1                                   0x00004000
+#define MDP5_CTL_FLUSH_DSPP2                                   0x00008000
+#define MDP5_CTL_FLUSH_CTL                                     0x00020000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+
+/* Per-pipe (source surface) registers: i0 is an enum mdp5_pipe index and
+ * each pipe occupies a 0x400 register stride starting at 0x1200.  Most
+ * registers pack two 16-bit fields (width/height, x/y, plane strides)
+ * into one 32-bit word via the MDP5_PIPE_*() helpers below.
+ * NOTE(review): generated header — do not hand-edit; fix the XML source. */
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+
+/* SRC_SIZE shares offset 0x1200 with the pipe base register. */
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK                                0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT                       16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK                         0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT                                0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK                    0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT                   16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK                     0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT                    0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+#define MDP5_PIPE_SRC_XY_Y__MASK                               0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT                              16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK                               0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT                              0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK                                0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT                       16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK                         0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT                                0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+#define MDP5_PIPE_OUT_XY_Y__MASK                               0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT                              16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK                               0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT                              0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+/* Up to four source plane addresses (SRC0..SRC3) per pipe, presumably for
+ * multi-planar formats — TODO confirm against NUM_PLANES usage in callers. */
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+
+/* STRIDE_A holds plane 0/1 strides, STRIDE_B holds plane 2/3 strides. */
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK                                0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT                       0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK                                0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT                       16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK                                0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT                       0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK                                0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT                       16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+
+/* SRC_FORMAT: per-component bit depth, packing and plane-count description
+ * of the fetched surface. */
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK                       0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT                      0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK                       0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT                      2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK                       0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT                      4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK                       0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT                      6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE                      0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK                         0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT                                9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90                             0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK                        0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT               12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT                      0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB                  0x00040000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK                  0x00780000
+#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT                 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK                 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT                        23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val)
+{
+       return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+/* SRC_UNPACK: component unpack order, one byte-sized element per field. */
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK                       0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT                      0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK                       0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT                      8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK                       0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT                      16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK                       0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT                      24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN                           0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK                                0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT                       1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+       return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR                          0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD                          0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN                           0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0                                0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1                                0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE                      0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD                  0x00800000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+#define MDP5_PIPE_DECIMATION_VERT__MASK                                0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT                       0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK                                0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT                       8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+       return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+/* Scaler config: independent X/Y enables plus per-direction filter
+ * selection for min/CR/max ranges (enum mdp5_scale_filter). */
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN                       0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN                       0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK         0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT                8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+{
+       return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK         0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT                10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+{
+       return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK          0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT         12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+{
+       return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK          0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT         14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+{
+       return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK         0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT                16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+{
+       return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK         0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT                18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+{
+       return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+
+/* Layer mixer (LM) registers: i0 selects the mixer (0x400 stride from
+ * 0x3200).  Two-index BLEND registers take i1 as the blend stage
+ * (0x30 stride per stage). */
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA                        0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA                        0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA                        0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA                        0x00000010
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK                          0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT                         16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK                           0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT                          0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+
+/* BLEND_OP_MODE: foreground (low byte) and background (second byte)
+ * alpha selection plus invert/modulate/transparency-enable flags. */
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK                   0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT                  0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+       return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA                     0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA                     0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA                 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN                     0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK                   0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT                  8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+       return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA                     0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA                     0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA                 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN                     0x00002000
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+
+/* Cursor registers.  Note CURSOR_STRIDE (0x32dc) sits below
+ * CURSOR_IMG_SIZE (0x32e0) even though it is listed later here;
+ * the ordering follows the generator, not the address map. */
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+
+/* DSPP (destination post-processing) registers: i0 selects the DSPP
+ * instance (0x400 stride from 0x4600).  OP_MODE gates the individual
+ * post-processing stages (IGC, PCC, dither, histogram, PA, gamut). */
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN                           0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK                    0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT                   1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+       return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN                               0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN                            0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN                              0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR                           0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN                          0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN                                        0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN                             0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER                          0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+
+/* INTF (display interface) registers: i0 selects the interface instance
+ * (0x200 stride from 0x12500).  Covers video-mode timing (h/v sync,
+ * display/active windows), polarity, and the test-pattern generator. */
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK                       0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT                      0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+       return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK                       0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT                      16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+       return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
+
+/* F0/F1 register pairs presumably correspond to the two fields of an
+ * interlaced frame — TODO confirm against the encoder code. */
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK                   0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT                  0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+       return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE             0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK                   0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT                  0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+       return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK                     0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT                    0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+       return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK                       0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT                      16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+       return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK                      0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT                     0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+       return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK                                0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT                       16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+       return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE                  0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW                       0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW                       0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW                     0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+
+/* AD registers: i0 selects the AD core instance (0x200 stride from
+ * 0x13100).  NOTE(review): "AD" semantics (presumably assertive/adaptive
+ * display backlight control, given BL/AL/STR naming) are not established
+ * by this chunk — confirm against the hardware documentation. */
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+
+
+#endif /* MDP5_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
new file mode 100644 (file)
index 0000000..71a3b23
--- /dev/null
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp5_crtc {
+       struct drm_crtc base;
+       char name[8];
+       struct drm_plane *plane;
+       struct drm_plane *planes[8];
+       int id;
+       bool enabled;
+
+       /* which mixer/encoder we route output to: */
+       int mixer;
+
+       /* if there is a pending flip, these will be non-null: */
+       struct drm_pending_vblank_event *event;
+       struct msm_fence_cb pageflip_cb;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP   0x2
+       atomic_t pending;
+
+       /* the fb that we logically (from the PoV of the KMS API) hold
+        * a ref to, which we may not yet be scanning out (we may still
+        * be scanning out the previous fb, in the page_flip case, while
+        * waiting for gpu rendering to complete):
+        */
+       struct drm_framebuffer *fb;
+
+       /* the fb that we currently hold a scanout ref to: */
+       struct drm_framebuffer *scanout_fb;
+
+       /* for unref'ing framebuffers after scanout completes: */
+       struct drm_flip_work unref_fb_work;
+
+       struct mdp_irq vblank;
+       struct mdp_irq err;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+       struct msm_drm_private *priv = crtc->dev->dev_private;
+       return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+       atomic_or(pending, &mdp5_crtc->pending);
+       mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct mdp5_kms *mdp5_kms = get_kms(crtc);
+       int id = mdp5_crtc->id;
+       uint32_t i, flush = 0;
+
+       for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+               struct drm_plane *plane = mdp5_crtc->planes[i];
+               if (plane) {
+                       enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
+                       flush |= pipe2flush(pipe);
+               }
+       }
+       flush |= mixer2flush(mdp5_crtc->id);
+       flush |= MDP5_CTL_FLUSH_CTL;
+
+       DBG("%s: flush=%08x", mdp5_crtc->name, flush);
+
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct drm_framebuffer *old_fb = mdp5_crtc->fb;
+
+       /* grab reference to incoming scanout fb: */
+       drm_framebuffer_reference(new_fb);
+       mdp5_crtc->base.fb = new_fb;
+       mdp5_crtc->fb = new_fb;
+
+       if (old_fb)
+               drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this.  Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+       /* flush updates, to make sure hw is updated to new scanout fb,
+        * so that we can safely queue unref to current fb (ie. next
+        * vblank we know hw is done w/ previous scanout_fb).
+        */
+       crtc_flush(crtc);
+
+       if (mdp5_crtc->scanout_fb)
+               drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
+                               mdp5_crtc->scanout_fb);
+
+       mdp5_crtc->scanout_fb = fb;
+
+       /* enable vblank to complete flip: */
+       request_pending(crtc, PENDING_FLIP);
+}
+
+/* if file!=NULL, this is the potential cancel-flip path from preclose */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_pending_vblank_event *event;
+       unsigned long flags, i;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       event = mdp5_crtc->event;
+       if (event) {
+               /* if regular vblank case (!file) or if cancel-flip from
+                * preclose on file that requested flip, then send the
+                * event:
+                */
+               if (!file || (event->base.file_priv == file)) {
+                       mdp5_crtc->event = NULL;
+                       drm_send_vblank_event(dev, mdp5_crtc->id, event);
+               }
+       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
+               struct drm_plane *plane = mdp5_crtc->planes[i];
+               if (plane)
+                       mdp5_plane_complete_flip(plane);
+       }
+}
+
+static void pageflip_cb(struct msm_fence_cb *cb)
+{
+       struct mdp5_crtc *mdp5_crtc =
+               container_of(cb, struct mdp5_crtc, pageflip_cb);
+       struct drm_crtc *crtc = &mdp5_crtc->base;
+       struct drm_framebuffer *fb = mdp5_crtc->fb;
+
+       if (!fb)
+               return;
+
+       drm_framebuffer_reference(fb);
+       mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
+       update_scanout(crtc, fb);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+       struct mdp5_crtc *mdp5_crtc =
+               container_of(work, struct mdp5_crtc, unref_fb_work);
+       struct drm_device *dev = mdp5_crtc->base.dev;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_framebuffer_unreference(val);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+       mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);
+
+       drm_crtc_cleanup(crtc);
+       drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
+
+       kfree(mdp5_crtc);
+}
+
+static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct mdp5_kms *mdp5_kms = get_kms(crtc);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+       DBG("%s: mode=%d", mdp5_crtc->name, mode);
+
+       if (enabled != mdp5_crtc->enabled) {
+               if (enabled) {
+                       mdp5_enable(mdp5_kms);
+                       mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+               } else {
+                       mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+                       mdp5_disable(mdp5_kms);
+               }
+               mdp5_crtc->enabled = enabled;
+       }
+}
+
+static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct mdp5_kms *mdp5_kms = get_kms(crtc);
+       int id = mdp5_crtc->id;
+
+       /*
+        * Hard-coded setup for now until I figure out how the
+        * layer-mixer works
+        */
+
+       /* LM[id]: */
+       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
+                       MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
+       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
+                       MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+                       MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
+                       MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
+       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
+       mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
+
+       /* NOTE: it seems that for LM[n] and CTL[m] we do not need n==m,
+        * but we do want to be setting CTL[m].LAYER[n].  Not sure what
+        * the point of having CTL[m].LAYER[o] (for o!=n) is.. maybe it
+        * is used when chaining up mixers for high resolution displays?
+        */
+
+       /* CTL[id]: */
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
+                       MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
+                       MDP5_CTL_LAYER_REG_BORDER_COLOR);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+}
+
+static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode,
+               int x, int y,
+               struct drm_framebuffer *old_fb)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct mdp5_kms *mdp5_kms = get_kms(crtc);
+       int ret;
+
+       mode = adjusted_mode;
+
+       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       mdp5_crtc->name, mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+
+       /* grab extra ref for update_scanout() */
+       drm_framebuffer_reference(crtc->fb);
+
+       ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       x << 16, y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16);
+       if (ret) {
+               dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+                               mdp5_crtc->name, ret);
+               return ret;
+       }
+
+       mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+                       MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+                       MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+       update_fb(crtc, crtc->fb);
+       update_scanout(crtc, crtc->fb);
+
+       return 0;
+}
+
+static void mdp5_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       DBG("%s", mdp5_crtc->name);
+       /* make sure we hold a ref to mdp clks while setting up mode: */
+       mdp5_enable(get_kms(crtc));
+       mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_crtc_commit(struct drm_crtc *crtc)
+{
+       mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       crtc_flush(crtc);
+       /* drop the ref to mdp clk's that we got in prepare: */
+       mdp5_disable(get_kms(crtc));
+}
+
+static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+               struct drm_framebuffer *old_fb)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct drm_plane *plane = mdp5_crtc->plane;
+       struct drm_display_mode *mode = &crtc->mode;
+       int ret;
+
+       /* grab extra ref for update_scanout() */
+       drm_framebuffer_reference(crtc->fb);
+
+       ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       x << 16, y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16);
+
+       update_fb(crtc, crtc->fb);
+       update_scanout(crtc, crtc->fb);
+
+       return ret;
+}
+
+static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
+               struct drm_framebuffer *new_fb,
+               struct drm_pending_vblank_event *event,
+               uint32_t page_flip_flags)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_gem_object *obj;
+       unsigned long flags;
+
+       if (mdp5_crtc->event) {
+               dev_err(dev->dev, "already pending flip!\n");
+               return -EBUSY;
+       }
+
+       obj = msm_framebuffer_bo(new_fb, 0);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       mdp5_crtc->event = event;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       update_fb(crtc, new_fb);
+
+       return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+}
+
+static int mdp5_crtc_set_property(struct drm_crtc *crtc,
+               struct drm_property *property, uint64_t val)
+{
+       // XXX
+       return -EINVAL;
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = mdp5_crtc_destroy,
+       .page_flip = mdp5_crtc_page_flip,
+       .set_property = mdp5_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+       .dpms = mdp5_crtc_dpms,
+       .mode_fixup = mdp5_crtc_mode_fixup,
+       .mode_set = mdp5_crtc_mode_set,
+       .prepare = mdp5_crtc_prepare,
+       .commit = mdp5_crtc_commit,
+       .mode_set_base = mdp5_crtc_mode_set_base,
+       .load_lut = mdp5_crtc_load_lut,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+       struct drm_crtc *crtc = &mdp5_crtc->base;
+       struct msm_drm_private *priv = crtc->dev->dev_private;
+       unsigned pending;
+
+       mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+       pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+       if (pending & PENDING_FLIP) {
+               complete_flip(crtc, NULL);
+               drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+       }
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+       struct drm_crtc *crtc = &mdp5_crtc->base;
+       DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
+       crtc_flush(crtc);
+}
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+       DBG("cancel: %p", file);
+       complete_flip(crtc, file);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+               enum mdp5_intf intf_id)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       struct mdp5_kms *mdp5_kms = get_kms(crtc);
+       static const enum mdp5_intfnum intfnum[] = {
+                       INTF0, INTF1, INTF2, INTF3,
+       };
+       uint32_t intf_sel;
+
+       /* now that we know what irq's we want: */
+       mdp5_crtc->err.irqmask = intf2err(intf);
+       mdp5_crtc->vblank.irqmask = intf2vblank(intf);
+
+       /* when called from modeset_init(), skip the rest until later: */
+       if (!mdp5_kms)
+               return;
+
+       intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+       switch (intf) {
+       case 0:
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
+               break;
+       case 1:
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
+               break;
+       case 2:
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
+               break;
+       case 3:
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       blend_setup(crtc);
+
+       DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+
+       mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
+                       MDP5_CTL_OP_MODE(MODE_NONE) |
+                       MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+
+       crtc_flush(crtc);
+}
+
+static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
+               struct drm_plane *plane)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+       BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));
+
+       if (mdp5_crtc->planes[pipe_id] == plane)
+               return;
+
+       mdp5_crtc->planes[pipe_id] = plane;
+       blend_setup(crtc);
+       if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
+               crtc_flush(crtc);
+}
+
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+       set_attach(crtc, mdp5_plane_pipe(plane), plane);
+}
+
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+       set_attach(crtc, mdp5_plane_pipe(plane), NULL);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+               struct drm_plane *plane, int id)
+{
+       struct drm_crtc *crtc = NULL;
+       struct mdp5_crtc *mdp5_crtc;
+       int ret;
+
+       mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+       if (!mdp5_crtc) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       crtc = &mdp5_crtc->base;
+
+       mdp5_crtc->plane = plane;
+       mdp5_crtc->id = id;
+
+       mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+       mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+
+       snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
+                       pipe2name(mdp5_plane_pipe(plane)), id);
+
+       ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
+                       "unref fb", unref_fb_worker);
+       if (ret)
+               goto fail;
+
+       INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
+
+       drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
+       drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+       mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);
+
+       return crtc;
+
+fail:
+       if (crtc)
+               mdp5_crtc_destroy(crtc);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
new file mode 100644 (file)
index 0000000..edec7bf
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp5_encoder {
+       struct drm_encoder base;
+       int intf;
+       enum mdp5_intf intf_id;
+       bool enabled;
+       uint32_t bsc;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+       struct msm_drm_private *priv = encoder->dev->dev_private;
+       return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)           \
+       {                                               \
+               .src = MSM_BUS_MASTER_MDP_PORT0,        \
+               .dst = MSM_BUS_SLAVE_EBI_CH0,           \
+               .ab = (ab_val),                         \
+               .ib = (ib_val),                         \
+       }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+       MDP_BUS_VECTOR_ENTRY(0, 0),
+       MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+               .num_paths = 1,
+               .vectors = &mdp_bus_vectors[0],
+}, {
+               .num_paths = 1,
+               .vectors = &mdp_bus_vectors[1],
+} };
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+       .usecase = mdp_bus_usecases,
+       .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+       .name = "mdss_mdp",
+};
+
+static void bs_init(struct mdp5_encoder *mdp5_encoder)
+{
+       mdp5_encoder->bsc = msm_bus_scale_register_client(
+                       &mdp_bus_scale_table);
+       DBG("bus scale client: %08x", mdp5_encoder->bsc);
+}
+
+static void bs_fini(struct mdp5_encoder *mdp5_encoder)
+{
+       if (mdp5_encoder->bsc) {
+               msm_bus_scale_unregister_client(mdp5_encoder->bsc);
+               mdp5_encoder->bsc = 0;
+       }
+}
+
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
+{
+       if (mdp5_encoder->bsc) {
+               DBG("set bus scaling: %d", idx);
+               /* HACK: scaling down, and then immediately back up
+                * seems to leave things broken (underflow).. so
+                * never disable:
+                */
+               idx = 1;
+               msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
+       }
+}
+#else
+static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
+static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
+#endif
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+       bs_fini(mdp5_encoder);
+       drm_encoder_cleanup(encoder);
+       kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+       .destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+       struct mdp5_kms *mdp5_kms = get_kms(encoder);
+       int intf = mdp5_encoder->intf;
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+       DBG("mode=%d", mode);
+
+       if (enabled == mdp5_encoder->enabled)
+               return;
+
+       if (enabled) {
+               bs_set(mdp5_encoder, 1);
+               mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+       } else {
+               mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+               bs_set(mdp5_encoder, 0);
+       }
+
+       mdp5_encoder->enabled = enabled;
+}
+
+static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+       struct mdp5_kms *mdp5_kms = get_kms(encoder);
+       int intf = mdp5_encoder->intf;
+       uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+       uint32_t display_v_start, display_v_end;
+       uint32_t hsync_start_x, hsync_end_x;
+       uint32_t format;
+
+       mode = adjusted_mode;
+
+       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+
+       ctrl_pol = 0;
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+       /* probably need to get DATA_EN polarity from panel.. */
+
+       dtv_hsync_skew = 0;  /* get this from panel? */
+       format = 0x213f;     /* get this from panel? */
+
+       hsync_start_x = (mode->htotal - mode->hsync_start);
+       hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+       vsync_period = mode->vtotal * mode->htotal;
+       vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+       display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+       display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+                       MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+                       MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+                       MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+                       MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+                       MDP5_INTF_ACTIVE_HCTL_START(0) |
+                       MDP5_INTF_ACTIVE_HCTL_END(0));
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3);  /* frame+line? */
+}
+
+static void mdp5_encoder_prepare(struct drm_encoder *encoder)
+{
+       mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp5_encoder_commit(struct drm_encoder *encoder)
+{
+       struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+       mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf,
+                       mdp5_encoder->intf_id);
+       mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+       .dpms = mdp5_encoder_dpms,
+       .mode_fixup = mdp5_encoder_mode_fixup,
+       .mode_set = mdp5_encoder_mode_set,
+       .prepare = mdp5_encoder_prepare,
+       .commit = mdp5_encoder_commit,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+               enum mdp5_intf intf_id)
+{
+       struct drm_encoder *encoder = NULL;
+       struct mdp5_encoder *mdp5_encoder;
+       int ret;
+
+       mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+       if (!mdp5_encoder) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       mdp5_encoder->intf = intf;
+       mdp5_encoder->intf_id = intf_id;
+       encoder = &mdp5_encoder->base;
+
+       drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+       bs_init(mdp5_encoder);
+
+       return encoder;
+
+fail:
+       if (encoder)
+               mdp5_encoder_destroy(encoder);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
new file mode 100644 (file)
index 0000000..353d494
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+       mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+       struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+       struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+       error_handler->irq = mdp5_irq_error_handler;
+       error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+                       MDP5_IRQ_INTF1_UNDER_RUN |
+                       MDP5_IRQ_INTF2_UNDER_RUN |
+                       MDP5_IRQ_INTF3_UNDER_RUN;
+
+       mdp_irq_register(mdp_kms, error_handler);
+
+       return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+}
+
+static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+{
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+       struct drm_device *dev = mdp5_kms->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       unsigned int id;
+       uint32_t status;
+
+       status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS);
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+       VERB("status=%08x", status);
+
+       for (id = 0; id < priv->num_crtcs; id++)
+               if (status & mdp5_crtc_vblank(priv->crtcs[id]))
+                       drm_handle_vblank(dev, id);
+
+       mdp_dispatch_irqs(mdp_kms, status);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+       struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+       uint32_t intr;
+
+       intr = mdp5_read(mdp5_kms, REG_MDP5_HW_INTR_STATUS);
+
+       VERB("intr=%08x", intr);
+
+       if (intr & MDP5_HW_INTR_STATUS_INTR_MDP)
+               mdp5_irq_mdp(mdp_kms);
+
+       if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI)
+               hdmi_irq(0, mdp5_kms->hdmi);
+
+       return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+       mdp_update_vblank_mask(to_mdp_kms(kms),
+                       mdp5_crtc_vblank(crtc), true);
+       return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+       mdp_update_vblank_mask(to_mdp_kms(kms),
+                       mdp5_crtc_vblank(crtc), false);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
new file mode 100644 (file)
index 0000000..ee8446c
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
+
+/* One-time hw init: read and validate the MDP version register
+ * (only v1.0 and v1.2 are accepted), record the minor rev, and put
+ * the interface-select and CTL_OP registers into a known-clear state.
+ * Returns 0 on success, -ENXIO on an unsupported MDP version.
+ */
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       struct drm_device *dev = mdp5_kms->dev;
+       uint32_t version, major, minor;
+       int ret = 0;
+
+       pm_runtime_get_sync(dev->dev);
+
+       /* clocks only need to be on for the version read: */
+       mdp5_enable(mdp5_kms);
+       version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+       mdp5_disable(mdp5_kms);
+
+       major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+       minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+       DBG("found MDP5 version v%d.%d", major, minor);
+
+       if ((major != 1) || ((minor != 0) && (minor != 2))) {
+               dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+                               major, minor);
+               ret = -ENXIO;
+               goto out;
+       }
+
+       mdp5_kms->rev = minor;
+
+       /* Magic unknown register writes:
+        *
+        *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
+        *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
+        *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
+        *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
+        *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
+        *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
+        *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
+        *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
+        *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
+        *
+        * Downstream fbdev driver gets these register offsets/values
+        * from DT.. not really sure what these registers are or if
+        * different values for different boards/SoC's, etc.  I guess
+        * they are the golden registers.
+        *
+        * Not setting these does not seem to cause any problem.  But
+        * we may be getting lucky with the bootloader initializing
+        * them for us.  OTOH, if we can always count on the bootloader
+        * setting the golden registers, then perhaps we don't need to
+        * care.
+        */
+
+       /* NOTE(review): these writes happen with mdp5_disable() already
+        * called above — presumably register access stays valid with the
+        * clocks gated, or pm_runtime keeps them on; confirm.
+        */
+       mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
+
+out:
+       pm_runtime_put_sync(dev->dev);
+
+       return ret;
+}
+
+/* No pixclk rounding constraints on mdp5: any requested rate is ok. */
+static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
+               struct drm_encoder *encoder)
+{
+       return rate;
+}
+
+/* drm file is closing: cancel any page-flips this file still has
+ * pending on any crtc, so no completion event targets a dead file.
+ */
+static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
+       unsigned i;
+
+       for (i = 0; i < priv->num_crtcs; i++)
+               mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+/* Free the kms object.  Mapped resources are devm/msm managed and
+ * released elsewhere; only the allocation itself is freed here.
+ */
+static void mdp5_destroy(struct msm_kms *kms)
+{
+       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       kfree(mdp5_kms);
+}
+
+/* kms vtable: base msm_kms ops plus the mdp-common set_irqmask hook. */
+static const struct mdp_kms_funcs kms_funcs = {
+       .base = {
+               .hw_init         = mdp5_hw_init,
+               .irq_preinstall  = mdp5_irq_preinstall,
+               .irq_postinstall = mdp5_irq_postinstall,
+               .irq_uninstall   = mdp5_irq_uninstall,
+               .irq             = mdp5_irq,
+               .enable_vblank   = mdp5_enable_vblank,
+               .disable_vblank  = mdp5_disable_vblank,
+               .get_format      = mdp_get_format,
+               .round_pixclk    = mdp5_round_pixclk,
+               .preclose        = mdp5_preclose,
+               .destroy         = mdp5_destroy,
+       },
+       .set_irqmask         = mdp5_set_irqmask,
+};
+
+/* Gate the core mdp5 clocks (src_clk/vsync_clk are not managed here).
+ * Always returns 0.
+ */
+int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+       DBG("");
+
+       clk_disable_unprepare(mdp5_kms->ahb_clk);
+       clk_disable_unprepare(mdp5_kms->axi_clk);
+       clk_disable_unprepare(mdp5_kms->core_clk);
+       clk_disable_unprepare(mdp5_kms->lut_clk);
+
+       return 0;
+}
+
+/* Ungate the core mdp5 clocks; mirror of mdp5_disable().  Always
+ * returns 0 — clk_prepare_enable() failures are not propagated.
+ */
+int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+       DBG("");
+
+       clk_prepare_enable(mdp5_kms->ahb_clk);
+       clk_prepare_enable(mdp5_kms->axi_clk);
+       clk_prepare_enable(mdp5_kms->core_clk);
+       clk_prepare_enable(mdp5_kms->lut_clk);
+
+       return 0;
+}
+
+/* Construct the fixed modeset topology: one private RGB plane + crtc
+ * per entry in crtcs[], a single HDMI encoder on INTF 3 hard-wired to
+ * crtc 0, and the HDMI bridge/connector.  Returns 0 or a negative
+ * errno; partially constructed objects are left for the caller's
+ * teardown path (mdp5_destroy via mdp5_kms_init's fail label).
+ */
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
+       static const enum mdp5_pipe crtcs[] = {
+                       SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+       };
+       struct drm_device *dev = mdp5_kms->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct drm_encoder *encoder;
+       int i, ret;
+
+       /* construct CRTCs: */
+       for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+               struct drm_plane *plane;
+               struct drm_crtc *crtc;
+
+               /* 'true' => private primary plane for this crtc: */
+               plane = mdp5_plane_init(dev, crtcs[i], true);
+               if (IS_ERR(plane)) {
+                       ret = PTR_ERR(plane);
+                       dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
+                                       pipe2name(crtcs[i]), ret);
+                       goto fail;
+               }
+
+               crtc  = mdp5_crtc_init(dev, plane, i);
+               if (IS_ERR(crtc)) {
+                       ret = PTR_ERR(crtc);
+                       dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
+                                       pipe2name(crtcs[i]), ret);
+                       goto fail;
+               }
+               priv->crtcs[priv->num_crtcs++] = crtc;
+       }
+
+       /* Construct encoder for HDMI: */
+       encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
+       if (IS_ERR(encoder)) {
+               dev_err(dev->dev, "failed to construct encoder\n");
+               ret = PTR_ERR(encoder);
+               goto fail;
+       }
+
+       /* NOTE: the vsync and error irq's are actually associated with
+        * the INTF/encoder.. the easiest way to deal with this (ie. what
+        * we do now) is assume a fixed relationship between crtc's and
+        * encoders.  I'm not sure if there is ever a need to more freely
+        * assign crtcs to encoders, but if there is then we need to take
+        * care of error and vblank irq's that the crtc has registered,
+        * and also update user-requested vblank_mask.
+        */
+       encoder->possible_crtcs = BIT(0);
+       mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
+
+       priv->encoders[priv->num_encoders++] = encoder;
+
+       /* Construct bridge/connector for HDMI: */
+       mdp5_kms->hdmi = hdmi_init(dev, encoder);
+       if (IS_ERR(mdp5_kms->hdmi)) {
+               ret = PTR_ERR(mdp5_kms->hdmi);
+               dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       return ret;
+}
+
+static const char *iommu_ports[] = {
+               "mdp_0",
+};
+
+/* Look up a named clock via devm_clk_get() and stash it in *clkp.
+ * Returns 0 on success or the negative errno from clk_get; the clk is
+ * device-managed, so no explicit put is needed on teardown.
+ */
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+               const char *name)
+{
+       struct device *dev = &pdev->dev;
+       struct clk *clk = devm_clk_get(dev, name);
+       if (IS_ERR(clk)) {
+               dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+               return PTR_ERR(clk);
+       }
+       *clkp = clk;
+       return 0;
+}
+
+/* Allocate and bring up the mdp5 kms: map register spaces, grab the
+ * vdd regulator and clocks, quiesce the timing engines left running by
+ * the bootloader, attach the iommu, and build the modeset objects.
+ * Returns the kms handle or an ERR_PTR on failure.
+ */
+struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+       struct platform_device *pdev = dev->platformdev;
+       struct mdp5_platform_config *config = mdp5_get_config(pdev);
+       struct mdp5_kms *mdp5_kms;
+       struct msm_kms *kms = NULL;
+       struct msm_mmu *mmu;
+       int ret;
+
+       mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
+       if (!mdp5_kms) {
+               dev_err(dev->dev, "failed to allocate kms\n");
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+       kms = &mdp5_kms->base.base;
+
+       mdp5_kms->dev = dev;
+       mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
+
+       mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+       if (IS_ERR(mdp5_kms->mmio)) {
+               ret = PTR_ERR(mdp5_kms->mmio);
+               goto fail;
+       }
+
+       mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+       if (IS_ERR(mdp5_kms->vbif)) {
+               ret = PTR_ERR(mdp5_kms->vbif);
+               goto fail;
+       }
+
+       mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+       if (IS_ERR(mdp5_kms->vdd)) {
+               ret = PTR_ERR(mdp5_kms->vdd);
+               goto fail;
+       }
+
+       ret = regulator_enable(mdp5_kms->vdd);
+       if (ret) {
+               dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+               goto fail;
+       }
+
+       /* NOTE(review): the || chain collapses any failing get_clk()'s
+        * negative errno to 1, so the fail path returns ERR_PTR(1),
+        * which IS_ERR() does not recognize as an error pointer —
+        * callers could dereference it.  Should use short-circuiting
+        * if-blocks preserving the real error code instead.
+        */
+       ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk") ||
+                       get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk") ||
+                       get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src") ||
+                       get_clk(pdev, &mdp5_kms->core_clk, "core_clk") ||
+                       get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk") ||
+                       get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
+       if (ret)
+               goto fail;
+
+       /* NOTE(review): clk_set_rate() result is stored in ret but never
+        * checked before ret is overwritten below — a failure here is
+        * silently ignored.
+        */
+       ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+
+       /* make sure things are off before attaching iommu (bootloader could
+        * have left things on, in which case we'll start getting faults if
+        * we don't disable):
+        */
+       mdp5_enable(mdp5_kms);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+       mdp5_disable(mdp5_kms);
+       /* let the timing engines drain (~one frame at 60Hz): */
+       mdelay(16);
+
+       if (config->iommu) {
+               mmu = msm_iommu_new(dev, config->iommu);
+               if (IS_ERR(mmu)) {
+                       ret = PTR_ERR(mmu);
+                       goto fail;
+               }
+               ret = mmu->funcs->attach(mmu, iommu_ports,
+                               ARRAY_SIZE(iommu_ports));
+               if (ret)
+                       goto fail;
+       } else {
+               dev_info(dev->dev, "no iommu, fallback to phys "
+                               "contig buffers for scanout\n");
+               mmu = NULL;
+       }
+
+       mdp5_kms->id = msm_register_mmu(dev, mmu);
+       if (mdp5_kms->id < 0) {
+               ret = mdp5_kms->id;
+               dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
+               goto fail;
+       }
+
+       ret = modeset_init(mdp5_kms);
+       if (ret) {
+               dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+               goto fail;
+       }
+
+       return kms;
+
+fail:
+       if (kms)
+               mdp5_destroy(kms);
+       return ERR_PTR(ret);
+}
+
+/* Return platform config.  Currently a zero-filled static placeholder;
+ * DT parsing is still TODO, so iommu/max_clk/smp_blk_cnt are all 0.
+ */
+static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
+{
+       static struct mdp5_platform_config config = {};
+#ifdef CONFIG_OF
+       /* TODO */
+#endif
+       return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
new file mode 100644 (file)
index 0000000..c8b1a25
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "mdp5.xml.h"
+#include "mdp5_smp.h"
+
+/* Driver-private kms state for the MDP5 display controller. */
+struct mdp5_kms {
+       struct mdp_kms base;
+
+       struct drm_device *dev;
+
+       /* MDP minor hw revision (from MDP_VERSION register): */
+       int rev;
+
+       /* mapper-id used to request GEM buffer mapped for scanout: */
+       int id;
+
+       /* for tracking smp allocation amongst pipes: */
+       mdp5_smp_state_t smp_state;
+       struct mdp5_client_smp_state smp_client_state[CID_MAX];
+       int smp_blk_cnt;
+
+       /* io/register spaces: */
+       void __iomem *mmio, *vbif;
+
+       struct regulator *vdd;
+
+       struct clk *axi_clk;
+       struct clk *ahb_clk;
+       struct clk *src_clk;
+       struct clk *core_clk;
+       struct clk *lut_clk;
+       struct clk *vsync_clk;
+
+       struct hdmi *hdmi;
+
+       /* irq handler for underrun errors, registered at postinstall: */
+       struct mdp_irq error_handler;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_platform_config {
+       struct iommu_domain *iommu;     /* NULL => phys-contig fallback */
+       uint32_t max_clk;               /* rate for core_clk_src, in Hz */
+       int smp_blk_cnt;                /* total SMP blocks in the pool */
+};
+
+/* 32-bit register write into the MDP5 mmio space. */
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+       msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+/* 32-bit register read from the MDP5 mmio space. */
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+       return msm_readl(mdp5_kms->mmio + reg);
+}
+
+/* Human-readable name for a pipe, for log messages.  Assumes 'pipe'
+ * is a valid enum value — out-of-range input indexes past the table.
+ */
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+       static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+               NAME(VIG0), NAME(VIG1), NAME(VIG2),
+               NAME(RGB0), NAME(RGB1), NAME(RGB2),
+               NAME(DMA0), NAME(DMA1),
+#undef NAME
+       };
+       return names[pipe];
+}
+
+/* CTL_FLUSH bit for a pipe (0 for unknown pipes). */
+static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
+{
+       switch (pipe) {
+       case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+       case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+       case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+       case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+       case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+       case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+       case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+       case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+       default:        return 0;
+       }
+}
+
+/* Number of SMP clients a pipe uses: RGB pipes have a single client,
+ * everything else (VIG/DMA) has one per Y/CB/CR plane = 3.
+ */
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+       switch (pipe) {
+       case SSPP_RGB0:
+       case SSPP_RGB1:
+       case SSPP_RGB2:
+               return 1;
+       default:
+               return 3;
+       }
+}
+
+/* SMP client id for the given (pipe, plane-index) pair.  For YUV-
+ * capable pipes the ids are consecutive starting at the Y client.
+ */
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+       WARN_ON(plane >= pipe2nclients(pipe));
+       switch (pipe) {
+       case SSPP_VIG0: return CID_VIG0_Y + plane;
+       case SSPP_VIG1: return CID_VIG1_Y + plane;
+       case SSPP_VIG2: return CID_VIG2_Y + plane;
+       case SSPP_RGB0: return CID_RGB0;
+       case SSPP_RGB1: return CID_RGB1;
+       case SSPP_RGB2: return CID_RGB2;
+       case SSPP_DMA0: return CID_DMA0_Y + plane;
+       case SSPP_DMA1: return CID_DMA1_Y + plane;
+       default:        return CID_UNUSED;
+       }
+}
+
+/* CTL_FLUSH bit for a layer-mixer index (0 for unknown mixers). */
+static inline uint32_t mixer2flush(int lm)
+{
+       switch (lm) {
+       case 0:  return MDP5_CTL_FLUSH_LM0;
+       case 1:  return MDP5_CTL_FLUSH_LM1;
+       case 2:  return MDP5_CTL_FLUSH_LM2;
+       default: return 0;
+       }
+}
+
+/* Underrun-error irq bit for an interface index (0 if unknown). */
+static inline uint32_t intf2err(int intf)
+{
+       switch (intf) {
+       case 0:  return MDP5_IRQ_INTF0_UNDER_RUN;
+       case 1:  return MDP5_IRQ_INTF1_UNDER_RUN;
+       case 2:  return MDP5_IRQ_INTF2_UNDER_RUN;
+       case 3:  return MDP5_IRQ_INTF3_UNDER_RUN;
+       default: return 0;
+       }
+}
+
+/* Vsync irq bit for an interface index (0 if unknown). */
+static inline uint32_t intf2vblank(int intf)
+{
+       switch (intf) {
+       case 0:  return MDP5_IRQ_INTF0_VSYNC;
+       case 1:  return MDP5_IRQ_INTF1_VSYNC;
+       case 2:  return MDP5_IRQ_INTF2_VSYNC;
+       case 3:  return MDP5_IRQ_INTF3_VSYNC;
+       default: return 0;
+       }
+}
+
+int mdp5_disable(struct mdp5_kms *mdp5_kms);
+int mdp5_enable(struct mdp5_kms *mdp5_kms);
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+/* Fill pixel_formats with the formats supported by 'pipe' and return
+ * the count.  Currently pipe-agnostic (RGB only) — see TODO below.
+ */
+static inline
+uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
+               uint32_t max_formats)
+{
+       /* TODO when we have YUV, we need to filter supported formats
+        * based on pipe id..
+        */
+       return mdp_get_formats(pixel_formats, max_formats);
+}
+
+void mdp5_plane_install_properties(struct drm_plane *plane,
+               struct drm_mode_object *obj);
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+               struct drm_framebuffer *fb);
+int mdp5_plane_mode_set(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h);
+void mdp5_plane_complete_flip(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+               enum mdp5_pipe pipe, bool private_plane);
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
+               enum mdp5_intf intf_id);
+void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+               struct drm_plane *plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
+               enum mdp5_intf intf_id);
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
new file mode 100644 (file)
index 0000000..0ac8bb5
--- /dev/null
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+
+/* Per-plane state: the hw pipe it drives plus its advertised formats. */
+struct mdp5_plane {
+       struct drm_plane base;
+       const char *name;       /* pipe2name() string, for logging */
+
+       enum mdp5_pipe pipe;
+
+       /* format list advertised at drm_plane_init() time: */
+       uint32_t nformats;
+       uint32_t formats[32];
+
+       bool enabled;
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+/* Convenience: plane -> its device's mdp5_kms. */
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+       struct msm_drm_private *priv = plane->dev->dev_private;
+       return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+/* drm_plane_funcs.update_plane: swap in the new fb (taking a ref,
+ * dropping the old one) and program the pipe via mdp5_plane_mode_set.
+ */
+static int mdp5_plane_update(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+       mdp5_plane->enabled = true;
+
+       /* NOTE(review): if fb == plane->fb, the unref below could drop
+        * the last reference before the ref is taken — confirm callers
+        * never pass the currently-bound fb, or swap the order.
+        */
+       if (plane->fb)
+               drm_framebuffer_unreference(plane->fb);
+
+       drm_framebuffer_reference(fb);
+
+       return mdp5_plane_mode_set(plane, crtc, fb,
+                       crtc_x, crtc_y, crtc_w, crtc_h,
+                       src_x, src_y, src_w, src_h);
+}
+
+/* drm_plane_funcs.disable_plane: release this pipe's SMP blocks (by
+ * requesting zero for each client) and detach from the crtc.
+ */
+static int mdp5_plane_disable(struct drm_plane *plane)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+       int i;
+
+       DBG("%s: disable", mdp5_plane->name);
+
+       /* update our SMP request to zero (release all our blks): */
+       for (i = 0; i < pipe2nclients(pipe); i++)
+               mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
+
+       /* TODO detaching now will cause us not to get the last
+        * vblank and mdp5_smp_commit().. so other planes will
+        * still see smp blocks previously allocated to us as
+        * in-use..
+        */
+       if (plane->crtc)
+               mdp5_crtc_detach(plane->crtc, plane);
+
+       return 0;
+}
+
+/* drm_plane_funcs.destroy: disable first so SMP blocks are released,
+ * then unregister from drm and free.
+ */
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+       mdp5_plane_disable(plane);
+       drm_plane_cleanup(plane);
+
+       kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp5_plane_install_properties(struct drm_plane *plane,
+               struct drm_mode_object *obj)
+{
+       // XXX no properties yet — placeholder
+}
+
+/* drm_plane_funcs.set_property: no properties supported yet.
+ * NOTE(review): only referenced from mdp5_plane_funcs below — looks
+ * like this could be static; confirm no external users.
+ */
+int mdp5_plane_set_property(struct drm_plane *plane,
+               struct drm_property *property, uint64_t val)
+{
+       // XXX
+       return -EINVAL;
+}
+
+/* drm plane vtable: */
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+               .update_plane = mdp5_plane_update,
+               .disable_plane = mdp5_plane_disable,
+               .destroy = mdp5_plane_destroy,
+               .set_property = mdp5_plane_set_property,
+};
+
+/* Program the pipe's scanout source: per-plane strides and iova's for
+ * up to 4 fb planes (unused slots are zeroed), then record the fb on
+ * the drm plane.  Assumes the fb's BOs are already (or can be) pinned
+ * via msm_gem_get_iova for this kms' mmu id.
+ */
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+       uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
+       uint32_t iova[4];
+       int i;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
+               /* NOTE(review): msm_gem_get_iova() return value is not
+                * checked — iova[i] would be used uninitialized on
+                * failure; confirm it cannot fail at this point.
+                */
+               msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
+       }
+       for (; i < 4; i++)
+               iova[i] = 0;
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+                       MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+                       MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+                       MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+                       MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
+
+       plane->fb = fb;
+}
+
+/* NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width.  Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+/* Compute and request SMP blocks for every plane of the format, sized
+ * for 'nlines' of fetch at the given width.  Returns total # of blocks
+ * allocated, or a negative errno from mdp5_smp_request().
+ * NOTE(review): on failure part-way through, blocks requested for
+ * earlier planes are not rolled back here — presumably released later
+ * via mdp5_plane_disable(); confirm.
+ */
+static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
+               uint32_t nplanes, uint32_t width)
+{
+       struct drm_device *dev = plane->dev;
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+       int i, hsub, nlines, nblks, ret;
+
+       hsub = drm_format_horz_chroma_subsampling(format);
+
+       /* different if BWC (compressed framebuffer?) enabled: */
+       nlines = 2;
+
+       for (i = 0, nblks = 0; i < nplanes; i++) {
+               int n, fetch_stride, cpp;
+
+               cpp = drm_format_plane_cpp(format, i);
+               /* chroma planes (i > 0) are horizontally subsampled: */
+               fetch_stride = width * cpp / (i ? hsub : 1);
+
+               n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+
+               /* for hw rev v1.00 */
+               if (mdp5_kms->rev == 0)
+                       n = roundup_pow_of_two(n);
+
+               DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
+               ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
+               if (ret) {
+                       dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
+                                       n, ret);
+                       return ret;
+               }
+
+               nblks += n;
+       }
+
+       /* in success case, return total # of blocks allocated: */
+       return nblks;
+}
+
+/* Program the pipe's three requestor-priority fifo watermarks at 1/4,
+ * 2/4 and 3/4 of the SMP entries allocated to it.
+ */
+static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+       uint32_t val;
+
+       /* 1/4 of SMP pool that is being fetched */
+       val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+
+}
+
+/* Fully program the pipe for scanout of 'fb' at the given src/dst
+ * rects: allocate SMP blocks, set src/dst geometry, scanout addresses,
+ * format/unpack, scaler config and fifo thresholds, then attach the
+ * plane to the crtc.  src_* come in Q16.16 fixed point as per the drm
+ * update_plane convention.  Returns 0 or a negative errno.
+ */
+int mdp5_plane_mode_set(struct drm_plane *plane,
+               struct drm_crtc *crtc, struct drm_framebuffer *fb,
+               int crtc_x, int crtc_y,
+               unsigned int crtc_w, unsigned int crtc_h,
+               uint32_t src_x, uint32_t src_y,
+               uint32_t src_w, uint32_t src_h)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+       const struct mdp_format *format;
+       uint32_t nplanes, config = 0;
+       uint32_t phasex_step = 0, phasey_step = 0;
+       uint32_t hdecm = 0, vdecm = 0;
+       int i, nblks;
+
+       nplanes = drm_format_num_planes(fb->pixel_format);
+
+       /* bad formats should already be rejected: */
+       if (WARN_ON(nplanes > pipe2nclients(pipe)))
+               return -EINVAL;
+
+       /* src values are in Q16 fixed point, convert to integer: */
+       src_x = src_x >> 16;
+       src_y = src_y >> 16;
+       src_w = src_w >> 16;
+       src_h = src_h >> 16;
+
+       DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name,
+                       fb->base.id, src_x, src_y, src_w, src_h,
+                       crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+       /*
+        * Calculate and request required # of smp blocks:
+        */
+       nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
+       if (nblks < 0)
+               return nblks;
+
+       /*
+        * Currently we update the hw for allocations/requests immediately,
+        * but once atomic modeset/pageflip is in place, the allocation
+        * would move into atomic->check_plane_state(), while updating the
+        * hw would remain here:
+        */
+       for (i = 0; i < pipe2nclients(pipe); i++)
+               mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+
+       /* scaling enabled whenever src and dst sizes differ; the actual
+        * phase-step/decimation math is still TODO (left at 0):
+        */
+       if (src_w != crtc_w) {
+               config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
+               /* TODO calc phasex_step, hdecm */
+       }
+
+       if (src_h != crtc_h) {
+               config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN;
+               /* TODO calc phasey_step, vdecm */
+       }
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+                       MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
+                       MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+                       MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+                       MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+                       MDP5_PIPE_SRC_XY_X(src_x) |
+                       MDP5_PIPE_SRC_XY_Y(src_y));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+                       MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+                       MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+                       MDP5_PIPE_OUT_XY_X(crtc_x) |
+                       MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+       mdp5_plane_set_scanout(plane, fb);
+
+       format = to_mdp_format(msm_framebuffer_format(fb));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+                       MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+                       MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+                       MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+                       MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+                       COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+                       MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+                       MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+                       COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+                       MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) |
+                       MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+                       MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+                       MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+                       MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+                       MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+                       MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+       /* not using secure mode: */
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+       /* NOTE(review): 'config' is computed above but never written —
+        * looks like REG_MDP5_PIPE_SCALE_CONFIG below should OR it in;
+        * as-is the SCALEX/SCALEY enable bits are dropped.  Confirm.
+        */
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), phasey_step);
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+                       MDP5_PIPE_DECIMATION_VERT(vdecm) |
+                       MDP5_PIPE_DECIMATION_HORZ(hdecm));
+       mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+                       MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) |
+                       MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) |
+                       MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) |
+                       MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) |
+                       MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
+                       MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
+
+       set_fifo_thresholds(plane, nblks);
+
+       /* TODO detach from old crtc (if we had more than one) */
+       mdp5_crtc_attach(crtc, plane);
+
+       return 0;
+}
+
+/* Called after the flip's vblank: commit pending->inuse SMP state for
+ * each of this pipe's clients.
+ */
+void mdp5_plane_complete_flip(struct drm_plane *plane)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
+       int i;
+
+       for (i = 0; i < pipe2nclients(pipe); i++)
+               mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+}
+
+/* Accessor: hw pipe backing this drm plane. */
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       return mdp5_plane->pipe;
+}
+
+/* initialize plane */
+/* Allocate and register a drm plane for the given hw pipe.  Returns
+ * the plane or an ERR_PTR.  'private_plane' marks it as a crtc's
+ * primary (not exposed as an overlay).
+ */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+               enum mdp5_pipe pipe, bool private_plane)
+{
+       struct drm_plane *plane = NULL;
+       struct mdp5_plane *mdp5_plane;
+       int ret;
+
+       mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+       if (!mdp5_plane) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       plane = &mdp5_plane->base;
+
+       mdp5_plane->pipe = pipe;
+       mdp5_plane->name = pipe2name(pipe);
+
+       mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
+                       ARRAY_SIZE(mdp5_plane->formats));
+
+       /* NOTE(review): drm_plane_init() can fail; its return value is
+        * not checked here, so a registration failure goes unnoticed.
+        * 0xff = possible_crtcs mask (all crtcs).
+        */
+       drm_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+                       mdp5_plane->formats, mdp5_plane->nformats,
+                       private_plane);
+
+       mdp5_plane_install_properties(plane, &plane->base);
+
+       return plane;
+
+fail:
+       if (plane)
+               mdp5_plane_destroy(plane);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
new file mode 100644 (file)
index 0000000..2d0236b
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+/* SMP - Shared Memory Pool
+ *
+ * These are shared between all the clients, where each plane in a
+ * scanout buffer is a SMP client.  Ie. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * For each block, it can be either free, or pending/in-use by a
+ * client.  The updates happen in three steps:
+ *
+ *  1) mdp5_smp_request():
+ *     When plane scanout is setup, calculate required number of
+ *     blocks needed per client, and request.  Blocks not inuse or
+ *     pending by any other client are added to client's pending
+ *     set.
+ *
+ *  2) mdp5_smp_configure():
+ *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
+ *     are configured for the union(pending, inuse)
+ *
+ *  3) mdp5_smp_commit():
+ *     After next vblank, copy pending -> inuse.  Optionally update
+ *     MDP5_SMP_ALLOC registers if there are newly unused blocks
+ *
+ * On the next vblank after changes have been committed to hw, the
+ * client's pending blocks become its in-use blocks (and no-longer
+ * in-use blocks become available to other clients).
+ *
+ * btw, hurray for confusing overloaded acronyms!  :-/
+ *
+ * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
+ * should happen at (or before)? atomic->check().  And we'd need
+ * an API to discard previous requests if update is aborted or
+ * (test-only).
+ *
+ * TODO would perhaps be nice to have debugfs to dump out kernel
+ * inuse and pending state of all clients..
+ */
+
+static DEFINE_SPINLOCK(smp_lock);
+
+
+/* step #1: update # of blocks pending for the client: */
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+               enum mdp5_client_id cid, int nblks)
+{
+       struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+       int i, ret = 0, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+       unsigned long flags;
+
+       spin_lock_irqsave(&smp_lock, flags);
+
+       /* blocks neither in-use nor pending by any client: */
+       avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+       if (nblks > avail) {
+               ret = -ENOSPC;
+               goto fail;
+       }
+
+       cur_nblks = bitmap_weight(ps->pending, cnt);
+       if (nblks > cur_nblks) {
+               /* grow the existing pending reservation: */
+               for (i = cur_nblks; i < nblks; i++) {
+                       int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+                       set_bit(blk, ps->pending);
+                       set_bit(blk, mdp5_kms->smp_state);
+               }
+       } else {
+               /* shrink the existing pending reservation: */
+               for (i = cur_nblks; i > nblks; i--) {
+                       int blk = find_first_bit(ps->pending, cnt);
+                       clear_bit(blk, ps->pending);
+                       /* don't clear in global smp_state until _commit() */
+               }
+       }
+
+fail:
+       spin_unlock_irqrestore(&smp_lock, flags);
+       /* was 'return 0;' with 'ret' uninitialized on success — the
+        * -ENOSPC error was never propagated to callers:
+        */
+       return ret;
+}
+
+/* Program the SMP_ALLOC registers so that every block set in 'assigned'
+ * is owned by client 'cid'.  Each 32-bit ALLOC register packs three
+ * client-id fields, hence the blk/3 (register index) and blk%3 (field
+ * within the register) split below.
+ */
+static void update_smp_state(struct mdp5_kms *mdp5_kms,
+               enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
+{
+       int cnt = mdp5_kms->smp_blk_cnt;
+       uint32_t blk, val;
+
+       for_each_set_bit(blk, *assigned, cnt) {
+               int idx = blk / 3;
+               int fld = blk % 3;
+
+               val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+
+               switch (fld) {
+               case 0:
+                       val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+                       val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+                       break;
+               case 1:
+                       val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+                       val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+                       break;
+               case 2:
+                       val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+                       val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+                       break;
+               }
+
+               /* mirror the assignment into both W and R alloc registers: */
+               mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+               mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+       }
+}
+
+/* step #2: configure hw for union(pending, inuse): */
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+       struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+       int cnt = mdp5_kms->smp_blk_cnt;
+       mdp5_smp_state_t assigned;
+
+       /* both still-in-use and newly-pending blocks must stay assigned
+        * to this client until the switch-over completes at vblank:
+        */
+       bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+       update_smp_state(mdp5_kms, cid, &assigned);
+}
+
+/* step #3: after vblank, copy pending -> inuse: */
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+{
+       struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
+       int cnt = mdp5_kms->smp_blk_cnt;
+       mdp5_smp_state_t released;
+
+       /*
+        * Figure out if there are any blocks we were previously
+        * using, which can be released and made available to other
+        * clients:
+        */
+       if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&smp_lock, flags);
+               /* clear released blocks: */
+               bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
+                               released, cnt);
+               spin_unlock_irqrestore(&smp_lock, flags);
+
+               /* and hand them back to the hw as unused: */
+               update_smp_state(mdp5_kms, CID_UNUSED, &released);
+       }
+
+       bitmap_copy(ps->inuse, ps->pending, cnt);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
new file mode 100644 (file)
index 0000000..0ab739e
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include "msm_drv.h"
+
+#define MAX_SMP_BLOCKS  22
+#define SMP_BLK_SIZE    4096
+#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+/* Per-client SMP bookkeeping — see the comment atop mdp5_smp.c: */
+struct mdp5_client_smp_state {
+       mdp5_smp_state_t inuse;   /* blocks the client is scanning out from */
+       mdp5_smp_state_t pending; /* blocks reserved for the next config */
+};
+
+struct mdp5_kms;
+
+int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
+void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+
+
+#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
new file mode 100644 (file)
index 0000000..a9629b8
--- /dev/null
@@ -0,0 +1,78 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    647 bytes, from 2013-11-30 14:45:35)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  17996 bytes, from 2013-12-01 19:10:31)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   1615 bytes, from 2013-11-30 15:00:52)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  20932 bytes, from 2013-12-01 15:13:04)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_mixer_stage_id {
+       STAGE_UNUSED = 0,
+       STAGE_BASE = 1,
+       STAGE0 = 2,
+       STAGE1 = 3,
+       STAGE2 = 4,
+       STAGE3 = 5,
+};
+
+enum mdp_alpha_type {
+       FG_CONST = 0,
+       BG_CONST = 1,
+       FG_PIXEL = 2,
+       BG_PIXEL = 3,
+};
+
+enum mdp_bpc {
+       BPC1 = 0,
+       BPC5 = 1,
+       BPC6 = 2,
+       BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+       BPC1A = 0,
+       BPC4A = 1,
+       BPC6A = 2,
+       BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
new file mode 100644 (file)
index 0000000..e0a6ffb
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
+               .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+               .bpc_a = BPC ## a ## A,                          \
+               .bpc_r = BPC ## r,                               \
+               .bpc_g = BPC ## g,                               \
+               .bpc_b = BPC ## b,                               \
+               .unpack = { e0, e1, e2, e3 },                    \
+               .alpha_enable = alpha,                           \
+               .unpack_tight = tight,                           \
+               .cpp = c,                                        \
+               .unpack_count = cnt,                             \
+       }
+
+#define BPC0A 0
+
+static const struct mdp_format formats[] = {
+       /*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt */
+       FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4),
+       FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4),
+       FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3),
+       FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3),
+       FMT(RGB565,   0, 5, 6, 5,  1, 0, 2, 0,  false,  true,  2,  3),
+       FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3),
+};
+
+/* Fill 'pixel_formats' with the drm fourcc's supported by MDP, up to
+ * 'max_formats' entries; returns the number of entries written.
+ */
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats)
+{
+       uint32_t i, n = ARRAY_SIZE(formats);
+
+       /* never write past the caller's array: */
+       if (n > max_formats)
+               n = max_formats;
+
+       for (i = 0; i < n; i++)
+               pixel_formats[i] = formats[i].base.pixel_format;
+
+       return i;
+}
+
+/* Look up the mdp format description for a drm fourcc; returns NULL
+ * if the format is not supported.
+ */
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+{
+       const struct mdp_format *f;
+       const struct mdp_format *end = formats + ARRAY_SIZE(formats);
+
+       for (f = formats; f < end; f++) {
+               if (f->base.pixel_format == format)
+                       return &f->base;
+       }
+
+       return NULL;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
new file mode 100644 (file)
index 0000000..3be48f7
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+       struct mdp_irq irq;
+       int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+/* Recompute and program the combined hw irq mask: userspace vblank
+ * bits plus the mask of every handler on the transient irq_list.
+ * Caller must hold list_lock.
+ */
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+       struct mdp_irq *irq;
+       uint32_t irqmask = mdp_kms->vblank_mask;
+
+       /* spin_is_locked() is always false on !CONFIG_SMP builds, so
+        * BUG_ON(!spin_is_locked(&list_lock)) would fire spuriously on
+        * uniprocessor kernels; assert_spin_locked() is correct there:
+        */
+       assert_spin_locked(&list_lock);
+
+       list_for_each_entry(irq, &mdp_kms->irq_list, node)
+               irqmask |= irq->irqmask;
+
+       mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+/* update_irq() for callers not already holding list_lock: */
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&list_lock, flags);
+       update_irq(mdp_kms);
+       spin_unlock_irqrestore(&list_lock, flags);
+}
+
+/* Dispatch hw irq status bits to every registered handler whose mask
+ * matches.  Called from the low-level irq handler with the latched
+ * status register value.
+ */
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+       struct mdp_irq *handler, *n;
+       unsigned long flags;
+
+       spin_lock_irqsave(&list_lock, flags);
+       /* in_irq makes register/unregister defer the irqmask update to
+        * us (see needs_update there), since we reprogram it below:
+        */
+       mdp_kms->in_irq = true;
+       list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+               if (handler->irqmask & status) {
+                       /* drop the lock across the callout — handlers may
+                        * themselves (un)register irqs; _safe iteration
+                        * tolerates removal of the current node:
+                        */
+                       spin_unlock_irqrestore(&list_lock, flags);
+                       handler->irq(handler, handler->irqmask & status);
+                       spin_lock_irqsave(&list_lock, flags);
+               }
+       }
+       mdp_kms->in_irq = false;
+       update_irq(mdp_kms);
+       spin_unlock_irqrestore(&list_lock, flags);
+
+}
+
+/* Set or clear bits in the mask of vblank irqs requested by userspace,
+ * then reprogram the hw irq mask to match.
+ */
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&list_lock, flags);
+       if (enable)
+               mdp_kms->vblank_mask |= mask;
+       else
+               mdp_kms->vblank_mask &= ~mask;
+       update_irq(mdp_kms);
+       spin_unlock_irqrestore(&list_lock, flags);
+}
+
+/* irq callback used by mdp_irq_wait(): decrement the wait count and
+ * wake the sleeper.
+ */
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+       struct mdp_irq_wait *wait =
+                       container_of(irq, struct mdp_irq_wait, irq);
+       wait->count--;
+       wake_up_all(&wait_event);
+}
+
+/* Sleep until the next occurrence of the irq(s) in 'irqmask'.  (Note
+ * the waitqueue is named 'wait_event', same as the macro used to wait
+ * on it — confusing but legal.)
+ */
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+       struct mdp_irq_wait wait = {
+               .irq = {
+                       .irq = wait_irq,
+                       .irqmask = irqmask,
+               },
+               .count = 1,
+       };
+       mdp_irq_register(mdp_kms, &wait.irq);
+       wait_event(wait_event, (wait.count <= 0));
+       mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+/* Register a transient irq handler.  The hw irqmask is reprogrammed
+ * immediately unless we are inside mdp_dispatch_irqs(), which does
+ * its own update_irq() when the dispatch loop finishes.
+ */
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+       unsigned long flags;
+       bool needs_update = false;
+
+       spin_lock_irqsave(&list_lock, flags);
+
+       if (!irq->registered) {
+               irq->registered = true;
+               list_add(&irq->node, &mdp_kms->irq_list);
+               needs_update = !mdp_kms->in_irq;
+       }
+
+       spin_unlock_irqrestore(&list_lock, flags);
+
+       if (needs_update)
+               update_irq_unlocked(mdp_kms);
+}
+
+/* Unregister a transient irq handler (inverse of mdp_irq_register());
+ * same deferred-update rule applies while dispatching irqs.
+ */
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+       unsigned long flags;
+       bool needs_update = false;
+
+       spin_lock_irqsave(&list_lock, flags);
+
+       if (irq->registered) {
+               irq->registered = false;
+               list_del(&irq->node);
+               needs_update = !mdp_kms->in_irq;
+       }
+
+       spin_unlock_irqrestore(&list_lock, flags);
+
+       if (needs_update)
+               update_irq_unlocked(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
new file mode 100644 (file)
index 0000000..99557b5
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+       struct msm_kms_funcs base;
+       void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+       struct msm_kms base;
+
+       const struct mdp_kms_funcs *funcs;
+
+       /* irq handling: */
+       bool in_irq;
+       struct list_head irq_list;    /* list of mdp4_irq */
+       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+/* Initialize the common mdp_kms base shared by mdp4/mdp5 backends: */
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+               const struct mdp_kms_funcs *funcs)
+{
+       mdp_kms->funcs = funcs;
+       INIT_LIST_HEAD(&mdp_kms->irq_list);
+       msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+       struct list_head node;
+       uint32_t irqmask;
+       bool registered;
+       void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+       struct msm_format base;
+       enum mdp_bpc bpc_r, bpc_g, bpc_b;
+       enum mdp_bpc_alpha bpc_a;
+       uint8_t unpack[4];
+       bool alpha_enable, unpack_tight;
+       uint8_t cpp, unpack_count;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+
+#endif /* __MDP_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
deleted file mode 100644 (file)
index 9908ffe..0000000
+++ /dev/null
@@ -1,1061 +0,0 @@
-#ifndef MDP4_XML
-#define MDP4_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    595 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml           (  19332 bytes, from 2013-10-07 16:36:48)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1544 bytes, from 2013-08-16 19:17:05)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  19288 bytes, from 2013-08-11 18:14:15)
-
-Copyright (C) 2013 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum mdp4_bpc {
-       BPC1 = 0,
-       BPC5 = 1,
-       BPC6 = 2,
-       BPC8 = 3,
-};
-
-enum mdp4_bpc_alpha {
-       BPC1A = 0,
-       BPC4A = 1,
-       BPC6A = 2,
-       BPC8A = 3,
-};
-
-enum mdp4_alpha_type {
-       FG_CONST = 0,
-       BG_CONST = 1,
-       FG_PIXEL = 2,
-       BG_PIXEL = 3,
-};
-
-enum mdp4_pipe {
-       VG1 = 0,
-       VG2 = 1,
-       RGB1 = 2,
-       RGB2 = 3,
-       RGB3 = 4,
-       VG3 = 5,
-       VG4 = 6,
-};
-
-enum mdp4_mixer {
-       MIXER0 = 0,
-       MIXER1 = 1,
-       MIXER2 = 2,
-};
-
-enum mdp4_mixer_stage_id {
-       STAGE_UNUSED = 0,
-       STAGE_BASE = 1,
-       STAGE0 = 2,
-       STAGE1 = 3,
-       STAGE2 = 4,
-       STAGE3 = 5,
-};
-
-enum mdp4_intf {
-       INTF_LCDC_DTV = 0,
-       INTF_DSI_VIDEO = 1,
-       INTF_DSI_CMD = 2,
-       INTF_EBI2_TV = 3,
-};
-
-enum mdp4_cursor_format {
-       CURSOR_ARGB = 1,
-       CURSOR_XRGB = 2,
-};
-
-enum mdp4_dma {
-       DMA_P = 0,
-       DMA_S = 1,
-       DMA_E = 2,
-};
-
-#define MDP4_IRQ_OVERLAY0_DONE                                 0x00000001
-#define MDP4_IRQ_OVERLAY1_DONE                                 0x00000002
-#define MDP4_IRQ_DMA_S_DONE                                    0x00000004
-#define MDP4_IRQ_DMA_E_DONE                                    0x00000008
-#define MDP4_IRQ_DMA_P_DONE                                    0x00000010
-#define MDP4_IRQ_VG1_HISTOGRAM                                 0x00000020
-#define MDP4_IRQ_VG2_HISTOGRAM                                 0x00000040
-#define MDP4_IRQ_PRIMARY_VSYNC                                 0x00000080
-#define MDP4_IRQ_PRIMARY_INTF_UDERRUN                          0x00000100
-#define MDP4_IRQ_EXTERNAL_VSYNC                                        0x00000200
-#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN                         0x00000400
-#define MDP4_IRQ_PRIMARY_RDPTR                                 0x00000800
-#define MDP4_IRQ_DMA_P_HISTOGRAM                               0x00020000
-#define MDP4_IRQ_DMA_S_HISTOGRAM                               0x04000000
-#define MDP4_IRQ_OVERLAY2_DONE                                 0x40000000
-#define REG_MDP4_VERSION                                       0x00000000
-#define MDP4_VERSION_MINOR__MASK                               0x00ff0000
-#define MDP4_VERSION_MINOR__SHIFT                              16
-static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
-{
-       return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
-}
-#define MDP4_VERSION_MAJOR__MASK                               0xff000000
-#define MDP4_VERSION_MAJOR__SHIFT                              24
-static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
-{
-       return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
-}
-
-#define REG_MDP4_OVLP0_KICK                                    0x00000004
-
-#define REG_MDP4_OVLP1_KICK                                    0x00000008
-
-#define REG_MDP4_OVLP2_KICK                                    0x000000d0
-
-#define REG_MDP4_DMA_P_KICK                                    0x0000000c
-
-#define REG_MDP4_DMA_S_KICK                                    0x00000010
-
-#define REG_MDP4_DMA_E_KICK                                    0x00000014
-
-#define REG_MDP4_DISP_STATUS                                   0x00000018
-
-#define REG_MDP4_DISP_INTF_SEL                                 0x00000038
-#define MDP4_DISP_INTF_SEL_PRIM__MASK                          0x00000003
-#define MDP4_DISP_INTF_SEL_PRIM__SHIFT                         0
-static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
-{
-       return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
-}
-#define MDP4_DISP_INTF_SEL_SEC__MASK                           0x0000000c
-#define MDP4_DISP_INTF_SEL_SEC__SHIFT                          2
-static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
-{
-       return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
-}
-#define MDP4_DISP_INTF_SEL_EXT__MASK                           0x00000030
-#define MDP4_DISP_INTF_SEL_EXT__SHIFT                          4
-static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
-{
-       return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
-}
-#define MDP4_DISP_INTF_SEL_DSI_VIDEO                           0x00000040
-#define MDP4_DISP_INTF_SEL_DSI_CMD                             0x00000080
-
-#define REG_MDP4_RESET_STATUS                                  0x0000003c
-
-#define REG_MDP4_READ_CNFG                                     0x0000004c
-
-#define REG_MDP4_INTR_ENABLE                                   0x00000050
-
-#define REG_MDP4_INTR_STATUS                                   0x00000054
-
-#define REG_MDP4_INTR_CLEAR                                    0x00000058
-
-#define REG_MDP4_EBI2_LCD0                                     0x00000060
-
-#define REG_MDP4_EBI2_LCD1                                     0x00000064
-
-#define REG_MDP4_PORTMAP_MODE                                  0x00000070
-
-#define REG_MDP4_CS_CONTROLLER0                                        0x000000c0
-
-#define REG_MDP4_CS_CONTROLLER1                                        0x000000c4
-
-#define REG_MDP4_LAYERMIXER2_IN_CFG                            0x000100f0
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK                    0x00000007
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT                   0
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1                   0x00000008
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK                    0x00000070
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT                   4
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1                   0x00000080
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK                    0x00000700
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT                   8
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1                   0x00000800
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK                    0x00007000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT                   12
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1                   0x00008000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK                    0x00070000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT                   16
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1                   0x00080000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK                    0x00700000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT                   20
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1                   0x00800000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK                    0x07000000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT                   24
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1                   0x08000000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK                    0x70000000
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT                   28
-static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
-}
-#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1                   0x80000000
-
-#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD               0x000100fc
-
-#define REG_MDP4_LAYERMIXER_IN_CFG                             0x00010100
-#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK                     0x00000007
-#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT                    0
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1                    0x00000008
-#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK                     0x00000070
-#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT                    4
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1                    0x00000080
-#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK                     0x00000700
-#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT                    8
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1                    0x00000800
-#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK                     0x00007000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT                    12
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1                    0x00008000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK                     0x00070000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT                    16
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1                    0x00080000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK                     0x00700000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT                    20
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1                    0x00800000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK                     0x07000000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT                    24
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1                    0x08000000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK                     0x70000000
-#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT                    28
-static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
-{
-       return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
-}
-#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1                    0x80000000
-
-#define REG_MDP4_VG2_SRC_FORMAT                                        0x00030050
-
-#define REG_MDP4_VG2_CONST_COLOR                               0x00031008
-
-#define REG_MDP4_OVERLAY_FLUSH                                 0x00018000
-#define MDP4_OVERLAY_FLUSH_OVLP0                               0x00000001
-#define MDP4_OVERLAY_FLUSH_OVLP1                               0x00000002
-#define MDP4_OVERLAY_FLUSH_VG1                                 0x00000004
-#define MDP4_OVERLAY_FLUSH_VG2                                 0x00000008
-#define MDP4_OVERLAY_FLUSH_RGB1                                        0x00000010
-#define MDP4_OVERLAY_FLUSH_RGB2                                        0x00000020
-
-static inline uint32_t __offset_OVLP(uint32_t idx)
-{
-       switch (idx) {
-               case 0: return 0x00010000;
-               case 1: return 0x00018000;
-               case 2: return 0x00088000;
-               default: return INVALID_IDX(idx);
-       }
-}
-static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
-#define MDP4_OVLP_SIZE_HEIGHT__MASK                            0xffff0000
-#define MDP4_OVLP_SIZE_HEIGHT__SHIFT                           16
-static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
-}
-#define MDP4_OVLP_SIZE_WIDTH__MASK                             0x0000ffff
-#define MDP4_OVLP_SIZE_WIDTH__SHIFT                            0
-static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
-
-static inline uint32_t __offset_STAGE(uint32_t idx)
-{
-       switch (idx) {
-               case 0: return 0x00000104;
-               case 1: return 0x00000124;
-               case 2: return 0x00000144;
-               case 3: return 0x00000160;
-               default: return INVALID_IDX(idx);
-       }
-}
-static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK                      0x00000003
-#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT                     0
-static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
-{
-       return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
-}
-#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA                                0x00000004
-#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA                                0x00000008
-#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK                      0x00000030
-#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT                     4
-static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
-{
-       return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
-}
-#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA                                0x00000040
-#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA                                0x00000080
-#define MDP4_OVLP_STAGE_OP_FG_TRANSP                           0x00000100
-#define MDP4_OVLP_STAGE_OP_BG_TRANSP                           0x00000200
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
-
-static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
-{
-       switch (idx) {
-               case 0: return 0x00001004;
-               case 1: return 0x00001404;
-               case 2: return 0x00001804;
-               case 3: return 0x00001b84;
-               default: return INVALID_IDX(idx);
-       }
-}
-static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
-
-static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
-#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA                       0x00000001
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
-
-static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
-
-
-static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
-
-#define REG_MDP4_DMA_P_OP_MODE                                 0x00090070
-
-static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
-
-static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
-
-#define REG_MDP4_DMA_S_OP_MODE                                 0x000a0028
-
-static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
-
-static inline uint32_t __offset_DMA(enum mdp4_dma idx)
-{
-       switch (idx) {
-               case DMA_P: return 0x00090000;
-               case DMA_S: return 0x000a0000;
-               case DMA_E: return 0x000b0000;
-               default: return INVALID_IDX(idx);
-       }
-}
-static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
-#define MDP4_DMA_CONFIG_G_BPC__MASK                            0x00000003
-#define MDP4_DMA_CONFIG_G_BPC__SHIFT                           0
-static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
-{
-       return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
-}
-#define MDP4_DMA_CONFIG_B_BPC__MASK                            0x0000000c
-#define MDP4_DMA_CONFIG_B_BPC__SHIFT                           2
-static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
-{
-       return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
-}
-#define MDP4_DMA_CONFIG_R_BPC__MASK                            0x00000030
-#define MDP4_DMA_CONFIG_R_BPC__SHIFT                           4
-static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
-{
-       return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
-}
-#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB                         0x00000080
-#define MDP4_DMA_CONFIG_PACK__MASK                             0x0000ff00
-#define MDP4_DMA_CONFIG_PACK__SHIFT                            8
-static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
-{
-       return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
-}
-#define MDP4_DMA_CONFIG_DEFLKR_EN                              0x01000000
-#define MDP4_DMA_CONFIG_DITHER_EN                              0x01000000
-
-static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
-#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK                         0xffff0000
-#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT                                16
-static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
-}
-#define MDP4_DMA_SRC_SIZE_WIDTH__MASK                          0x0000ffff
-#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT                         0
-static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
-#define MDP4_DMA_DST_SIZE_HEIGHT__MASK                         0xffff0000
-#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT                                16
-static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
-}
-#define MDP4_DMA_DST_SIZE_WIDTH__MASK                          0x0000ffff
-#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT                         0
-static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
-#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK                       0x0000007f
-#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT                      0
-static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
-}
-#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK                      0x007f0000
-#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT                     16
-static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
-#define MDP4_DMA_CURSOR_POS_X__MASK                            0x0000ffff
-#define MDP4_DMA_CURSOR_POS_X__SHIFT                           0
-static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
-{
-       return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
-}
-#define MDP4_DMA_CURSOR_POS_Y__MASK                            0xffff0000
-#define MDP4_DMA_CURSOR_POS_Y__SHIFT                           16
-static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
-{
-       return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
-}
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN                 0x00000001
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK              0x00000006
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT             1
-static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
-{
-       return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
-}
-#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN                 0x00000008
-
-static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
-
-static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
-
-
-static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK                                0xffff0000
-#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT                       16
-static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
-}
-#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK                         0x0000ffff
-#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT                                0
-static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_XY_Y__MASK                               0xffff0000
-#define MDP4_PIPE_SRC_XY_Y__SHIFT                              16
-static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
-}
-#define MDP4_PIPE_SRC_XY_X__MASK                               0x0000ffff
-#define MDP4_PIPE_SRC_XY_X__SHIFT                              0
-static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
-#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK                                0xffff0000
-#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT                       16
-static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
-}
-#define MDP4_PIPE_DST_SIZE_WIDTH__MASK                         0x0000ffff
-#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT                                0
-static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
-#define MDP4_PIPE_DST_XY_Y__MASK                               0xffff0000
-#define MDP4_PIPE_DST_XY_Y__SHIFT                              16
-static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
-}
-#define MDP4_PIPE_DST_XY_X__MASK                               0x0000ffff
-#define MDP4_PIPE_DST_XY_X__SHIFT                              0
-static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK                                0x0000ffff
-#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT                       0
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
-}
-#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK                                0xffff0000
-#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT                       16
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK                                0x0000ffff
-#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT                       0
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
-}
-#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK                                0xffff0000
-#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT                       16
-static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
-#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK                      0xffff0000
-#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT                     16
-static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
-}
-#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK                       0x0000ffff
-#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT                      0
-static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK                       0x00000003
-#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT                      0
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
-{
-       return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK                       0x0000000c
-#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT                      2
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
-{
-       return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK                       0x00000030
-#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT                      4
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
-{
-       return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK                       0x000000c0
-#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT                      6
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
-{
-       return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE                      0x00000100
-#define MDP4_PIPE_SRC_FORMAT_CPP__MASK                         0x00000600
-#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT                                9
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_ROTATED_90                                0x00001000
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK                        0x00006000
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT               13
-static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
-}
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT                      0x00020000
-#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB                  0x00040000
-#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL                                0x00400000
-
-static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
-#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK                       0x000000ff
-#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT                      0
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
-}
-#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK                       0x0000ff00
-#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT                      8
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
-}
-#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK                       0x00ff0000
-#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT                      16
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
-}
-#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK                       0xff000000
-#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT                      24
-static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
-{
-       return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
-}
-
-static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
-#define MDP4_PIPE_OP_MODE_SCALEX_EN                            0x00000001
-#define MDP4_PIPE_OP_MODE_SCALEY_EN                            0x00000002
-#define MDP4_PIPE_OP_MODE_SRC_YCBCR                            0x00000200
-#define MDP4_PIPE_OP_MODE_DST_YCBCR                            0x00000400
-#define MDP4_PIPE_OP_MODE_CSC_EN                               0x00000800
-#define MDP4_PIPE_OP_MODE_FLIP_LR                              0x00002000
-#define MDP4_PIPE_OP_MODE_FLIP_UD                              0x00004000
-#define MDP4_PIPE_OP_MODE_DITHER_EN                            0x00008000
-#define MDP4_PIPE_OP_MODE_IGC_LUT_EN                           0x00010000
-#define MDP4_PIPE_OP_MODE_DEINT_EN                             0x00040000
-#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF                                0x00080000
-
-static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
-
-
-static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
-
-static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
-
-#define REG_MDP4_LCDC                                          0x000c0000
-
-#define REG_MDP4_LCDC_ENABLE                                   0x000c0000
-
-#define REG_MDP4_LCDC_HSYNC_CTRL                               0x000c0004
-#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK                      0x0000ffff
-#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT                     0
-static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
-}
-#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK                      0xffff0000
-#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT                     16
-static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
-}
-
-#define REG_MDP4_LCDC_VSYNC_PERIOD                             0x000c0008
-
-#define REG_MDP4_LCDC_VSYNC_LEN                                        0x000c000c
-
-#define REG_MDP4_LCDC_DISPLAY_HCTRL                            0x000c0010
-#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK                    0x0000ffff
-#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT                   0
-static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
-}
-#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK                      0xffff0000
-#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT                     16
-static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
-}
-
-#define REG_MDP4_LCDC_DISPLAY_VSTART                           0x000c0014
-
-#define REG_MDP4_LCDC_DISPLAY_VEND                             0x000c0018
-
-#define REG_MDP4_LCDC_ACTIVE_HCTL                              0x000c001c
-#define MDP4_LCDC_ACTIVE_HCTL_START__MASK                      0x00007fff
-#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT                     0
-static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
-}
-#define MDP4_LCDC_ACTIVE_HCTL_END__MASK                                0x7fff0000
-#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT                       16
-static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
-}
-#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X                   0x80000000
-
-#define REG_MDP4_LCDC_ACTIVE_VSTART                            0x000c0020
-
-#define REG_MDP4_LCDC_ACTIVE_VEND                              0x000c0024
-
-#define REG_MDP4_LCDC_BORDER_CLR                               0x000c0028
-
-#define REG_MDP4_LCDC_UNDERFLOW_CLR                            0x000c002c
-#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK                    0x00ffffff
-#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT                   0
-static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
-{
-       return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
-}
-#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY                        0x80000000
-
-#define REG_MDP4_LCDC_HSYNC_SKEW                               0x000c0030
-
-#define REG_MDP4_LCDC_TEST_CNTL                                        0x000c0034
-
-#define REG_MDP4_LCDC_CTRL_POLARITY                            0x000c0038
-#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW                      0x00000001
-#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW                      0x00000002
-#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW                    0x00000004
-
-#define REG_MDP4_DTV                                           0x000d0000
-
-#define REG_MDP4_DTV_ENABLE                                    0x000d0000
-
-#define REG_MDP4_DTV_HSYNC_CTRL                                        0x000d0004
-#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK                       0x0000ffff
-#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT                      0
-static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
-{
-       return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
-}
-#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK                       0xffff0000
-#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT                      16
-static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
-{
-       return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
-}
-
-#define REG_MDP4_DTV_VSYNC_PERIOD                              0x000d0008
-
-#define REG_MDP4_DTV_VSYNC_LEN                                 0x000d000c
-
-#define REG_MDP4_DTV_DISPLAY_HCTRL                             0x000d0018
-#define MDP4_DTV_DISPLAY_HCTRL_START__MASK                     0x0000ffff
-#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT                    0
-static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
-{
-       return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
-}
-#define MDP4_DTV_DISPLAY_HCTRL_END__MASK                       0xffff0000
-#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT                      16
-static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
-{
-       return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
-}
-
-#define REG_MDP4_DTV_DISPLAY_VSTART                            0x000d001c
-
-#define REG_MDP4_DTV_DISPLAY_VEND                              0x000d0020
-
-#define REG_MDP4_DTV_ACTIVE_HCTL                               0x000d002c
-#define MDP4_DTV_ACTIVE_HCTL_START__MASK                       0x00007fff
-#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT                      0
-static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
-{
-       return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
-}
-#define MDP4_DTV_ACTIVE_HCTL_END__MASK                         0x7fff0000
-#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT                                16
-static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
-{
-       return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
-}
-#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X                    0x80000000
-
-#define REG_MDP4_DTV_ACTIVE_VSTART                             0x000d0030
-
-#define REG_MDP4_DTV_ACTIVE_VEND                               0x000d0038
-
-#define REG_MDP4_DTV_BORDER_CLR                                        0x000d0040
-
-#define REG_MDP4_DTV_UNDERFLOW_CLR                             0x000d0044
-#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK                     0x00ffffff
-#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT                    0
-static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
-{
-       return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
-}
-#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY                 0x80000000
-
-#define REG_MDP4_DTV_HSYNC_SKEW                                        0x000d0048
-
-#define REG_MDP4_DTV_TEST_CNTL                                 0x000d004c
-
-#define REG_MDP4_DTV_CTRL_POLARITY                             0x000d0050
-#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW                       0x00000001
-#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW                       0x00000002
-#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW                     0x00000004
-
-#define REG_MDP4_DSI                                           0x000e0000
-
-#define REG_MDP4_DSI_ENABLE                                    0x000e0000
-
-#define REG_MDP4_DSI_HSYNC_CTRL                                        0x000e0004
-#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK                       0x0000ffff
-#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT                      0
-static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
-{
-       return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
-}
-#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK                       0xffff0000
-#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT                      16
-static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
-{
-       return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
-}
-
-#define REG_MDP4_DSI_VSYNC_PERIOD                              0x000e0008
-
-#define REG_MDP4_DSI_VSYNC_LEN                                 0x000e000c
-
-#define REG_MDP4_DSI_DISPLAY_HCTRL                             0x000e0010
-#define MDP4_DSI_DISPLAY_HCTRL_START__MASK                     0x0000ffff
-#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT                    0
-static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
-{
-       return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
-}
-#define MDP4_DSI_DISPLAY_HCTRL_END__MASK                       0xffff0000
-#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT                      16
-static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
-{
-       return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
-}
-
-#define REG_MDP4_DSI_DISPLAY_VSTART                            0x000e0014
-
-#define REG_MDP4_DSI_DISPLAY_VEND                              0x000e0018
-
-#define REG_MDP4_DSI_ACTIVE_HCTL                               0x000e001c
-#define MDP4_DSI_ACTIVE_HCTL_START__MASK                       0x00007fff
-#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT                      0
-static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
-{
-       return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
-}
-#define MDP4_DSI_ACTIVE_HCTL_END__MASK                         0x7fff0000
-#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT                                16
-static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
-{
-       return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
-}
-#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X                    0x80000000
-
-#define REG_MDP4_DSI_ACTIVE_VSTART                             0x000e0020
-
-#define REG_MDP4_DSI_ACTIVE_VEND                               0x000e0024
-
-#define REG_MDP4_DSI_BORDER_CLR                                        0x000e0028
-
-#define REG_MDP4_DSI_UNDERFLOW_CLR                             0x000e002c
-#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK                     0x00ffffff
-#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT                    0
-static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
-{
-       return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
-}
-#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY                 0x80000000
-
-#define REG_MDP4_DSI_HSYNC_SKEW                                        0x000e0030
-
-#define REG_MDP4_DSI_TEST_CNTL                                 0x000e0034
-
-#define REG_MDP4_DSI_CTRL_POLARITY                             0x000e0038
-#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW                       0x00000001
-#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW                       0x00000002
-#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW                     0x00000004
-
-
-#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
deleted file mode 100644 (file)
index 019d530..0000000
+++ /dev/null
@@ -1,753 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "mdp4_kms.h"
-
-#include <drm/drm_mode.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_flip_work.h"
-
-struct mdp4_crtc {
-       struct drm_crtc base;
-       char name[8];
-       struct drm_plane *plane;
-       struct drm_plane *planes[8];
-       int id;
-       int ovlp;
-       enum mdp4_dma dma;
-       bool enabled;
-
-       /* which mixer/encoder we route output to: */
-       int mixer;
-
-       struct {
-               spinlock_t lock;
-               bool stale;
-               uint32_t width, height;
-
-               /* next cursor to scan-out: */
-               uint32_t next_iova;
-               struct drm_gem_object *next_bo;
-
-               /* current cursor being scanned out: */
-               struct drm_gem_object *scanout_bo;
-       } cursor;
-
-
-       /* if there is a pending flip, these will be non-null: */
-       struct drm_pending_vblank_event *event;
-       struct msm_fence_cb pageflip_cb;
-
-#define PENDING_CURSOR 0x1
-#define PENDING_FLIP   0x2
-       atomic_t pending;
-
-       /* the fb that we currently hold a scanout ref to: */
-       struct drm_framebuffer *fb;
-
-       /* for unref'ing framebuffers after scanout completes: */
-       struct drm_flip_work unref_fb_work;
-
-       /* for unref'ing cursor bo's after scanout completes: */
-       struct drm_flip_work unref_cursor_work;
-
-       struct mdp4_irq vblank;
-       struct mdp4_irq err;
-};
-#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
-
-static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
-{
-       struct msm_drm_private *priv = crtc->dev->dev_private;
-       return to_mdp4_kms(priv->kms);
-}
-
-static void update_fb(struct drm_crtc *crtc, bool async,
-               struct drm_framebuffer *new_fb)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_framebuffer *old_fb = mdp4_crtc->fb;
-
-       if (old_fb)
-               drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
-
-       /* grab reference to incoming scanout fb: */
-       drm_framebuffer_reference(new_fb);
-       mdp4_crtc->base.fb = new_fb;
-       mdp4_crtc->fb = new_fb;
-
-       if (!async) {
-               /* enable vblank to pick up the old_fb */
-               mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
-       }
-}
-
-/* if file!=NULL, this is preclose potential cancel-flip path */
-static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct drm_pending_vblank_event *event;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       event = mdp4_crtc->event;
-       if (event) {
-               /* if regular vblank case (!file) or if cancel-flip from
-                * preclose on file that requested flip, then send the
-                * event:
-                */
-               if (!file || (event->base.file_priv == file)) {
-                       mdp4_crtc->event = NULL;
-                       drm_send_vblank_event(dev, mdp4_crtc->id, event);
-               }
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void crtc_flush(struct drm_crtc *crtc)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       uint32_t i, flush = 0;
-
-       for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-               struct drm_plane *plane = mdp4_crtc->planes[i];
-               if (plane) {
-                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-                       flush |= pipe2flush(pipe_id);
-               }
-       }
-       flush |= ovlp2flush(mdp4_crtc->ovlp);
-
-       DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
-       mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-       atomic_or(pending, &mdp4_crtc->pending);
-       mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
-}
-
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
-       struct mdp4_crtc *mdp4_crtc =
-               container_of(cb, struct mdp4_crtc, pageflip_cb);
-       struct drm_crtc *crtc = &mdp4_crtc->base;
-       struct drm_framebuffer *fb = crtc->fb;
-
-       if (!fb)
-               return;
-
-       mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-       crtc_flush(crtc);
-
-       /* enable vblank to complete flip: */
-       request_pending(crtc, PENDING_FLIP);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
-       struct mdp4_crtc *mdp4_crtc =
-               container_of(work, struct mdp4_crtc, unref_fb_work);
-       struct drm_device *dev = mdp4_crtc->base.dev;
-
-       mutex_lock(&dev->mode_config.mutex);
-       drm_framebuffer_unreference(val);
-       mutex_unlock(&dev->mode_config.mutex);
-}
-
-static void unref_cursor_worker(struct drm_flip_work *work, void *val)
-{
-       struct mdp4_crtc *mdp4_crtc =
-               container_of(work, struct mdp4_crtc, unref_cursor_work);
-       struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
-
-       msm_gem_put_iova(val, mdp4_kms->id);
-       drm_gem_object_unreference_unlocked(val);
-}
-
-static void mdp4_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-       mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
-
-       drm_crtc_cleanup(crtc);
-       drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
-       drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
-
-       kfree(mdp4_crtc);
-}
-
-static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       bool enabled = (mode == DRM_MODE_DPMS_ON);
-
-       DBG("%s: mode=%d", mdp4_crtc->name, mode);
-
-       if (enabled != mdp4_crtc->enabled) {
-               if (enabled) {
-                       mdp4_enable(mdp4_kms);
-                       mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
-               } else {
-                       mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
-                       mdp4_disable(mdp4_kms);
-               }
-               mdp4_crtc->enabled = enabled;
-       }
-}
-
-static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static void blend_setup(struct drm_crtc *crtc)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       int i, ovlp = mdp4_crtc->ovlp;
-       uint32_t mixer_cfg = 0;
-       static const enum mdp4_mixer_stage_id stages[] = {
-                       STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
-       };
-       /* statically (for now) map planes to mixer stage (z-order): */
-       static const int idxs[] = {
-                       [VG1]  = 1,
-                       [VG2]  = 2,
-                       [RGB1] = 0,
-                       [RGB2] = 0,
-                       [RGB3] = 0,
-                       [VG3]  = 3,
-                       [VG4]  = 4,
-
-       };
-       bool alpha[4]= { false, false, false, false };
-
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
-
-       /* TODO single register for all CRTCs, so this won't work properly
-        * when multiple CRTCs are active..
-        */
-       for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-               struct drm_plane *plane = mdp4_crtc->planes[i];
-               if (plane) {
-                       enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-                       int idx = idxs[pipe_id];
-                       if (idx > 0) {
-                               const struct mdp4_format *format =
-                                       to_mdp4_format(msm_framebuffer_format(plane->fb));
-                               alpha[idx-1] = format->alpha_enable;
-                       }
-                       mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
-               }
-       }
-
-       /* this shouldn't happen.. and seems to cause underflow: */
-       WARN_ON(!mixer_cfg);
-
-       for (i = 0; i < 4; i++) {
-               uint32_t op;
-
-               if (alpha[i]) {
-                       op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
-                                       MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
-                                       MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
-               } else {
-                       op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
-                                       MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
-               }
-
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
-               mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
-       }
-
-       mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
-}
-
-static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode,
-               int x, int y,
-               struct drm_framebuffer *old_fb)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       enum mdp4_dma dma = mdp4_crtc->dma;
-       int ret, ovlp = mdp4_crtc->ovlp;
-
-       mode = adjusted_mode;
-
-       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mdp4_crtc->name, mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
-
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
-                       MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
-                       MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
-
-       /* take data from pipe: */
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
-                       crtc->fb->pitches[0]);
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
-                       MDP4_DMA_DST_SIZE_WIDTH(0) |
-                       MDP4_DMA_DST_SIZE_HEIGHT(0));
-
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
-                       MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
-                       MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
-                       crtc->fb->pitches[0]);
-
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
-
-       update_fb(crtc, false, crtc->fb);
-
-       ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
-                       0, 0, mode->hdisplay, mode->vdisplay,
-                       x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
-       if (ret) {
-               dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-                               mdp4_crtc->name, ret);
-               return ret;
-       }
-
-       if (dma == DMA_E) {
-               mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
-               mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
-               mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
-       }
-
-       return 0;
-}
-
-static void mdp4_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       DBG("%s", mdp4_crtc->name);
-       /* make sure we hold a ref to mdp clks while setting up mode: */
-       mdp4_enable(get_kms(crtc));
-       mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void mdp4_crtc_commit(struct drm_crtc *crtc)
-{
-       mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-       crtc_flush(crtc);
-       /* drop the ref to mdp clk's that we got in prepare: */
-       mdp4_disable(get_kms(crtc));
-}
-
-static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-               struct drm_framebuffer *old_fb)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_plane *plane = mdp4_crtc->plane;
-       struct drm_display_mode *mode = &crtc->mode;
-
-       update_fb(crtc, false, crtc->fb);
-
-       return mdp4_plane_mode_set(plane, crtc, crtc->fb,
-                       0, 0, mode->hdisplay, mode->vdisplay,
-                       x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
-}
-
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
-static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
-               struct drm_framebuffer *new_fb,
-               struct drm_pending_vblank_event *event,
-               uint32_t page_flip_flags)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *obj;
-       unsigned long flags;
-
-       if (mdp4_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
-
-       obj = msm_framebuffer_bo(new_fb, 0);
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       mdp4_crtc->event = event;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
-       update_fb(crtc, true, new_fb);
-
-       return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
-}
-
-static int mdp4_crtc_set_property(struct drm_crtc *crtc,
-               struct drm_property *property, uint64_t val)
-{
-       // XXX
-       return -EINVAL;
-}
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-/* called from IRQ to update cursor related registers (if needed).  The
- * cursor registers, other than x/y position, appear not to be double
- * buffered, and changing them other than from vblank seems to trigger
- * underflow.
- */
-static void update_cursor(struct drm_crtc *crtc)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       enum mdp4_dma dma = mdp4_crtc->dma;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
-       if (mdp4_crtc->cursor.stale) {
-               struct mdp4_kms *mdp4_kms = get_kms(crtc);
-               struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
-               struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
-               uint32_t iova = mdp4_crtc->cursor.next_iova;
-
-               if (next_bo) {
-                       /* take a obj ref + iova ref when we start scanning out: */
-                       drm_gem_object_reference(next_bo);
-                       msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
-
-                       /* enable cursor: */
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
-                                       MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
-                                       MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
-                                       MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
-               } else {
-                       /* disable cursor: */
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
-               }
-
-               /* and drop the iova ref + obj rev when done scanning out: */
-               if (prev_bo)
-                       drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
-
-               mdp4_crtc->cursor.scanout_bo = next_bo;
-               mdp4_crtc->cursor.stale = false;
-       }
-       spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
-}
-
-static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
-               struct drm_file *file_priv, uint32_t handle,
-               uint32_t width, uint32_t height)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *cursor_bo, *old_bo;
-       unsigned long flags;
-       uint32_t iova;
-       int ret;
-
-       if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-               dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
-               return -EINVAL;
-       }
-
-       if (handle) {
-               cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
-               if (!cursor_bo)
-                       return -ENOENT;
-       } else {
-               cursor_bo = NULL;
-       }
-
-       if (cursor_bo) {
-               ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
-               if (ret)
-                       goto fail;
-       } else {
-               iova = 0;
-       }
-
-       spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
-       old_bo = mdp4_crtc->cursor.next_bo;
-       mdp4_crtc->cursor.next_bo   = cursor_bo;
-       mdp4_crtc->cursor.next_iova = iova;
-       mdp4_crtc->cursor.width     = width;
-       mdp4_crtc->cursor.height    = height;
-       mdp4_crtc->cursor.stale     = true;
-       spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
-
-       if (old_bo) {
-               /* drop our previous reference: */
-               msm_gem_put_iova(old_bo, mdp4_kms->id);
-               drm_gem_object_unreference_unlocked(old_bo);
-       }
-
-       request_pending(crtc, PENDING_CURSOR);
-
-       return 0;
-
-fail:
-       drm_gem_object_unreference_unlocked(cursor_bo);
-       return ret;
-}
-
-static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       enum mdp4_dma dma = mdp4_crtc->dma;
-
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
-                       MDP4_DMA_CURSOR_POS_X(x) |
-                       MDP4_DMA_CURSOR_POS_Y(y));
-
-       return 0;
-}
-
-static const struct drm_crtc_funcs mdp4_crtc_funcs = {
-       .set_config = drm_crtc_helper_set_config,
-       .destroy = mdp4_crtc_destroy,
-       .page_flip = mdp4_crtc_page_flip,
-       .set_property = mdp4_crtc_set_property,
-       .cursor_set = mdp4_crtc_cursor_set,
-       .cursor_move = mdp4_crtc_cursor_move,
-};
-
-static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
-       .dpms = mdp4_crtc_dpms,
-       .mode_fixup = mdp4_crtc_mode_fixup,
-       .mode_set = mdp4_crtc_mode_set,
-       .prepare = mdp4_crtc_prepare,
-       .commit = mdp4_crtc_commit,
-       .mode_set_base = mdp4_crtc_mode_set_base,
-       .load_lut = mdp4_crtc_load_lut,
-};
-
-static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-       struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
-       struct drm_crtc *crtc = &mdp4_crtc->base;
-       struct msm_drm_private *priv = crtc->dev->dev_private;
-       unsigned pending;
-
-       mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
-
-       pending = atomic_xchg(&mdp4_crtc->pending, 0);
-
-       if (pending & PENDING_FLIP) {
-               complete_flip(crtc, NULL);
-               drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
-       }
-
-       if (pending & PENDING_CURSOR) {
-               update_cursor(crtc);
-               drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
-       }
-}
-
-static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-       struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
-       struct drm_crtc *crtc = &mdp4_crtc->base;
-       DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
-       crtc_flush(crtc);
-}
-
-uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       return mdp4_crtc->vblank.irqmask;
-}
-
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       DBG("cancel: %p", file);
-       complete_flip(crtc, file);
-}
-
-/* set dma config, ie. the format the encoder wants. */
-void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
-}
-
-/* set interface for routing crtc->encoder: */
-void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct mdp4_kms *mdp4_kms = get_kms(crtc);
-       uint32_t intf_sel;
-
-       intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
-
-       switch (mdp4_crtc->dma) {
-       case DMA_P:
-               intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
-               intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
-               break;
-       case DMA_S:
-               intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
-               intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
-               break;
-       case DMA_E:
-               intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
-               intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
-               break;
-       }
-
-       if (intf == INTF_DSI_VIDEO) {
-               intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
-               intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
-               mdp4_crtc->mixer = 0;
-       } else if (intf == INTF_DSI_CMD) {
-               intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
-               intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
-               mdp4_crtc->mixer = 0;
-       } else if (intf == INTF_LCDC_DTV){
-               mdp4_crtc->mixer = 1;
-       }
-
-       blend_setup(crtc);
-
-       DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
-
-       mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
-}
-
-static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
-               struct drm_plane *plane)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-       BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
-
-       if (mdp4_crtc->planes[pipe_id] == plane)
-               return;
-
-       mdp4_crtc->planes[pipe_id] = plane;
-       blend_setup(crtc);
-       if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
-               crtc_flush(crtc);
-}
-
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-       set_attach(crtc, mdp4_plane_pipe(plane), plane);
-}
-
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-       set_attach(crtc, mdp4_plane_pipe(plane), NULL);
-}
-
-static const char *dma_names[] = {
-               "DMA_P", "DMA_S", "DMA_E",
-};
-
-/* initialize crtc */
-struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
-               struct drm_plane *plane, int id, int ovlp_id,
-               enum mdp4_dma dma_id)
-{
-       struct drm_crtc *crtc = NULL;
-       struct mdp4_crtc *mdp4_crtc;
-       int ret;
-
-       mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
-       if (!mdp4_crtc) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       crtc = &mdp4_crtc->base;
-
-       mdp4_crtc->plane = plane;
-
-       mdp4_crtc->ovlp = ovlp_id;
-       mdp4_crtc->dma = dma_id;
-
-       mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
-       mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
-
-       mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
-       mdp4_crtc->err.irq = mdp4_crtc_err_irq;
-
-       snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
-                       dma_names[dma_id], ovlp_id);
-
-       spin_lock_init(&mdp4_crtc->cursor.lock);
-
-       ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
-                       "unref fb", unref_fb_worker);
-       if (ret)
-               goto fail;
-
-       ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
-                       "unref cursor", unref_cursor_worker);
-
-       INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
-       drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
-       drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
-
-       mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
-
-       return crtc;
-
-fail:
-       if (crtc)
-               mdp4_crtc_destroy(crtc);
-
-       return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
deleted file mode 100644 (file)
index 5e0dcae..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <mach/clk.h>
-
-#include "mdp4_kms.h"
-
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-
-
-struct mdp4_dtv_encoder {
-       struct drm_encoder base;
-       struct clk *src_clk;
-       struct clk *hdmi_clk;
-       struct clk *mdp_clk;
-       unsigned long int pixclock;
-       bool enabled;
-       uint32_t bsc;
-};
-#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
-
-static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
-{
-       struct msm_drm_private *priv = encoder->dev->dev_private;
-       return to_mdp4_kms(priv->kms);
-}
-
-#ifdef CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-/* not ironically named at all.. no, really.. */
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
-       struct drm_device *dev = mdp4_dtv_encoder->base.dev;
-       struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
-
-       if (!dtv_pdata) {
-               dev_err(dev->dev, "could not find dtv pdata\n");
-               return;
-       }
-
-       if (dtv_pdata->bus_scale_table) {
-               mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
-                               dtv_pdata->bus_scale_table);
-               DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
-               DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
-               if (dtv_pdata->lcdc_power_save)
-                       dtv_pdata->lcdc_power_save(1);
-       }
-}
-
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
-       if (mdp4_dtv_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
-               mdp4_dtv_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
-{
-       if (mdp4_dtv_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
-#endif
-
-static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
-{
-       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       bs_fini(mdp4_dtv_encoder);
-       drm_encoder_cleanup(encoder);
-       kfree(mdp4_dtv_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
-       .destroy = mdp4_dtv_encoder_destroy,
-};
-
-static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct drm_device *dev = encoder->dev;
-       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       struct mdp4_kms *mdp4_kms = get_kms(encoder);
-       bool enabled = (mode == DRM_MODE_DPMS_ON);
-
-       DBG("mode=%d", mode);
-
-       if (enabled == mdp4_dtv_encoder->enabled)
-               return;
-
-       if (enabled) {
-               unsigned long pc = mdp4_dtv_encoder->pixclock;
-               int ret;
-
-               bs_set(mdp4_dtv_encoder, 1);
-
-               DBG("setting src_clk=%lu", pc);
-
-               ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
-               if (ret)
-                       dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
-               clk_prepare_enable(mdp4_dtv_encoder->src_clk);
-               ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
-               if (ret)
-                       dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
-               ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
-               if (ret)
-                       dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
-
-               mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
-       } else {
-               mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
-
-               /*
-                * Wait for a vsync so we know the ENABLE=0 latched before
-                * the (connector) source of the vsync's gets disabled,
-                * otherwise we end up in a funny state if we re-enable
-                * before the disable latches, which results that some of
-                * the settings changes for the new modeset (like new
-                * scanout buffer) don't latch properly..
-                */
-               mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
-
-               clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
-               clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
-               clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
-
-               bs_set(mdp4_dtv_encoder, 0);
-       }
-
-       mdp4_dtv_encoder->enabled = enabled;
-}
-
-static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
-               struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       struct mdp4_kms *mdp4_kms = get_kms(encoder);
-       uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
-       uint32_t display_v_start, display_v_end;
-       uint32_t hsync_start_x, hsync_end_x;
-
-       mode = adjusted_mode;
-
-       DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       mode->base.id, mode->name,
-                       mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal,
-                       mode->type, mode->flags);
-
-       mdp4_dtv_encoder->pixclock = mode->clock * 1000;
-
-       DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
-
-       ctrl_pol = 0;
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
-       /* probably need to get DATA_EN polarity from panel.. */
-
-       dtv_hsync_skew = 0;  /* get this from panel? */
-
-       hsync_start_x = (mode->htotal - mode->hsync_start);
-       hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
-
-       vsync_period = mode->vtotal * mode->htotal;
-       vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
-       display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
-       display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
-
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
-                       MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
-                       MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
-                       MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
-                       MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
-                       MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
-                       MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
-                       MDP4_DTV_ACTIVE_HCTL_START(0) |
-                       MDP4_DTV_ACTIVE_HCTL_END(0));
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
-}
-
-static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
-{
-       mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
-{
-       mdp4_crtc_set_config(encoder->crtc,
-                       MDP4_DMA_CONFIG_R_BPC(BPC8) |
-                       MDP4_DMA_CONFIG_G_BPC(BPC8) |
-                       MDP4_DMA_CONFIG_B_BPC(BPC8) |
-                       MDP4_DMA_CONFIG_PACK(0x21));
-       mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
-       mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
-       .dpms = mdp4_dtv_encoder_dpms,
-       .mode_fixup = mdp4_dtv_encoder_mode_fixup,
-       .mode_set = mdp4_dtv_encoder_mode_set,
-       .prepare = mdp4_dtv_encoder_prepare,
-       .commit = mdp4_dtv_encoder_commit,
-};
-
-long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
-{
-       struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
-}
-
-/* initialize encoder */
-struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
-{
-       struct drm_encoder *encoder = NULL;
-       struct mdp4_dtv_encoder *mdp4_dtv_encoder;
-       int ret;
-
-       mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
-       if (!mdp4_dtv_encoder) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       encoder = &mdp4_dtv_encoder->base;
-
-       drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
-                        DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
-
-       mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
-       if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
-               dev_err(dev->dev, "failed to get src_clk\n");
-               ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
-               goto fail;
-       }
-
-       mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
-       if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
-               dev_err(dev->dev, "failed to get hdmi_clk\n");
-               ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
-               goto fail;
-       }
-
-       mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
-       if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-               dev_err(dev->dev, "failed to get mdp_clk\n");
-               ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
-               goto fail;
-       }
-
-       bs_init(mdp4_dtv_encoder);
-
-       return encoder;
-
-fail:
-       if (encoder)
-               mdp4_dtv_encoder_destroy(encoder);
-
-       return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
deleted file mode 100644 (file)
index 17330b0..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "msm_drv.h"
-#include "mdp4_kms.h"
-
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
-               .base = { .pixel_format = DRM_FORMAT_ ## name }, \
-               .bpc_a = BPC ## a ## A,                          \
-               .bpc_r = BPC ## r,                               \
-               .bpc_g = BPC ## g,                               \
-               .bpc_b = BPC ## b,                               \
-               .unpack = { e0, e1, e2, e3 },                    \
-               .alpha_enable = alpha,                           \
-               .unpack_tight = tight,                           \
-               .cpp = c,                                        \
-               .unpack_count = cnt,                             \
-       }
-
-#define BPC0A 0
-
-static const struct mdp4_format formats[] = {
-       /*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt */
-       FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4),
-       FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4),
-       FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3),
-       FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3),
-       FMT(RGB565,   0, 5, 6, 5,  1, 0, 2, 0,  false,  true,  2,  3),
-       FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3),
-};
-
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-               uint32_t max_formats)
-{
-       uint32_t i;
-       for (i = 0; i < ARRAY_SIZE(formats); i++) {
-               const struct mdp4_format *f = &formats[i];
-
-               if (i == max_formats)
-                       break;
-
-               pixel_formats[i] = f->base.pixel_format;
-       }
-
-       return i;
-}
-
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
-{
-       int i;
-       for (i = 0; i < ARRAY_SIZE(formats); i++) {
-               const struct mdp4_format *f = &formats[i];
-               if (f->base.pixel_format == format)
-                       return &f->base;
-       }
-       return NULL;
-}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
deleted file mode 100644 (file)
index 5c6b7fc..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "msm_drv.h"
-#include "mdp4_kms.h"
-
-
-struct mdp4_irq_wait {
-       struct mdp4_irq irq;
-       int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static DEFINE_SPINLOCK(list_lock);
-
-static void update_irq(struct mdp4_kms *mdp4_kms)
-{
-       struct mdp4_irq *irq;
-       uint32_t irqmask = mdp4_kms->vblank_mask;
-
-       BUG_ON(!spin_is_locked(&list_lock));
-
-       list_for_each_entry(irq, &mdp4_kms->irq_list, node)
-               irqmask |= irq->irqmask;
-
-       mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
-}
-
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&list_lock, flags);
-       update_irq(mdp4_kms);
-       spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-       DRM_ERROR("errors: %08x\n", irqstatus);
-}
-
-void mdp4_irq_preinstall(struct msm_kms *kms)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
-}
-
-int mdp4_irq_postinstall(struct msm_kms *kms)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-
-       INIT_LIST_HEAD(&mdp4_kms->irq_list);
-
-       error_handler->irq = mdp4_irq_error_handler;
-       error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
-                       MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
-
-       mdp4_irq_register(mdp4_kms, error_handler);
-
-       return 0;
-}
-
-void mdp4_irq_uninstall(struct msm_kms *kms)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
-}
-
-irqreturn_t mdp4_irq(struct msm_kms *kms)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       struct drm_device *dev = mdp4_kms->dev;
-       struct msm_drm_private *priv = dev->dev_private;
-       struct mdp4_irq *handler, *n;
-       unsigned long flags;
-       unsigned int id;
-       uint32_t status;
-
-       status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
-       mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
-
-       VERB("status=%08x", status);
-
-       for (id = 0; id < priv->num_crtcs; id++)
-               if (status & mdp4_crtc_vblank(priv->crtcs[id]))
-                       drm_handle_vblank(dev, id);
-
-       spin_lock_irqsave(&list_lock, flags);
-       mdp4_kms->in_irq = true;
-       list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
-               if (handler->irqmask & status) {
-                       spin_unlock_irqrestore(&list_lock, flags);
-                       handler->irq(handler, handler->irqmask & status);
-                       spin_lock_irqsave(&list_lock, flags);
-               }
-       }
-       mdp4_kms->in_irq = false;
-       update_irq(mdp4_kms);
-       spin_unlock_irqrestore(&list_lock, flags);
-
-       return IRQ_HANDLED;
-}
-
-int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       unsigned long flags;
-
-       spin_lock_irqsave(&list_lock, flags);
-       mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
-       update_irq(mdp4_kms);
-       spin_unlock_irqrestore(&list_lock, flags);
-
-       return 0;
-}
-
-void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       unsigned long flags;
-
-       spin_lock_irqsave(&list_lock, flags);
-       mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
-       update_irq(mdp4_kms);
-       spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-       struct mdp4_irq_wait *wait =
-                       container_of(irq, struct mdp4_irq_wait, irq);
-       wait->count--;
-       wake_up_all(&wait_event);
-}
-
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
-{
-       struct mdp4_irq_wait wait = {
-               .irq = {
-                       .irq = wait_irq,
-                       .irqmask = irqmask,
-               },
-               .count = 1,
-       };
-       mdp4_irq_register(mdp4_kms, &wait.irq);
-       wait_event(wait_event, (wait.count <= 0));
-       mdp4_irq_unregister(mdp4_kms, &wait.irq);
-}
-
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-       unsigned long flags;
-       bool needs_update = false;
-
-       spin_lock_irqsave(&list_lock, flags);
-
-       if (!irq->registered) {
-               irq->registered = true;
-               list_add(&irq->node, &mdp4_kms->irq_list);
-               needs_update = !mdp4_kms->in_irq;
-       }
-
-       spin_unlock_irqrestore(&list_lock, flags);
-
-       if (needs_update)
-               update_irq_unlocked(mdp4_kms);
-}
-
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-       unsigned long flags;
-       bool needs_update = false;
-
-       spin_lock_irqsave(&list_lock, flags);
-
-       if (irq->registered) {
-               irq->registered = false;
-               list_del(&irq->node);
-               needs_update = !mdp4_kms->in_irq;
-       }
-
-       spin_unlock_irqrestore(&list_lock, flags);
-
-       if (needs_update)
-               update_irq_unlocked(mdp4_kms);
-}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
deleted file mode 100644 (file)
index 8972ac3..0000000
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "msm_drv.h"
-#include "mdp4_kms.h"
-
-static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
-
-static int mdp4_hw_init(struct msm_kms *kms)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       struct drm_device *dev = mdp4_kms->dev;
-       uint32_t version, major, minor, dmap_cfg, vg_cfg;
-       unsigned long clk;
-       int ret = 0;
-
-       pm_runtime_get_sync(dev->dev);
-
-       version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
-
-       major = FIELD(version, MDP4_VERSION_MAJOR);
-       minor = FIELD(version, MDP4_VERSION_MINOR);
-
-       DBG("found MDP version v%d.%d", major, minor);
-
-       if (major != 4) {
-               dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
-                               major, minor);
-               ret = -ENXIO;
-               goto out;
-       }
-
-       mdp4_kms->rev = minor;
-
-       if (mdp4_kms->dsi_pll_vdda) {
-               if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
-                       ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
-                                       1200000, 1200000);
-                       if (ret) {
-                               dev_err(dev->dev,
-                                       "failed to set dsi_pll_vdda voltage: %d\n", ret);
-                               goto out;
-                       }
-               }
-       }
-
-       if (mdp4_kms->dsi_pll_vddio) {
-               if (mdp4_kms->rev == 2) {
-                       ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
-                                       1800000, 1800000);
-                       if (ret) {
-                               dev_err(dev->dev,
-                                       "failed to set dsi_pll_vddio voltage: %d\n", ret);
-                               goto out;
-                       }
-               }
-       }
-
-       if (mdp4_kms->rev > 1) {
-               mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
-               mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
-       }
-
-       mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
-
-       /* max read pending cmd config, 3 pending requests: */
-       mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
-
-       clk = clk_get_rate(mdp4_kms->clk);
-
-       if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
-               dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
-               vg_cfg = 0x47;       /* 16 bytes-burs x 8 req */
-       } else {
-               dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
-               vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
-       }
-
-       DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
-
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
-
-       if (mdp4_kms->rev >= 2)
-               mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
-
-       /* disable CSC matrix / YUV by default: */
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
-       mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
-
-       if (mdp4_kms->rev > 1)
-               mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
-
-out:
-       pm_runtime_put_sync(dev->dev);
-
-       return ret;
-}
-
-static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
-               struct drm_encoder *encoder)
-{
-       /* if we had >1 encoder, we'd need something more clever: */
-       return mdp4_dtv_round_pixclk(encoder, rate);
-}
-
-static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
-       unsigned i;
-
-       for (i = 0; i < priv->num_crtcs; i++)
-               mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
-
-static void mdp4_destroy(struct msm_kms *kms)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-       kfree(mdp4_kms);
-}
-
-static const struct msm_kms_funcs kms_funcs = {
-               .hw_init         = mdp4_hw_init,
-               .irq_preinstall  = mdp4_irq_preinstall,
-               .irq_postinstall = mdp4_irq_postinstall,
-               .irq_uninstall   = mdp4_irq_uninstall,
-               .irq             = mdp4_irq,
-               .enable_vblank   = mdp4_enable_vblank,
-               .disable_vblank  = mdp4_disable_vblank,
-               .get_format      = mdp4_get_format,
-               .round_pixclk    = mdp4_round_pixclk,
-               .preclose        = mdp4_preclose,
-               .destroy         = mdp4_destroy,
-};
-
-int mdp4_disable(struct mdp4_kms *mdp4_kms)
-{
-       DBG("");
-
-       clk_disable_unprepare(mdp4_kms->clk);
-       if (mdp4_kms->pclk)
-               clk_disable_unprepare(mdp4_kms->pclk);
-       clk_disable_unprepare(mdp4_kms->lut_clk);
-
-       return 0;
-}
-
-int mdp4_enable(struct mdp4_kms *mdp4_kms)
-{
-       DBG("");
-
-       clk_prepare_enable(mdp4_kms->clk);
-       if (mdp4_kms->pclk)
-               clk_prepare_enable(mdp4_kms->pclk);
-       clk_prepare_enable(mdp4_kms->lut_clk);
-
-       return 0;
-}
-
-static int modeset_init(struct mdp4_kms *mdp4_kms)
-{
-       struct drm_device *dev = mdp4_kms->dev;
-       struct msm_drm_private *priv = dev->dev_private;
-       struct drm_plane *plane;
-       struct drm_crtc *crtc;
-       struct drm_encoder *encoder;
-       int ret;
-
-       /*
-        *  NOTE: this is a bit simplistic until we add support
-        * for more than just RGB1->DMA_E->DTV->HDMI
-        */
-
-       /* construct non-private planes: */
-       plane = mdp4_plane_init(dev, VG1, false);
-       if (IS_ERR(plane)) {
-               dev_err(dev->dev, "failed to construct plane for VG1\n");
-               ret = PTR_ERR(plane);
-               goto fail;
-       }
-       priv->planes[priv->num_planes++] = plane;
-
-       plane = mdp4_plane_init(dev, VG2, false);
-       if (IS_ERR(plane)) {
-               dev_err(dev->dev, "failed to construct plane for VG2\n");
-               ret = PTR_ERR(plane);
-               goto fail;
-       }
-       priv->planes[priv->num_planes++] = plane;
-
-       /* the CRTCs get constructed with a private plane: */
-       plane = mdp4_plane_init(dev, RGB1, true);
-       if (IS_ERR(plane)) {
-               dev_err(dev->dev, "failed to construct plane for RGB1\n");
-               ret = PTR_ERR(plane);
-               goto fail;
-       }
-
-       crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
-       if (IS_ERR(crtc)) {
-               dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
-               ret = PTR_ERR(crtc);
-               goto fail;
-       }
-       priv->crtcs[priv->num_crtcs++] = crtc;
-
-       encoder = mdp4_dtv_encoder_init(dev);
-       if (IS_ERR(encoder)) {
-               dev_err(dev->dev, "failed to construct DTV encoder\n");
-               ret = PTR_ERR(encoder);
-               goto fail;
-       }
-       encoder->possible_crtcs = 0x1;     /* DTV can be hooked to DMA_E */
-       priv->encoders[priv->num_encoders++] = encoder;
-
-       ret = hdmi_init(dev, encoder);
-       if (ret) {
-               dev_err(dev->dev, "failed to initialize HDMI\n");
-               goto fail;
-       }
-
-       return 0;
-
-fail:
-       return ret;
-}
-
-static const char *iommu_ports[] = {
-               "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
-struct msm_kms *mdp4_kms_init(struct drm_device *dev)
-{
-       struct platform_device *pdev = dev->platformdev;
-       struct mdp4_platform_config *config = mdp4_get_config(pdev);
-       struct mdp4_kms *mdp4_kms;
-       struct msm_kms *kms = NULL;
-       int ret;
-
-       mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
-       if (!mdp4_kms) {
-               dev_err(dev->dev, "failed to allocate kms\n");
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       kms = &mdp4_kms->base;
-       kms->funcs = &kms_funcs;
-
-       mdp4_kms->dev = dev;
-
-       mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
-       if (IS_ERR(mdp4_kms->mmio)) {
-               ret = PTR_ERR(mdp4_kms->mmio);
-               goto fail;
-       }
-
-       mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
-       if (IS_ERR(mdp4_kms->dsi_pll_vdda))
-               mdp4_kms->dsi_pll_vdda = NULL;
-
-       mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
-       if (IS_ERR(mdp4_kms->dsi_pll_vddio))
-               mdp4_kms->dsi_pll_vddio = NULL;
-
-       mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
-       if (IS_ERR(mdp4_kms->vdd))
-               mdp4_kms->vdd = NULL;
-
-       if (mdp4_kms->vdd) {
-               ret = regulator_enable(mdp4_kms->vdd);
-               if (ret) {
-                       dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
-                       goto fail;
-               }
-       }
-
-       mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
-       if (IS_ERR(mdp4_kms->clk)) {
-               dev_err(dev->dev, "failed to get core_clk\n");
-               ret = PTR_ERR(mdp4_kms->clk);
-               goto fail;
-       }
-
-       mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
-       if (IS_ERR(mdp4_kms->pclk))
-               mdp4_kms->pclk = NULL;
-
-       // XXX if (rev >= MDP_REV_42) { ???
-       mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
-       if (IS_ERR(mdp4_kms->lut_clk)) {
-               dev_err(dev->dev, "failed to get lut_clk\n");
-               ret = PTR_ERR(mdp4_kms->lut_clk);
-               goto fail;
-       }
-
-       clk_set_rate(mdp4_kms->clk, config->max_clk);
-       clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
-
-       if (!config->iommu) {
-               dev_err(dev->dev, "no iommu\n");
-               ret = -ENXIO;
-               goto fail;
-       }
-
-       /* make sure things are off before attaching iommu (bootloader could
-        * have left things on, in which case we'll start getting faults if
-        * we don't disable):
-        */
-       mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
-       mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
-       mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
-       mdelay(16);
-
-       ret = msm_iommu_attach(dev, config->iommu,
-                       iommu_ports, ARRAY_SIZE(iommu_ports));
-       if (ret)
-               goto fail;
-
-       mdp4_kms->id = msm_register_iommu(dev, config->iommu);
-       if (mdp4_kms->id < 0) {
-               ret = mdp4_kms->id;
-               dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
-               goto fail;
-       }
-
-       ret = modeset_init(mdp4_kms);
-       if (ret) {
-               dev_err(dev->dev, "modeset_init failed: %d\n", ret);
-               goto fail;
-       }
-
-       return kms;
-
-fail:
-       if (kms)
-               mdp4_destroy(kms);
-       return ERR_PTR(ret);
-}
-
-static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
-{
-       static struct mdp4_platform_config config = {};
-#ifdef CONFIG_OF
-       /* TODO */
-#else
-       if (cpu_is_apq8064())
-               config.max_clk = 266667000;
-       else
-               config.max_clk = 200000000;
-
-       config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
-#endif
-       return &config;
-}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
deleted file mode 100644 (file)
index eb015c8..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __MDP4_KMS_H__
-#define __MDP4_KMS_H__
-
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
-#include "msm_drv.h"
-#include "mdp4.xml.h"
-
-
-/* For transiently registering for different MDP4 irqs that various parts
- * of the KMS code need during setup/configuration.  We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct mdp4_irq {
-       struct list_head node;
-       uint32_t irqmask;
-       bool registered;
-       void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
-};
-
-struct mdp4_kms {
-       struct msm_kms base;
-
-       struct drm_device *dev;
-
-       int rev;
-
-       /* mapper-id used to request GEM buffer mapped for scanout: */
-       int id;
-
-       void __iomem *mmio;
-
-       struct regulator *dsi_pll_vdda;
-       struct regulator *dsi_pll_vddio;
-       struct regulator *vdd;
-
-       struct clk *clk;
-       struct clk *pclk;
-       struct clk *lut_clk;
-
-       /* irq handling: */
-       bool in_irq;
-       struct list_head irq_list;    /* list of mdp4_irq */
-       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
-       struct mdp4_irq error_handler;
-};
-#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
-
-/* platform config data (ie. from DT, or pdata) */
-struct mdp4_platform_config {
-       struct iommu_domain *iommu;
-       uint32_t max_clk;
-};
-
-struct mdp4_format {
-       struct msm_format base;
-       enum mdp4_bpc bpc_r, bpc_g, bpc_b;
-       enum mdp4_bpc_alpha bpc_a;
-       uint8_t unpack[4];
-       bool alpha_enable, unpack_tight;
-       uint8_t cpp, unpack_count;
-};
-#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
-
-static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
-{
-       msm_writel(data, mdp4_kms->mmio + reg);
-}
-
-static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
-{
-       return msm_readl(mdp4_kms->mmio + reg);
-}
-
-static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
-{
-       switch (pipe) {
-       case VG1:      return MDP4_OVERLAY_FLUSH_VG1;
-       case VG2:      return MDP4_OVERLAY_FLUSH_VG2;
-       case RGB1:     return MDP4_OVERLAY_FLUSH_RGB1;
-       case RGB2:     return MDP4_OVERLAY_FLUSH_RGB1;
-       default:       return 0;
-       }
-}
-
-static inline uint32_t ovlp2flush(int ovlp)
-{
-       switch (ovlp) {
-       case 0:        return MDP4_OVERLAY_FLUSH_OVLP0;
-       case 1:        return MDP4_OVERLAY_FLUSH_OVLP1;
-       default:       return 0;
-       }
-}
-
-static inline uint32_t dma2irq(enum mdp4_dma dma)
-{
-       switch (dma) {
-       case DMA_P:    return MDP4_IRQ_DMA_P_DONE;
-       case DMA_S:    return MDP4_IRQ_DMA_S_DONE;
-       case DMA_E:    return MDP4_IRQ_DMA_E_DONE;
-       default:       return 0;
-       }
-}
-
-static inline uint32_t dma2err(enum mdp4_dma dma)
-{
-       switch (dma) {
-       case DMA_P:    return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
-       case DMA_S:    return 0;  // ???
-       case DMA_E:    return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
-       default:       return 0;
-       }
-}
-
-static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
-               enum mdp4_mixer_stage_id stage)
-{
-       uint32_t mixer_cfg = 0;
-
-       switch (pipe) {
-       case VG1:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
-               break;
-       case VG2:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
-               break;
-       case RGB1:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
-               break;
-       case RGB2:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
-               break;
-       case RGB3:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
-               break;
-       case VG3:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
-               break;
-       case VG4:
-               mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
-                       COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
-               break;
-       default:
-               WARN_ON("invalid pipe");
-               break;
-       }
-
-       return mixer_cfg;
-}
-
-int mdp4_disable(struct mdp4_kms *mdp4_kms);
-int mdp4_enable(struct mdp4_kms *mdp4_kms);
-
-void mdp4_irq_preinstall(struct msm_kms *kms);
-int mdp4_irq_postinstall(struct msm_kms *kms);
-void mdp4_irq_uninstall(struct msm_kms *kms);
-irqreturn_t mdp4_irq(struct msm_kms *kms);
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
-               uint32_t max_formats);
-const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
-               struct drm_mode_object *obj);
-void mdp4_plane_set_scanout(struct drm_plane *plane,
-               struct drm_framebuffer *fb);
-int mdp4_plane_mode_set(struct drm_plane *plane,
-               struct drm_crtc *crtc, struct drm_framebuffer *fb,
-               int crtc_x, int crtc_y,
-               unsigned int crtc_w, unsigned int crtc_h,
-               uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h);
-enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
-struct drm_plane *mdp4_plane_init(struct drm_device *dev,
-               enum mdp4_pipe pipe_id, bool private_plane);
-
-uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
-void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
-struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
-               struct drm_plane *plane, int id, int ovlp_id,
-               enum mdp4_dma dma_id);
-
-long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
-struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
-
-#ifdef CONFIG_MSM_BUS_SCALING
-static inline int match_dev_name(struct device *dev, void *data)
-{
-       return !strcmp(dev_name(dev), data);
-}
-/* bus scaling data is associated with extra pointless platform devices,
- * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
- * to find their pdata to make the bus-scaling stuff work.
- */
-static inline void *mdp4_find_pdata(const char *devname)
-{
-       struct device *dev;
-       dev = bus_find_device(&platform_bus_type, NULL,
-                       (void *)devname, match_dev_name);
-       return dev ? dev->platform_data : NULL;
-}
-#endif
-
-#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
deleted file mode 100644 (file)
index 0f0af24..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "mdp4_kms.h"
-
-
-struct mdp4_plane {
-       struct drm_plane base;
-       const char *name;
-
-       enum mdp4_pipe pipe;
-
-       uint32_t nformats;
-       uint32_t formats[32];
-
-       bool enabled;
-};
-#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
-
-static struct mdp4_kms *get_kms(struct drm_plane *plane)
-{
-       struct msm_drm_private *priv = plane->dev->dev_private;
-       return to_mdp4_kms(priv->kms);
-}
-
-static int mdp4_plane_update(struct drm_plane *plane,
-               struct drm_crtc *crtc, struct drm_framebuffer *fb,
-               int crtc_x, int crtc_y,
-               unsigned int crtc_w, unsigned int crtc_h,
-               uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
-       mdp4_plane->enabled = true;
-
-       if (plane->fb)
-               drm_framebuffer_unreference(plane->fb);
-
-       drm_framebuffer_reference(fb);
-
-       return mdp4_plane_mode_set(plane, crtc, fb,
-                       crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x, src_y, src_w, src_h);
-}
-
-static int mdp4_plane_disable(struct drm_plane *plane)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-       DBG("%s: disable", mdp4_plane->name);
-       if (plane->crtc)
-               mdp4_crtc_detach(plane->crtc, plane);
-       return 0;
-}
-
-static void mdp4_plane_destroy(struct drm_plane *plane)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
-       mdp4_plane_disable(plane);
-       drm_plane_cleanup(plane);
-
-       kfree(mdp4_plane);
-}
-
-/* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
-               struct drm_mode_object *obj)
-{
-       // XXX
-}
-
-int mdp4_plane_set_property(struct drm_plane *plane,
-               struct drm_property *property, uint64_t val)
-{
-       // XXX
-       return -EINVAL;
-}
-
-static const struct drm_plane_funcs mdp4_plane_funcs = {
-               .update_plane = mdp4_plane_update,
-               .disable_plane = mdp4_plane_disable,
-               .destroy = mdp4_plane_destroy,
-               .set_property = mdp4_plane_set_property,
-};
-
-void mdp4_plane_set_scanout(struct drm_plane *plane,
-               struct drm_framebuffer *fb)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-       struct mdp4_kms *mdp4_kms = get_kms(plane);
-       enum mdp4_pipe pipe = mdp4_plane->pipe;
-       uint32_t iova;
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
-                       MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
-                       MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
-                       MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
-                       MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
-
-       msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
-
-       plane->fb = fb;
-}
-
-#define MDP4_VG_PHASE_STEP_DEFAULT     0x20000000
-
-int mdp4_plane_mode_set(struct drm_plane *plane,
-               struct drm_crtc *crtc, struct drm_framebuffer *fb,
-               int crtc_x, int crtc_y,
-               unsigned int crtc_w, unsigned int crtc_h,
-               uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-       struct mdp4_kms *mdp4_kms = get_kms(plane);
-       enum mdp4_pipe pipe = mdp4_plane->pipe;
-       const struct mdp4_format *format;
-       uint32_t op_mode = 0;
-       uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
-       uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-
-       /* src values are in Q16 fixed point, convert to integer: */
-       src_x = src_x >> 16;
-       src_y = src_y >> 16;
-       src_w = src_w >> 16;
-       src_h = src_h >> 16;
-
-       DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
-                       fb->base.id, src_x, src_y, src_w, src_h,
-                       crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
-
-       if (src_w != crtc_w) {
-               op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
-               /* TODO calc phasex_step */
-       }
-
-       if (src_h != crtc_h) {
-               op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
-               /* TODO calc phasey_step */
-       }
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
-                       MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
-                       MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
-                       MDP4_PIPE_SRC_XY_X(src_x) |
-                       MDP4_PIPE_SRC_XY_Y(src_y));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
-                       MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
-                       MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
-                       MDP4_PIPE_SRC_XY_X(crtc_x) |
-                       MDP4_PIPE_SRC_XY_Y(crtc_y));
-
-       mdp4_plane_set_scanout(plane, fb);
-
-       format = to_mdp4_format(msm_framebuffer_format(fb));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
-                       MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
-                       MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
-                       MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
-                       MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
-                       COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
-                       MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
-                       MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
-                       COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
-                       MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
-                       MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
-                       MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
-                       MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
-
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
-       mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
-
-       /* TODO detach from old crtc (if we had more than one) */
-       mdp4_crtc_attach(crtc, plane);
-
-       return 0;
-}
-
-static const char *pipe_names[] = {
-               "VG1", "VG2",
-               "RGB1", "RGB2", "RGB3",
-               "VG3", "VG4",
-};
-
-enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
-{
-       struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-       return mdp4_plane->pipe;
-}
-
-/* initialize plane */
-struct drm_plane *mdp4_plane_init(struct drm_device *dev,
-               enum mdp4_pipe pipe_id, bool private_plane)
-{
-       struct drm_plane *plane = NULL;
-       struct mdp4_plane *mdp4_plane;
-       int ret;
-
-       mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
-       if (!mdp4_plane) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       plane = &mdp4_plane->base;
-
-       mdp4_plane->pipe = pipe_id;
-       mdp4_plane->name = pipe_names[pipe_id];
-
-       mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
-                       ARRAY_SIZE(mdp4_plane->formats));
-
-       drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
-                       mdp4_plane->formats, mdp4_plane->nformats,
-                       private_plane);
-
-       mdp4_plane_install_properties(plane, &plane->base);
-
-       return plane;
-
-fail:
-       if (plane)
-               mdp4_plane_destroy(plane);
-
-       return ERR_PTR(ret);
-}
index 86537692e45c4efaeae162d32753cf5613216954..e6adafc7eff3177b789395f6bc81f4ab62042e11 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "msm_drv.h"
 #include "msm_gpu.h"
+#include "msm_kms.h"
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
@@ -30,50 +31,19 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
        .output_poll_changed = msm_fb_output_poll_changed,
 };
 
-static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
-               unsigned long iova, int flags, void *arg)
-{
-       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-       return 0;
-}
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       int idx = priv->num_iommus++;
+       int idx = priv->num_mmus++;
 
-       if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+       if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
                return -EINVAL;
 
-       priv->iommus[idx] = iommu;
-
-       iommu_set_fault_handler(iommu, msm_fault_handler, dev);
-
-       /* need to iommu_attach_device() somewhere??  on resume?? */
+       priv->mmus[idx] = mmu;
 
        return idx;
 }
 
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
-               const char **names, int cnt)
-{
-       int i, ret;
-
-       for (i = 0; i < cnt; i++) {
-               /* TODO maybe some day msm iommu won't require this hack: */
-               struct device *msm_iommu_get_ctx(const char *ctx_name);
-               struct device *ctx = msm_iommu_get_ctx(names[i]);
-               if (!ctx)
-                       continue;
-               ret = iommu_attach_device(iommu, ctx);
-               if (ret) {
-                       dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
-                       return ret;
-               }
-       }
-       return 0;
-}
-
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -82,6 +52,10 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
 
+/* VRAM carveout size string (parsed with memparse), for devices with
+ * no IOMMU/GPUMMU.  Fixed unbalanced paren in the modinfo description.
+ */
+static char *vram;
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+module_param(vram, charp, 0);
+
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                const char *dbgname)
 {
@@ -161,6 +135,14 @@ static int msm_unload(struct drm_device *dev)
                mutex_unlock(&dev->struct_mutex);
        }
 
+       if (priv->vram.paddr) {
+               DEFINE_DMA_ATTRS(attrs);
+               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+               drm_mm_takedown(&priv->vram.mm);
+               dma_free_attrs(dev->dev, priv->vram.size, NULL,
+                               priv->vram.paddr, &attrs);
+       }
+
        dev->dev_private = NULL;
 
        kfree(priv);
@@ -168,6 +150,24 @@ static int msm_unload(struct drm_device *dev)
        return 0;
 }
 
+/* Determine the MDP display-controller generation for this device:
+ * returns 5 when the DT node matches "qcom,mdss_mdp", otherwise 4
+ * (also the default on non-DT builds).
+ */
+static int get_mdp_ver(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+       /* NOTE(review): "const static" is legal but unidiomatic ordering;
+        * "static const" is the conventional spelling.
+        */
+       const static struct of_device_id match_types[] = { {
+               .compatible = "qcom,mdss_mdp",
+               .data   = (void *)5,
+       }, {
+               /* end node */
+       } };
+       struct device *dev = &pdev->dev;
+       const struct of_device_id *match;
+       match = of_match_node(match_types, dev->of_node);
+       if (match)
+               /* .data carries the version as an integer-in-pointer;
+                * NOTE(review): (int) cast of a pointer warns on 64-bit —
+                * fine here since values are small, but worth confirming.
+                */
+               return (int)match->data;
+#endif
+       return 4;
+}
+
 static int msm_load(struct drm_device *dev, unsigned long flags)
 {
        struct platform_device *pdev = dev->platformdev;
@@ -191,7 +191,53 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
 
-       kms = mdp4_kms_init(dev);
+       /* if we have no IOMMU, then we need to use carveout allocator.
+        * Grab the entire CMA chunk carved out in early startup in
+        * mach-msm:
+        */
+       if (!iommu_present(&platform_bus_type)) {
+               DEFINE_DMA_ATTRS(attrs);
+               unsigned long size;
+               void *p;
+
+               DBG("using %s VRAM carveout", vram);
+               size = memparse(vram, NULL);
+               priv->vram.size = size;
+
+               drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+
+               dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+
+               /* note that for no-kernel-mapping, the vaddr returned
+                * is bogus, but non-null if allocation succeeded:
+                */
+               p = dma_alloc_attrs(dev->dev, size,
+                               &priv->vram.paddr, 0, &attrs);
+               if (!p) {
+                       dev_err(dev->dev, "failed to allocate VRAM\n");
+                       priv->vram.paddr = 0;
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
+               dev_info(dev->dev, "VRAM: %08x->%08x\n",
+                               (uint32_t)priv->vram.paddr,
+                               (uint32_t)(priv->vram.paddr + size));
+       }
+
+       switch (get_mdp_ver(pdev)) {
+       case 4:
+               kms = mdp4_kms_init(dev);
+               break;
+       case 5:
+               kms = mdp5_kms_init(dev);
+               break;
+       default:
+               kms = ERR_PTR(-ENODEV);
+               break;
+       }
+
        if (IS_ERR(kms)) {
                /*
                 * NOTE: once we have GPU support, having no kms should not
@@ -326,7 +372,7 @@ static void msm_lastclose(struct drm_device *dev)
        }
 }
 
-static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+static irqreturn_t msm_irq(int irq, void *arg)
 {
        struct drm_device *dev = arg;
        struct msm_drm_private *priv = dev->dev_private;
@@ -415,7 +461,7 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 
 static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
 {
-       return drm_mm_dump_table(m, dev->mm_private);
+       return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }
 
 static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
@@ -778,12 +824,13 @@ static const struct dev_pm_ops msm_pm_ops = {
 
 static int msm_pdev_probe(struct platform_device *pdev)
 {
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        return drm_platform_init(&msm_driver, pdev);
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)
 {
-       drm_platform_exit(&msm_driver, pdev);
+       drm_put_dev(platform_get_drvdata(pdev));
 
        return 0;
 }
@@ -793,12 +840,19 @@ static const struct platform_device_id msm_id[] = {
        { }
 };
 
+static const struct of_device_id dt_match[] = {
+       { .compatible = "qcom,mdss_mdp" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
 static struct platform_driver msm_platform_driver = {
        .probe      = msm_pdev_probe,
        .remove     = msm_pdev_remove,
        .driver     = {
                .owner  = THIS_MODULE,
                .name   = "msm",
+               .of_match_table = dt_match,
                .pm     = &msm_pm_ops,
        },
        .id_table   = msm_id,
index d39f0862b19ebe8db2a74fdcedbb8a52086df3a7..3d63269c5b29c51e69bc0dfe7714dddae71e4fee 100644 (file)
 #include <linux/types.h>
 #include <asm/sizes.h>
 
+
+#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_MSM)
+/* stubs we need for compile-test: */
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+       return NULL;
+}
+#endif
+
 #ifndef CONFIG_OF
 #include <mach/board.h>
 #include <mach/socinfo.h>
@@ -44,6 +53,7 @@
 
 struct msm_kms;
 struct msm_gpu;
+struct msm_mmu;
 
 #define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
 
@@ -76,9 +86,9 @@ struct msm_drm_private {
        /* callbacks deferred until bo is inactive: */
        struct list_head fence_cbs;
 
-       /* registered IOMMU domains: */
-       unsigned int num_iommus;
-       struct iommu_domain *iommus[NUM_DOMAINS];
+       /* registered MMUs: */
+       unsigned int num_mmus;
+       struct msm_mmu *mmus[NUM_DOMAINS];
 
        unsigned int num_planes;
        struct drm_plane *planes[8];
@@ -94,6 +104,16 @@ struct msm_drm_private {
 
        unsigned int num_connectors;
        struct drm_connector *connectors[8];
+
+       /* VRAM carveout, used when no IOMMU: */
+       struct {
+               unsigned long size;
+               dma_addr_t paddr;
+               /* NOTE: mm managed at the page level, size is in # of pages
+                * and position mm_node->start is in # of pages:
+                */
+               struct drm_mm mm;
+       } vram;
 };
 
 struct msm_format {
@@ -114,39 +134,7 @@ void __msm_fence_worker(struct work_struct *work);
                (_cb)->func = _func;                         \
        } while (0)
 
-/* As there are different display controller blocks depending on the
- * snapdragon version, the kms support is split out and the appropriate
- * implementation is loaded at runtime.  The kms module is responsible
- * for constructing the appropriate planes/crtcs/encoders/connectors.
- */
-struct msm_kms_funcs {
-       /* hw initialization: */
-       int (*hw_init)(struct msm_kms *kms);
-       /* irq handling: */
-       void (*irq_preinstall)(struct msm_kms *kms);
-       int (*irq_postinstall)(struct msm_kms *kms);
-       void (*irq_uninstall)(struct msm_kms *kms);
-       irqreturn_t (*irq)(struct msm_kms *kms);
-       int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-       void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-       /* misc: */
-       const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
-       long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
-                       struct drm_encoder *encoder);
-       /* cleanup: */
-       void (*preclose)(struct msm_kms *kms, struct drm_file *file);
-       void (*destroy)(struct msm_kms *kms);
-};
-
-struct msm_kms {
-       const struct msm_kms_funcs *funcs;
-};
-
-struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-
-int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
-int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
-               const char **names, int cnt);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
 int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                struct timespec *timeout);
@@ -202,7 +190,9 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 
-int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+struct hdmi;
+struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+irqreturn_t hdmi_irq(int irq, void *dev_id);
 void __init hdmi_register(void);
 void __exit hdmi_unregister(void);
 
index 0286c0eeb10ca1e967c27bf559928de58d14358f..81bafdf19ab39fc32428e6f5be692c597a6a8276 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include "msm_drv.h"
+#include "msm_kms.h"
 
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
index e587d251c5900cd4532cdbff584d0683131616ed..d8d60c969ac7858bce0e6114f092a5f1aad450aa 100644 (file)
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
+#include "msm_mmu.h"
 
+/* Physical address of a VRAM-carveout bo: carveout base (vram.paddr)
+ * plus the bo's drm_mm node offset, which is managed in units of pages.
+ */
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_drm_private *priv = obj->dev->dev_private;
+       return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+                       priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+               int npages)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_drm_private *priv = obj->dev->dev_private;
+       dma_addr_t paddr;
+       struct page **p;
+       int ret, i;
+
+       p = drm_malloc_ab(npages, sizeof(struct page *));
+       if (!p)
+               return ERR_PTR(-ENOMEM);
+
+       /* reserve a contiguous range of pages in the carveout; vram_node
+        * was embedded after msm_gem_object at alloc time:
+        */
+       ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+                       npages, 0, DRM_MM_SEARCH_DEFAULT);
+       if (ret) {
+               drm_free_large(p);
+               return ERR_PTR(ret);
+       }
+
+       /* carveout memory is physically contiguous, so the page array is
+        * just successive PAGE_SIZE steps from the base physical address:
+        */
+       paddr = physaddr(obj);
+       for (i = 0; i < npages; i++) {
+               p[i] = phys_to_page(paddr);
+               paddr += PAGE_SIZE;
+       }
+
+       return p;
+}
 
 /* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
@@ -31,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj)
 
        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
-               struct page **p = drm_gem_get_pages(obj, 0);
+               struct page **p;
                int npages = obj->size >> PAGE_SHIFT;
 
+               if (iommu_present(&platform_bus_type))
+                       p = drm_gem_get_pages(obj, 0);
+               else
+                       p = get_pages_vram(obj, npages);
+
                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
@@ -73,7 +116,11 @@ static void put_pages(struct drm_gem_object *obj)
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);
 
-               drm_gem_put_pages(obj, msm_obj->pages, true, false);
+               if (iommu_present(&platform_bus_type))
+                       drm_gem_put_pages(obj, msm_obj->pages, true, false);
+               else
+                       drm_mm_remove_node(msm_obj->vram_node);
+
                msm_obj->pages = NULL;
        }
 }
@@ -138,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct drm_device *dev = obj->dev;
        struct page **pages;
        unsigned long pfn;
@@ -163,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        pgoff = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
 
-       pfn = page_to_pfn(msm_obj->pages[pgoff]);
+       pfn = page_to_pfn(pages[pgoff]);
 
        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);
@@ -219,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
        return offset;
 }
 
-/* helpers for dealing w/ iommu: */
-static int map_range(struct iommu_domain *domain, unsigned int iova,
-               struct sg_table *sgt, unsigned int len, int prot)
-{
-       struct scatterlist *sg;
-       unsigned int da = iova;
-       unsigned int i, j;
-       int ret;
-
-       if (!domain || !sgt)
-               return -EINVAL;
-
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-               u32 pa = sg_phys(sg) - sg->offset;
-               size_t bytes = sg->length + sg->offset;
-
-               VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
-
-               ret = iommu_map(domain, da, pa, bytes, prot);
-               if (ret)
-                       goto fail;
-
-               da += bytes;
-       }
-
-       return 0;
-
-fail:
-       da = iova;
-
-       for_each_sg(sgt->sgl, sg, i, j) {
-               size_t bytes = sg->length + sg->offset;
-               iommu_unmap(domain, da, bytes);
-               da += bytes;
-       }
-       return ret;
-}
-
-static void unmap_range(struct iommu_domain *domain, unsigned int iova,
-               struct sg_table *sgt, unsigned int len)
-{
-       struct scatterlist *sg;
-       unsigned int da = iova;
-       int i;
-
-       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-               size_t bytes = sg->length + sg->offset;
-               size_t unmapped;
-
-               unmapped = iommu_unmap(domain, da, bytes);
-               if (unmapped < bytes)
-                       break;
-
-               VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
-
-               BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
-
-               da += bytes;
-       }
-}
-
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -295,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
-               uint32_t offset = (uint32_t)mmap_offset(obj);
-               struct page **pages;
-               pages = get_pages(obj);
+               struct msm_mmu *mmu = priv->mmus[id];
+               struct page **pages = get_pages(obj);
+
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
-               // XXX ideally we would not map buffers writable when not needed...
-               ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
-                               obj->size, IOMMU_READ | IOMMU_WRITE);
-               msm_obj->domain[id].iova = offset;
+
+               if (iommu_present(&platform_bus_type)) {
+                       uint32_t offset = (uint32_t)mmap_offset(obj);
+                       ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+                                       obj->size, IOMMU_READ | IOMMU_WRITE);
+                       msm_obj->domain[id].iova = offset;
+               } else {
+                       msm_obj->domain[id].iova = physaddr(obj);
+               }
        }
 
        if (!ret)
@@ -514,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;
 
@@ -525,11 +516,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        list_del(&msm_obj->mm_list);
 
        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-               if (msm_obj->domain[id].iova) {
-                       struct msm_drm_private *priv = obj->dev->dev_private;
+               struct msm_mmu *mmu = priv->mmus[id];
+               if (mmu && msm_obj->domain[id].iova) {
                        uint32_t offset = (uint32_t)mmap_offset(obj);
-                       unmap_range(priv->iommus[id], offset,
-                                       msm_obj->sgt, obj->size);
+                       mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
                }
        }
 
@@ -591,6 +581,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
+       unsigned sz;
 
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
@@ -603,10 +594,17 @@ static int msm_gem_new_impl(struct drm_device *dev,
                return -EINVAL;
        }
 
-       msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+       sz = sizeof(*msm_obj);
+       if (!iommu_present(&platform_bus_type))
+               sz += sizeof(struct drm_mm_node);
+
+       msm_obj = kzalloc(sz, GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;
 
+       if (!iommu_present(&platform_bus_type))
+               msm_obj->vram_node = (void *)&msm_obj[1];
+
        msm_obj->flags = flags;
 
        msm_obj->resv = &msm_obj->_resv;
@@ -623,7 +621,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
 {
-       struct drm_gem_object *obj;
+       struct drm_gem_object *obj = NULL;
        int ret;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -634,9 +632,13 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        if (ret)
                goto fail;
 
-       ret = drm_gem_object_init(dev, obj, size);
-       if (ret)
-               goto fail;
+       if (iommu_present(&platform_bus_type)) {
+               ret = drm_gem_object_init(dev, obj, size);
+               if (ret)
+                       goto fail;
+       } else {
+               drm_gem_private_object_init(dev, obj, size);
+       }
 
        return obj;
 
@@ -654,6 +656,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        struct drm_gem_object *obj;
        int ret, npages;
 
+       /* if we don't have IOMMU, don't bother pretending we can import: */
+       if (!iommu_present(&platform_bus_type)) {
+               dev_err(dev->dev, "cannot import without IOMMU\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        size = PAGE_ALIGN(size);
 
        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
index f4f23a578d9dc76fe4b7d03498729b099471694a..3246bb46c4f2add0636bca94b1b4c726542b1178 100644 (file)
@@ -57,6 +57,11 @@ struct msm_gem_object {
        /* normally (resv == &_resv) except for imported bo's */
        struct reservation_object *resv;
        struct reservation_object _resv;
+
+       /* For physically contiguous buffers.  Used when we don't have
+        * an IOMMU.
+        */
+       struct drm_mm_node *vram_node;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
index 4583d61556f5bed5c7d9cf9d26988d1160ddb386..4ebce8be489db6cdf51f6ba88fb8f7bfd1dbd5ce 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "msm_gpu.h"
 #include "msm_gem.h"
+#include "msm_mmu.h"
 
 
 /*
 
 #ifdef CONFIG_MSM_BUS_SCALING
 #include <mach/board.h>
-#include <mach/kgsl.h>
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
+static void bs_init(struct msm_gpu *gpu)
 {
-       struct drm_device *dev = gpu->dev;
-       struct kgsl_device_platform_data *pdata;
-
-       if (!pdev) {
-               dev_err(dev->dev, "could not find dtv pdata\n");
-               return;
-       }
-
-       pdata = pdev->dev.platform_data;
-       if (pdata->bus_scale_table) {
-               gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
+       if (gpu->bus_scale_table) {
+               gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
                DBG("bus scale client: %08x", gpu->bsc);
        }
 }
@@ -59,7 +50,7 @@ static void bs_set(struct msm_gpu *gpu, int idx)
        }
 }
 #else
-static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
+static void bs_init(struct msm_gpu *gpu) {}
 static void bs_fini(struct msm_gpu *gpu) {}
 static void bs_set(struct msm_gpu *gpu, int idx) {}
 #endif
@@ -363,6 +354,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, const char *ioname, const char *irqname, int ringsz)
 {
+       struct iommu_domain *iommu;
        int i, ret;
 
        gpu->dev = drm;
@@ -428,13 +420,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
         * and have separate page tables per context.  For now, to keep things
         * simple and to get something working, just use a single address space:
         */
-       gpu->iommu = iommu_domain_alloc(&platform_bus_type);
-       if (!gpu->iommu) {
-               dev_err(drm->dev, "failed to allocate IOMMU\n");
-               ret = -ENOMEM;
-               goto fail;
+       iommu = iommu_domain_alloc(&platform_bus_type);
+       if (iommu) {
+               dev_info(drm->dev, "%s: using IOMMU\n", name);
+               gpu->mmu = msm_iommu_new(drm, iommu);
+       } else {
+               dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        }
-       gpu->id = msm_register_iommu(drm, gpu->iommu);
+       gpu->id = msm_register_mmu(drm, gpu->mmu);
 
        /* Create ringbuffer: */
        gpu->rb = msm_ringbuffer_new(gpu, ringsz);
@@ -452,7 +445,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                goto fail;
        }
 
-       bs_init(gpu, pdev);
+       bs_init(gpu);
 
        return 0;
 
@@ -474,6 +467,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                msm_ringbuffer_destroy(gpu->rb);
        }
 
-       if (gpu->iommu)
-               iommu_domain_free(gpu->iommu);
+       if (gpu->mmu)
+               gpu->mmu->funcs->destroy(gpu->mmu);
 }
index 8cd829e520bb84b6bfa10d90fce8236cf9fbd3c5..458db8c64c28873f260aa59b44bbd96da7a3b18f 100644 (file)
@@ -78,14 +78,18 @@ struct msm_gpu {
        void __iomem *mmio;
        int irq;
 
-       struct iommu_domain *iommu;
+       struct msm_mmu *mmu;
        int id;
 
        /* Power Control: */
        struct regulator *gpu_reg, *gpu_cx;
        struct clk *ebi1_clk, *grp_clks[5];
        uint32_t fast_rate, slow_rate, bus_freq;
+
+#ifdef CONFIG_MSM_BUS_SCALING
+       struct msm_bus_scale_pdata *bus_scale_table;
        uint32_t bsc;
+#endif
 
        /* Hang Detction: */
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
new file mode 100644 (file)
index 0000000..92b7459
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+struct msm_iommu {
+       struct msm_mmu base;
+       struct iommu_domain *domain;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+/* IOMMU fault callback: just log the faulting iova for debugging.
+ * Returning 0 reports the fault as handled.
+ */
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+               unsigned long iova, int flags, void *arg)
+{
+       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+       return 0;
+}
+
+/* Attach the IOMMU domain to each named MSM IOMMU context.  Contexts
+ * that cannot be looked up are silently skipped (best-effort); a failed
+ * attach on a found context is fatal and returned to the caller.
+ */
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+       struct drm_device *dev = mmu->dev;
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       int i, ret;
+
+       for (i = 0; i < cnt; i++) {
+               /* function-scope prototype: msm_iommu_get_ctx() is an
+                * msm-specific hook with no shared header to include:
+                */
+               struct device *msm_iommu_get_ctx(const char *ctx_name);
+               struct device *ctx = msm_iommu_get_ctx(names[i]);
+               if (IS_ERR_OR_NULL(ctx))
+                       continue;
+               ret = iommu_attach_device(iommu->domain, ctx);
+               if (ret) {
+                       dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/* Map a scatterlist into the IOMMU domain starting at @iova with @prot
+ * permissions.  On failure, everything mapped so far is torn back down
+ * before returning the iommu_map() error.
+ */
+static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+               struct sg_table *sgt, unsigned len, int prot)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       struct iommu_domain *domain = iommu->domain;
+       struct scatterlist *sg;
+       unsigned int da = iova;
+       unsigned int i, j;
+       int ret;
+
+       if (!domain || !sgt)
+               return -EINVAL;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               /* map from the page boundary, covering the sg offset: */
+               u32 pa = sg_phys(sg) - sg->offset;
+               size_t bytes = sg->length + sg->offset;
+
+               VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+               ret = iommu_map(domain, da, pa, bytes, prot);
+               if (ret)
+                       goto fail;
+
+               da += bytes;
+       }
+
+       return 0;
+
+fail:
+       da = iova;
+
+       /* unwind: unmap only the i entries that were mapped successfully */
+       for_each_sg(sgt->sgl, sg, i, j) {
+               size_t bytes = sg->length + sg->offset;
+               iommu_unmap(domain, da, bytes);
+               da += bytes;
+       }
+       return ret;
+}
+
+/* Unmap a previously-mapped scatterlist from the IOMMU domain.
+ * Returns 0 on success.  NOTE(review): on a short unmap this returns
+ * the partial byte count (possibly 0, which is indistinguishable from
+ * success) — confirm callers only check for nonzero.
+ */
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+               struct sg_table *sgt, unsigned len)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       struct iommu_domain *domain = iommu->domain;
+       struct scatterlist *sg;
+       unsigned int da = iova;
+       int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               size_t bytes = sg->length + sg->offset;
+               size_t unmapped;
+
+               unmapped = iommu_unmap(domain, da, bytes);
+               if (unmapped < bytes)
+                       return unmapped;
+
+               VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+               BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+               da += bytes;
+       }
+
+       return 0;
+}
+
+/* Tear down the backend: free the IOMMU domain (which msm_iommu_new
+ * took ownership of) and the wrapper allocation.
+ */
+static void msm_iommu_destroy(struct msm_mmu *mmu)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       iommu_domain_free(iommu->domain);
+       kfree(iommu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+               .attach = msm_iommu_attach,
+               .map = msm_iommu_map,
+               .unmap = msm_iommu_unmap,
+               .destroy = msm_iommu_destroy,
+};
+
+/* Wrap an iommu_domain in an msm_mmu backend.  Takes ownership of
+ * @domain (freed in msm_iommu_destroy) and installs the debug fault
+ * handler.  Returns ERR_PTR(-ENOMEM) on allocation failure.
+ */
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+{
+       struct msm_iommu *iommu;
+
+       iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+       if (!iommu)
+               return ERR_PTR(-ENOMEM);
+
+       iommu->domain = domain;
+       msm_mmu_init(&iommu->base, dev, &funcs);
+       iommu_set_fault_handler(domain, msm_fault_handler, dev);
+
+       return &iommu->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
new file mode 100644 (file)
index 0000000..0643774
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_KMS_H__
+#define __MSM_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime.  The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+       /* hw initialization: */
+       int (*hw_init)(struct msm_kms *kms);
+       /* irq handling: */
+       void (*irq_preinstall)(struct msm_kms *kms);
+       int (*irq_postinstall)(struct msm_kms *kms);
+       void (*irq_uninstall)(struct msm_kms *kms);
+       irqreturn_t (*irq)(struct msm_kms *kms);
+       int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+       void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+       /* misc: */
+       const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+       long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+                       struct drm_encoder *encoder);
+       /* cleanup: */
+       void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+       void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+       const struct msm_kms_funcs *funcs;
+
+       /* irq handling: */
+       bool in_irq;
+       struct list_head irq_list;    /* list of mdp4_irq */
+       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+};
+
+/* Common init for a kms backend: install the per-generation vtable. */
+static inline void msm_kms_init(struct msm_kms *kms,
+               const struct msm_kms_funcs *funcs)
+{
+       kms->funcs = funcs;
+}
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
new file mode 100644 (file)
index 0000000..0303244
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_MMU_H__
+#define __MSM_MMU_H__
+
+#include <linux/iommu.h>
+
+/* Backend ops for an MMU implementation (IOMMU, GPUMMU, ...): */
+struct msm_mmu_funcs {
+       /* attach to the named hw contexts (backend-specific meaning) */
+       int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+       /* map/unmap a scatterlist at the given device address */
+       int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+                       unsigned len, int prot);
+       int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+                       unsigned len);
+       /* release the backend and any owned hw resources */
+       void (*destroy)(struct msm_mmu *mmu);
+};
+
+struct msm_mmu {
+       const struct msm_mmu_funcs *funcs;
+       struct drm_device *dev;
+};
+
+/* Common init for an MMU backend: record owning device and vtable. */
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+               const struct msm_mmu_funcs *funcs)
+{
+       mmu->dev = dev;
+       mmu->funcs = funcs;
+}
+
+struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+
+#endif /* __MSM_MMU_H__ */
index b3fa1ba191b7115be412894936c3633bf9221188..e88145ba1bf515429a3b85753bde5df7d28bde70 100644 (file)
@@ -41,6 +41,7 @@ nouveau-y += core/subdev/bios/init.o
 nouveau-y += core/subdev/bios/mxm.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/ramcfg.o
 nouveau-y += core/subdev/bios/rammap.o
 nouveau-y += core/subdev/bios/timing.o
 nouveau-y += core/subdev/bios/therm.o
@@ -71,7 +72,10 @@ nouveau-y += core/subdev/devinit/nv10.o
 nouveau-y += core/subdev/devinit/nv1a.o
 nouveau-y += core/subdev/devinit/nv20.o
 nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/devinit/nv84.o
+nouveau-y += core/subdev/devinit/nv98.o
 nouveau-y += core/subdev/devinit/nva3.o
+nouveau-y += core/subdev/devinit/nvaf.o
 nouveau-y += core/subdev/devinit/nvc0.o
 nouveau-y += core/subdev/fb/base.o
 nouveau-y += core/subdev/fb/nv04.o
@@ -232,6 +236,7 @@ nouveau-y += core/engine/fifo/nv50.o
 nouveau-y += core/engine/fifo/nv84.o
 nouveau-y += core/engine/fifo/nvc0.o
 nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/fifo/nv108.o
 nouveau-y += core/engine/graph/ctxnv40.o
 nouveau-y += core/engine/graph/ctxnv50.o
 nouveau-y += core/engine/graph/ctxnvc0.o
@@ -242,6 +247,7 @@ nouveau-y += core/engine/graph/ctxnvd7.o
 nouveau-y += core/engine/graph/ctxnvd9.o
 nouveau-y += core/engine/graph/ctxnve4.o
 nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxnv108.o
 nouveau-y += core/engine/graph/nv04.o
 nouveau-y += core/engine/graph/nv10.o
 nouveau-y += core/engine/graph/nv20.o
@@ -260,6 +266,7 @@ nouveau-y += core/engine/graph/nvd7.o
 nouveau-y += core/engine/graph/nvd9.o
 nouveau-y += core/engine/graph/nve4.o
 nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/nv108.o
 nouveau-y += core/engine/mpeg/nv31.o
 nouveau-y += core/engine/mpeg/nv40.o
 nouveau-y += core/engine/mpeg/nv44.o
index c8bed4a2683339072786a0b4d146b06c9594cc5b..1f6954ae9dd36e51141a9b810d91e49f1ddad2b2 100644 (file)
@@ -42,11 +42,24 @@ nouveau_engine_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       if ( parent &&
-           !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
-               if (!enable)
-                       nv_warn(engine, "disabled, %s=1 to enable\n", iname);
-               return -ENODEV;
+       if (parent) {
+               struct nouveau_device *device = nv_device(parent);
+               int engidx = nv_engidx(nv_object(engine));
+
+               if (device->disable_mask & (1ULL << engidx)) {
+                       if (!nouveau_boolopt(device->cfgopt, iname, false)) {
+                               nv_debug(engine, "engine disabled by hw/fw\n");
+                               return -ENODEV;
+                       }
+
+                       nv_warn(engine, "ignoring hw/fw engine disable\n");
+               }
+
+               if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+                       if (!enable)
+                               nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+                       return -ENODEV;
+               }
        }
 
        INIT_LIST_HEAD(&engine->contexts);
index 993df09ad64386e51ab7316e1dbc57de5cba5409..ac3291f781f6e4c204506e02d39d8ca521cf8917 100644 (file)
@@ -105,9 +105,6 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nvc0_copy_priv *priv;
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000100)
-               return -ENODEV;
-
        ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
                                    "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
@@ -133,9 +130,6 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nvc0_copy_priv *priv;
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000200)
-               return -ENODEV;
-
        ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
                                    "PCE1", "copy1", &priv);
        *pobject = nv_object(priv);
index 30f1ef1edcc59db83deb0e12a41f6d1f399e23c6..748a61eb3c6f25fe4c08bed0a374b2148852e0f5 100644 (file)
@@ -88,9 +88,6 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nve0_copy_priv *priv;
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000100)
-               return -ENODEV;
-
        ret = nouveau_engine_create(parent, engine, oclass, true,
                                    "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
@@ -112,9 +109,6 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nve0_copy_priv *priv;
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000200)
-               return -ENODEV;
-
        ret = nouveau_engine_create(parent, engine, oclass, true,
                                    "PCE1", "copy1", &priv);
        *pobject = nv_object(priv);
index dbd2dde7b7e7649d2873c513d69a263b59329b77..32113b08c4d5fb16e401e871ec65080d61a63e7c 100644 (file)
@@ -49,12 +49,12 @@ nv04_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv04_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
@@ -67,12 +67,12 @@ nv04_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv05_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
index 6e03dd6abeea51da34fe9e1046e50075fc481070..744f15d7e1315a6a3b9406f364d0a8bf78231cd5 100644 (file)
@@ -51,12 +51,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
@@ -68,12 +68,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -87,12 +87,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -106,12 +106,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -125,12 +125,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
@@ -144,12 +144,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -163,12 +163,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -182,12 +182,12 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
index dcde53b9f07f7cd77f2886b8c8b927054691f289..27ba61fb271045c1efa9acd2982c48ce7b60f278 100644 (file)
@@ -52,12 +52,12 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -71,12 +71,12 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -90,12 +90,12 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -109,12 +109,12 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
index 7b8662ef4f59193a91246ad074c8e87d84bb9019..fd47ace67543cd15f6be3c6b6799aefe497e4dcf 100644 (file)
@@ -52,12 +52,12 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -71,12 +71,12 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -90,12 +90,12 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -110,12 +110,12 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
@@ -130,12 +130,12 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
index c8c41e93695ee3d0f83da140f0615cb737c14472..1b653dd74a7046ceaae41bca181f3097827dcccd 100644 (file)
@@ -57,12 +57,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -80,12 +80,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -103,12 +103,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -126,12 +126,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -149,12 +149,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -172,12 +172,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -195,12 +195,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -218,12 +218,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -241,12 +241,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -264,12 +264,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -287,12 +287,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -310,12 +310,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -333,12 +333,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv4e_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,12 +356,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -379,12 +379,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -402,12 +402,12 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
index db3fc7be856a733d07f8431a182326e3f20b4182..81d5c26643d50dcff2067abdf0f2bb7a227621ad 100644 (file)
@@ -65,12 +65,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv50_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv50_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -90,12 +90,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -118,12 +118,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -146,12 +146,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -174,12 +174,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -202,12 +202,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -230,12 +230,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv98_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -258,12 +258,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv84_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -286,12 +286,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nvaa_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv98_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -314,12 +314,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nvaa_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv98_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
@@ -342,12 +342,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nva3_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
@@ -372,12 +372,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nva3_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
@@ -401,12 +401,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nva3_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
@@ -430,12 +430,12 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvaf_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvaf_fb_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
index dbc5e33de94f8b6caebbab7319d3bfb7117c57f2..b7d66b59f43d4c7cc3ea0c733f809f966c37aa3d 100644 (file)
@@ -65,14 +65,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -97,14 +97,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -129,14 +129,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -160,14 +160,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -192,14 +192,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -224,14 +224,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -255,14 +255,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
@@ -287,14 +287,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -318,14 +318,14 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
index 3900104976fc7bfc2ca3df441318254bdae3bf97..987edbc30a0917cc2db2a035c749f42c5818b5b9 100644 (file)
@@ -65,14 +65,14 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -98,14 +98,14 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -131,14 +131,14 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -164,14 +164,14 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
@@ -199,29 +199,27 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-               device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
                device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
                device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
                device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
                device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
-               device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
                device->oclass[NVDEV_SUBDEV_PWR    ] = &nv108_pwr_oclass;
                device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-#if 0
-               device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_FIFO   ] =  nv108_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
-               device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
-#endif
+               device->oclass[NVDEV_ENGINE_GR     ] =  nv108_graph_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
-#if 0
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+#if 0
                device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
index a0bc8a89b69941b1bcf449084ce1dcea9a2ff666..7cf8b13486326fdcf64066cb368203668024d11a 100644 (file)
@@ -31,9 +31,45 @@ struct nv04_disp_priv {
        struct nouveau_disp base;
 };
 
+static int
+nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
+                    void *data, u32 size)
+{
+       struct nv04_disp_priv *priv = (void *)object->engine;
+       struct nv04_display_scanoutpos *args = data;
+       const int head = (mthd & NV04_DISP_MTHD_HEAD);
+       u32 line;
+
+       if (size < sizeof(*args))
+               return -EINVAL;
+
+       args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
+       args->vtotal  = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
+       args->vblanke = args->vtotal - 1;
+
+       args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
+       args->htotal  = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
+       args->hblanke = args->htotal - 1;
+
+       args->time[0] = ktime_to_ns(ktime_get());
+       line = nv_rd32(priv, 0x600868 + (head * 0x2000));
+       args->time[1] = ktime_to_ns(ktime_get());
+       args->hline = (line & 0xffff0000) >> 16;
+       args->vline = (line & 0x0000ffff);
+       return 0;
+}
+
+#define HEAD_MTHD(n) (n), (n) + 0x01
+
+static struct nouveau_omthds
+nv04_disp_omthds[] = {
+       { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
+       {}
+};
+
 static struct nouveau_oclass
 nv04_disp_sclass[] = {
-       { NV04_DISP_CLASS, &nouveau_object_ofuncs },
+       { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
        {},
 };
 
index c168ae3eaa97cab0a18e6a10b9ee811e815a5a66..940eaa5d8b9a4bb0e3ea224cfe2ced8ec8585b44 100644 (file)
@@ -541,6 +541,35 @@ nv50_disp_curs_ofuncs = {
  * Base display object
  ******************************************************************************/
 
+int
+nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+                         void *data, u32 size)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv04_display_scanoutpos *args = data;
+       const int head = (mthd & NV50_DISP_MTHD_HEAD);
+       u32 blanke, blanks, total;
+
+       if (size < sizeof(*args) || head >= priv->head.nr)
+               return -EINVAL;
+       blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+       blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+       total  = nv_rd32(priv, 0x610afc + (head * 0x540));
+
+       args->vblanke = (blanke & 0xffff0000) >> 16;
+       args->hblanke = (blanke & 0x0000ffff);
+       args->vblanks = (blanks & 0xffff0000) >> 16;
+       args->hblanks = (blanks & 0x0000ffff);
+       args->vtotal  = ( total & 0xffff0000) >> 16;
+       args->htotal  = ( total & 0x0000ffff);
+
+       args->time[0] = ktime_to_ns(ktime_get());
+       args->vline   = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+       args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+       args->hline   = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+       return 0;
+}
+
 static void
 nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
 {
@@ -675,6 +704,7 @@ nv50_disp_base_ofuncs = {
 
 static struct nouveau_omthds
 nv50_disp_base_omthds[] = {
+       { HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
        { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
        { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
        { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
index 1ae6ceb5670403b0150f931f2cb01815a4a56569..d31d426ea1f6327c96d77e21210d0b792ba87505 100644 (file)
@@ -43,6 +43,10 @@ struct nv50_disp_priv {
        } pior;
 };
 
+#define HEAD_MTHD(n) (n), (n) + 0x03
+
+int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+
 #define DAC_MTHD(n) (n), (n) + 0x03
 
 int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
@@ -132,13 +136,12 @@ void nv50_disp_intr(struct nouveau_subdev *);
 
 extern struct nouveau_omthds nv84_disp_base_omthds[];
 
-extern struct nouveau_omthds nva3_disp_base_omthds[];
-
 extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
 extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_omthds nvd0_disp_base_omthds[];
 extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
 extern struct nouveau_oclass nvd0_disp_cclass;
 void nvd0_disp_intr_supervisor(struct work_struct *);
index d8c74c0883a16904b92081857552182721eca183..ef9ce300a496e0dc1c5baae4c5f1a041d112ea2a 100644 (file)
@@ -41,6 +41,7 @@ nv84_disp_sclass[] = {
 
 struct nouveau_omthds
 nv84_disp_base_omthds[] = {
+       { HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
        { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
        { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
        { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
index a66f949c1f84119479f5c5ffbe809fad278db4d2..a518543c00ab624f8df1b4fd22e66293943b8f02 100644 (file)
@@ -41,6 +41,7 @@ nv94_disp_sclass[] = {
 
 static struct nouveau_omthds
 nv94_disp_base_omthds[] = {
+       { HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
        { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
        { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
        { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
index b75413169eaee91e07dd73dd08e1a7efada6ed34..6ad6dcece43bde52062c5c4563662586dfaef7d6 100644 (file)
@@ -39,8 +39,9 @@ nva3_disp_sclass[] = {
        {}
 };
 
-struct nouveau_omthds
+static struct nouveau_omthds
 nva3_disp_base_omthds[] = {
+       { HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nv50_disp_base_scanoutpos },
        { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
        { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
        { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
index 378a015091d2bf22d9a2ba5e49cd095b3ac774c7..1c5e4e8b2c822b37a140932d5b5378d5d2deb309 100644 (file)
@@ -440,6 +440,36 @@ nvd0_disp_curs_ofuncs = {
  * Base display object
  ******************************************************************************/
 
+static int
+nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
+                         void *data, u32 size)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv04_display_scanoutpos *args = data;
+       const int head = (mthd & NV50_DISP_MTHD_HEAD);
+       u32 blanke, blanks, total;
+
+       if (size < sizeof(*args) || head >= priv->head.nr)
+               return -EINVAL;
+
+       total  = nv_rd32(priv, 0x640414 + (head * 0x300));
+       blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+       blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+
+       args->vblanke = (blanke & 0xffff0000) >> 16;
+       args->hblanke = (blanke & 0x0000ffff);
+       args->vblanks = (blanks & 0xffff0000) >> 16;
+       args->hblanks = (blanks & 0x0000ffff);
+       args->vtotal  = ( total & 0xffff0000) >> 16;
+       args->htotal  = ( total & 0x0000ffff);
+
+       args->time[0] = ktime_to_ns(ktime_get());
+       args->vline   = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+       args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
+       args->hline   = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+       return 0;
+}
+
 static void
 nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
 {
@@ -573,9 +603,24 @@ nvd0_disp_base_ofuncs = {
        .fini = nvd0_disp_base_fini,
 };
 
+struct nouveau_omthds
+nvd0_disp_base_omthds[] = {
+       { HEAD_MTHD(NV50_DISP_SCANOUTPOS)     , nvd0_disp_base_scanoutpos },
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       { PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+       { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+       { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
+       {},
+};
+
 static struct nouveau_oclass
 nvd0_disp_base_oclass[] = {
-       { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
        {}
 };
 
@@ -967,9 +1012,6 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        int heads = nv_rd32(parent, 0x022448);
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000001)
-               return -ENODEV;
-
        ret = nouveau_disp_create(parent, engine, oclass, heads,
                                  "PDISP", "display", &priv);
        *pobject = nv_object(priv);
index fb1fe6ae5e74ddbd6f3336a3d34de8eb58757b5b..ab63f32c00b2478d2cb0a2ebd062f946d394044d 100644 (file)
@@ -41,7 +41,7 @@ nve0_disp_sclass[] = {
 
 static struct nouveau_oclass
 nve0_disp_base_oclass[] = {
-       { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
        {}
 };
 
@@ -54,9 +54,6 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        int heads = nv_rd32(parent, 0x022448);
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000001)
-               return -ENODEV;
-
        ret = nouveau_disp_create(parent, engine, oclass, heads,
                                  "PDISP", "display", &priv);
        *pobject = nv_object(priv);
index 42aa6b97dbea3c5867676f0209151a1ead3a849d..05fee10e0c975a166fec5b68d98d6e53cd1fae55 100644 (file)
@@ -41,7 +41,7 @@ nvf0_disp_sclass[] = {
 
 static struct nouveau_oclass
 nvf0_disp_base_oclass[] = {
-       { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
        {}
 };
 
@@ -54,9 +54,6 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        int heads = nv_rd32(parent, 0x022448);
        int ret;
 
-       if (nv_rd32(parent, 0x022500) & 0x00000001)
-               return -ENODEV;
-
        ret = nouveau_disp_create(parent, engine, oclass, heads,
                                  "PDISP", "display", &priv);
        *pobject = nv_object(priv);
index 5a1c68474597cbc979eb216735941714b1ce4901..8836c3cb99c3862a84ed2b946e6c3e34c47e520d 100644 (file)
@@ -138,10 +138,15 @@ nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
 bool
 nv_lockvgac(void *obj, bool lock)
 {
+       struct nouveau_device *dev = nv_device(obj);
+
        bool locked = !nv_rdvgac(obj, 0, 0x1f);
        u8 data = lock ? 0x99 : 0x57;
-       nv_wrvgac(obj, 0, 0x1f, data);
-       if (nv_device(obj)->chipset == 0x11) {
+       if (dev->card_type < NV_50)
+               nv_wrvgac(obj, 0, 0x1f, data);
+       else
+               nv_wrvgac(obj, 0, 0x3f, data);
+       if (dev->chipset == 0x11) {
                if (!(nv_rd32(obj, 0x001084) & 0x10000000))
                        nv_wrvgac(obj, 1, 0x1f, data);
        }
index e03fc8e4dc1dac8c8262a93b2b0b812dcae60841..5e077e4ed7f6aa23d0e8ed3fde50ad58c2fd99d7 100644 (file)
@@ -56,6 +56,16 @@ _nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
        nv_wr32(falcon, falcon->addr + addr, data);
 }
 
+static void *
+vmemdup(const void *src, size_t len)
+{
+       void *p = vmalloc(len);
+
+       if (p)
+               memcpy(p, src, len);
+       return p;
+}
+
 int
 _nouveau_falcon_init(struct nouveau_object *object)
 {
@@ -111,7 +121,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
 
                ret = request_firmware(&fw, name, &device->pdev->dev);
                if (ret == 0) {
-                       falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+                       falcon->code.data = vmemdup(fw->data, fw->size);
                        falcon->code.size = fw->size;
                        falcon->data.data = NULL;
                        falcon->data.size = 0;
@@ -134,7 +144,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
                        return ret;
                }
 
-               falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               falcon->data.data = vmemdup(fw->data, fw->size);
                falcon->data.size = fw->size;
                release_firmware(fw);
                if (!falcon->data.data)
@@ -149,7 +159,7 @@ _nouveau_falcon_init(struct nouveau_object *object)
                        return ret;
                }
 
-               falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               falcon->code.data = vmemdup(fw->data, fw->size);
                falcon->code.size = fw->size;
                release_firmware(fw);
                if (!falcon->code.data)
@@ -235,8 +245,8 @@ _nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
        if (!suspend) {
                nouveau_gpuobj_ref(NULL, &falcon->core);
                if (falcon->external) {
-                       kfree(falcon->data.data);
-                       kfree(falcon->code.data);
+                       vfree(falcon->data.data);
+                       vfree(falcon->code.data);
                        falcon->code.data = NULL;
                }
        }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv108.c
new file mode 100644 (file)
index 0000000..09362a5
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nve0.h"
+
+struct nouveau_oclass *
+nv108_fifo_oclass = &(struct nve0_fifo_impl) {
+       .base.handle = NV_ENGINE(FIFO, 0x08),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_fifo_ctor,
+               .dtor = nve0_fifo_dtor,
+               .init = nve0_fifo_init,
+               .fini = _nouveau_fifo_fini,
+       },
+       .channels = 1024,
+}.base;
index 9ac94d4e5646d2cd24579f3b47777f2e17f5e9fe..b22a33f0702dd0159ed01f79edb00b7349f6b561 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <subdev/timer.h>
 #include <subdev/bar.h>
+#include <subdev/fb.h>
 #include <subdev/vm.h>
 
 #include <engine/dmaobj.h>
index 04f412922d2d43f5a5fbce11185fc2f9f4196e7e..9a850fe19515fe2ddcd513c3497c7d87c86cba8a 100644 (file)
 
 #include <subdev/timer.h>
 #include <subdev/bar.h>
+#include <subdev/fb.h>
 #include <subdev/vm.h>
 
 #include <engine/dmaobj.h>
-#include <engine/fifo.h>
+
+#include "nve0.h"
 
 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
 static const struct {
@@ -56,8 +58,8 @@ static const struct {
 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
 
 struct nve0_fifo_engn {
-       struct nouveau_gpuobj *playlist[2];
-       int cur_playlist;
+       struct nouveau_gpuobj *runlist[2];
+       int cur_runlist;
 };
 
 struct nve0_fifo_priv {
@@ -86,7 +88,7 @@ struct nve0_fifo_chan {
  ******************************************************************************/
 
 static void
-nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
 {
        struct nouveau_bar *bar = nouveau_bar(priv);
        struct nve0_fifo_engn *engn = &priv->engine[engine];
@@ -95,8 +97,8 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
        int i, p;
 
        mutex_lock(&nv_subdev(priv)->mutex);
-       cur = engn->playlist[engn->cur_playlist];
-       engn->cur_playlist = !engn->cur_playlist;
+       cur = engn->runlist[engn->cur_runlist];
+       engn->cur_runlist = !engn->cur_runlist;
 
        for (i = 0, p = 0; i < priv->base.max; i++) {
                u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
@@ -111,7 +113,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
        nv_wr32(priv, 0x002270, cur->addr >> 12);
        nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
-               nv_error(priv, "playlist %d update timeout\n", engine);
+               nv_error(priv, "runlist %d update timeout\n", engine);
        mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
@@ -278,7 +280,7 @@ nve0_fifo_chan_init(struct nouveau_object *object)
        nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
        nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-       nve0_fifo_playlist_update(priv, chan->engine);
+       nve0_fifo_runlist_update(priv, chan->engine);
        nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
        return 0;
 }
@@ -291,7 +293,7 @@ nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
        u32 chid = chan->base.chid;
 
        nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
-       nve0_fifo_playlist_update(priv, chan->engine);
+       nve0_fifo_runlist_update(priv, chan->engine);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
 
        return nouveau_fifo_channel_fini(&chan->base, suspend);
@@ -375,54 +377,189 @@ nve0_fifo_cclass = {
  * PFIFO engine
  ******************************************************************************/
 
-static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+static const struct nouveau_enum nve0_fifo_sched_reason[] = {
+       { 0x0a, "CTXSW_TIMEOUT" },
+       {}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_engine[] = {
+       { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
+       { 0x03, "IFB" },
+       { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
+       { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
+       { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
+       { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
+       { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
+       { 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
+       { 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
+       { 0x13, "PERF" },
+       { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
+       { 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
+       { 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
+       { 0x17, "PMU" },
+       { 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
+       { 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
        {}
 };
 
 static const struct nouveau_enum nve0_fifo_fault_reason[] = {
-       { 0x00, "PT_NOT_PRESENT" },
-       { 0x01, "PT_TOO_SHORT" },
-       { 0x02, "PAGE_NOT_PRESENT" },
-       { 0x03, "VM_LIMIT_EXCEEDED" },
-       { 0x04, "NO_CHANNEL" },
-       { 0x05, "PAGE_SYSTEM_ONLY" },
-       { 0x06, "PAGE_READ_ONLY" },
-       { 0x0a, "COMPRESSED_SYSRAM" },
-       { 0x0c, "INVALID_STORAGE_TYPE" },
+       { 0x00, "PDE" },
+       { 0x01, "PDE_SIZE" },
+       { 0x02, "PTE" },
+       { 0x03, "VA_LIMIT_VIOLATION" },
+       { 0x04, "UNBOUND_INST_BLOCK" },
+       { 0x05, "PRIV_VIOLATION" },
+       { 0x06, "RO_VIOLATION" },
+       { 0x07, "WO_VIOLATION" },
+       { 0x08, "PITCH_MASK_VIOLATION" },
+       { 0x09, "WORK_CREATION" },
+       { 0x0a, "UNSUPPORTED_APERTURE" },
+       { 0x0b, "COMPRESSION_FAILURE" },
+       { 0x0c, "UNSUPPORTED_KIND" },
+       { 0x0d, "REGION_VIOLATION" },
+       { 0x0e, "BOTH_PTES_VALID" },
+       { 0x0f, "INFO_TYPE_POISONED" },
        {}
 };
 
 static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+       { 0x00, "VIP" },
+       { 0x01, "CE0" },
+       { 0x02, "CE1" },
+       { 0x03, "DNISO" },
+       { 0x04, "FE" },
+       { 0x05, "FECS" },
+       { 0x06, "HOST" },
+       { 0x07, "HOST_CPU" },
+       { 0x08, "HOST_CPU_NB" },
+       { 0x09, "ISO" },
+       { 0x0a, "MMU" },
+       { 0x0b, "MSPDEC" },
+       { 0x0c, "MSPPP" },
+       { 0x0d, "MSVLD" },
+       { 0x0e, "NISO" },
+       { 0x0f, "P2P" },
+       { 0x10, "PD" },
+       { 0x11, "PERF" },
+       { 0x12, "PMU" },
+       { 0x13, "RASTERTWOD" },
+       { 0x14, "SCC" },
+       { 0x15, "SCC_NB" },
+       { 0x16, "SEC" },
+       { 0x17, "SSYNC" },
+       { 0x18, "GR_COPY" },
+       { 0x19, "CE2" },
+       { 0x1a, "XV" },
+       { 0x1b, "MMU_NB" },
+       { 0x1c, "MSENC" },
+       { 0x1d, "DFALCON" },
+       { 0x1e, "SKED" },
+       { 0x1f, "AFALCON" },
        {}
 };
 
 static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+       { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+       { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+       { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+       { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+       { 0x0c, "RAST" },
+       { 0x0d, "GCC" },
+       { 0x0e, "GPCCS" },
+       { 0x0f, "PROP_0" },
+       { 0x10, "PROP_1" },
+       { 0x11, "PROP_2" },
+       { 0x12, "PROP_3" },
+       { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+       { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+       { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+       { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+       { 0x1f, "GPM" },
+       { 0x20, "LTP_UTLB_0" },
+       { 0x21, "LTP_UTLB_1" },
+       { 0x22, "LTP_UTLB_2" },
+       { 0x23, "LTP_UTLB_3" },
+       { 0x24, "GPC_RGG_UTLB" },
        {}
 };
 
-static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
-       { 0x00200000, "ILLEGAL_MTHD" },
-       { 0x00800000, "EMPTY_SUBC" },
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+       { 0x00000001, "MEMREQ" },
+       { 0x00000002, "MEMACK_TIMEOUT" },
+       { 0x00000004, "MEMACK_EXTRA" },
+       { 0x00000008, "MEMDAT_TIMEOUT" },
+       { 0x00000010, "MEMDAT_EXTRA" },
+       { 0x00000020, "MEMFLUSH" },
+       { 0x00000040, "MEMOP" },
+       { 0x00000080, "LBCONNECT" },
+       { 0x00000100, "LBREQ" },
+       { 0x00000200, "LBACK_TIMEOUT" },
+       { 0x00000400, "LBACK_EXTRA" },
+       { 0x00000800, "LBDAT_TIMEOUT" },
+       { 0x00001000, "LBDAT_EXTRA" },
+       { 0x00002000, "GPFIFO" },
+       { 0x00004000, "GPPTR" },
+       { 0x00008000, "GPENTRY" },
+       { 0x00010000, "GPCRC" },
+       { 0x00020000, "PBPTR" },
+       { 0x00040000, "PBENTRY" },
+       { 0x00080000, "PBCRC" },
+       { 0x00100000, "XBARCONNECT" },
+       { 0x00200000, "METHOD" },
+       { 0x00400000, "METHODCRC" },
+       { 0x00800000, "DEVICE" },
+       { 0x02000000, "SEMAPHORE" },
+       { 0x04000000, "ACQUIRE" },
+       { 0x08000000, "PRI" },
+       { 0x20000000, "NO_CTXSW_SEG" },
+       { 0x40000000, "PBSEG" },
+       { 0x80000000, "SIGNATURE" },
        {}
 };
 
 static void
-nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
+{
+       u32 intr = nv_rd32(priv, 0x00254c);
+       u32 code = intr & 0x000000ff;
+       nv_error(priv, "SCHED_ERROR [");
+       nouveau_enum_print(nve0_fifo_sched_reason, code);
+       pr_cont("]\n");
+}
+
+static void
+nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
+{
+       u32 stat = nv_rd32(priv, 0x00256c);
+       nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
+       nv_wr32(priv, 0x00256c, stat);
+}
+
+static void
+nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
+{
+       u32 stat = nv_rd32(priv, 0x00259c);
+       nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+}
+
+static void
+nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
 {
        u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
        u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
        u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
        u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
        u32 client = (stat & 0x00001f00) >> 8;
-       const struct nouveau_enum *en;
-       struct nouveau_engine *engine;
+       struct nouveau_engine *engine = NULL;
        struct nouveau_object *engctx = NULL;
+       const struct nouveau_enum *en;
+       const char *name = "unknown";
 
        nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
                       "write" : "read", (u64)vahi << 32 | valo);
        nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
        pr_cont("] from ");
-       en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
+       en = nouveau_enum_print(nve0_fifo_fault_engine, unit);
        if (stat & 0x00000040) {
                pr_cont("/");
                nouveau_enum_print(nve0_fifo_fault_hubclient, client);
@@ -432,14 +569,22 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
        }
 
        if (en && en->data2) {
-               engine = nouveau_engine(priv, en->data2);
-               if (engine)
-                       engctx = nouveau_engctx_get(engine, inst);
-
+               if (en->data2 == NVDEV_SUBDEV_BAR) {
+                       nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+                       name = "BAR1";
+               } else
+               if (en->data2 == NVDEV_SUBDEV_INSTMEM) {
+                       nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+                       name = "BAR3";
+               } else {
+                       engine = nouveau_engine(priv, en->data2);
+                       if (engine) {
+                               engctx = nouveau_engctx_get(engine, inst);
+                               name   = nouveau_client_name(engctx);
+                       }
+               }
        }
-
-       pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
-                       nouveau_client_name(engctx));
+       pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, name);
 
        nouveau_engctx_put(engctx);
 }
@@ -471,7 +616,7 @@ out:
 }
 
 static void
-nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
 {
        u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
        u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
@@ -487,11 +632,11 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
        }
 
        if (show) {
-               nv_error(priv, "SUBFIFO%d:", unit);
-               nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+               nv_error(priv, "PBDMA%d:", unit);
+               nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
                pr_cont("\n");
                nv_error(priv,
-                        "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+                        "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
                         unit, chid,
                         nouveau_client_name_for_fifo_chid(&priv->base, chid),
                         subc, mthd, data);
@@ -508,19 +653,56 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
        u32 mask = nv_rd32(priv, 0x002140);
        u32 stat = nv_rd32(priv, 0x002100) & mask;
 
+       if (stat & 0x00000001) {
+               u32 stat = nv_rd32(priv, 0x00252c);
+               nv_error(priv, "BIND_ERROR 0x%08x\n", stat);
+               nv_wr32(priv, 0x002100, 0x00000001);
+               stat &= ~0x00000001;
+       }
+
+       if (stat & 0x00000010) {
+               nv_error(priv, "PIO_ERROR\n");
+               nv_wr32(priv, 0x002100, 0x00000010);
+               stat &= ~0x00000010;
+       }
+
        if (stat & 0x00000100) {
-               nv_warn(priv, "unknown status 0x00000100\n");
+               nve0_fifo_intr_sched(priv);
                nv_wr32(priv, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }
 
+       if (stat & 0x00010000) {
+               nve0_fifo_intr_chsw(priv);
+               nv_wr32(priv, 0x002100, 0x00010000);
+               stat &= ~0x00010000;
+       }
+
+       if (stat & 0x00800000) {
+               nv_error(priv, "FB_FLUSH_TIMEOUT\n");
+               nv_wr32(priv, 0x002100, 0x00800000);
+               stat &= ~0x00800000;
+       }
+
+       if (stat & 0x01000000) {
+               nv_error(priv, "LB_ERROR\n");
+               nv_wr32(priv, 0x002100, 0x01000000);
+               stat &= ~0x01000000;
+       }
+
+       if (stat & 0x08000000) {
+               nve0_fifo_intr_dropped_fault(priv);
+               nv_wr32(priv, 0x002100, 0x08000000);
+               stat &= ~0x08000000;
+       }
+
        if (stat & 0x10000000) {
                u32 units = nv_rd32(priv, 0x00259c);
                u32 u = units;
 
                while (u) {
                        int i = ffs(u) - 1;
-                       nve0_fifo_isr_vm_fault(priv, i);
+                       nve0_fifo_intr_fault(priv, i);
                        u &= ~(1 << i);
                }
 
@@ -529,22 +711,28 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
        }
 
        if (stat & 0x20000000) {
-               u32 units = nv_rd32(priv, 0x0025a0);
-               u32 u = units;
+               u32 mask = nv_rd32(priv, 0x0025a0);
+               u32 temp = mask;
 
-               while (u) {
-                       int i = ffs(u) - 1;
-                       nve0_fifo_isr_subfifo_intr(priv, i);
-                       u &= ~(1 << i);
+               while (temp) {
+                       u32 unit = ffs(temp) - 1;
+                       nve0_fifo_intr_pbdma(priv, unit);
+                       temp &= ~(1 << unit);
                }
 
-               nv_wr32(priv, 0x0025a0, units);
+               nv_wr32(priv, 0x0025a0, mask);
                stat &= ~0x20000000;
        }
 
        if (stat & 0x40000000) {
-               nv_warn(priv, "unknown status 0x40000000\n");
-               nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+               u32 mask = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+
+               while (mask) {
+                       u32 engn = ffs(mask) - 1;
+                       /* runlist event, not currently used */
+                       mask &= ~(1 << engn);
+               }
+
                stat &= ~0x40000000;
        }
 
@@ -575,53 +763,52 @@ nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
        nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
 }
 
-static int
-nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-              struct nouveau_oclass *oclass, void *data, u32 size,
-              struct nouveau_object **pobject)
+int
+nve0_fifo_fini(struct nouveau_object *object, bool suspend)
 {
-       struct nve0_fifo_priv *priv;
-       int ret, i;
+       struct nve0_fifo_priv *priv = (void *)object;
+       int ret;
 
-       ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
-       *pobject = nv_object(priv);
+       ret = nouveau_fifo_fini(&priv->base, suspend);
        if (ret)
                return ret;
 
-       for (i = 0; i < FIFO_ENGINE_NR; i++) {
-               ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-                                        0, &priv->engine[i].playlist[0]);
-               if (ret)
-                       return ret;
+       /* allow mmu fault interrupts, even when we're not using fifo */
+       nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
+       return 0;
+}
 
-               ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-                                        0, &priv->engine[i].playlist[1]);
-               if (ret)
-                       return ret;
-       }
+int
+nve0_fifo_init(struct nouveau_object *object)
+{
+       struct nve0_fifo_priv *priv = (void *)object;
+       int ret, i;
 
-       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
-                                NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+       ret = nouveau_fifo_init(&priv->base);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-                               &priv->user.bar);
-       if (ret)
-               return ret;
+       /* enable all available PBDMA units */
+       nv_wr32(priv, 0x000204, 0xffffffff);
+       priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+       nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
 
-       priv->base.uevent->enable = nve0_fifo_uevent_enable;
-       priv->base.uevent->disable = nve0_fifo_uevent_disable;
-       priv->base.uevent->priv = priv;
+       /* PBDMA[n] */
+       for (i = 0; i < priv->spoon_nr; i++) {
+               nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+               nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+               nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+       }
 
-       nv_subdev(priv)->unit = 0x00000100;
-       nv_subdev(priv)->intr = nve0_fifo_intr;
-       nv_engine(priv)->cclass = &nve0_fifo_cclass;
-       nv_engine(priv)->sclass = nve0_fifo_sclass;
+       nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+       nv_wr32(priv, 0x002a00, 0xffffffff);
+       nv_wr32(priv, 0x002100, 0xffffffff);
+       nv_wr32(priv, 0x002140, 0x3fffffff);
        return 0;
 }
 
-static void
+void
 nve0_fifo_dtor(struct nouveau_object *object)
 {
        struct nve0_fifo_priv *priv = (void *)object;
@@ -631,50 +818,69 @@ nve0_fifo_dtor(struct nouveau_object *object)
        nouveau_gpuobj_ref(NULL, &priv->user.mem);
 
        for (i = 0; i < FIFO_ENGINE_NR; i++) {
-               nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
-               nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+               nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
+               nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
        }
 
        nouveau_fifo_destroy(&priv->base);
 }
 
-static int
-nve0_fifo_init(struct nouveau_object *object)
+int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
 {
-       struct nve0_fifo_priv *priv = (void *)object;
+       struct nve0_fifo_impl *impl = (void *)oclass;
+       struct nve0_fifo_priv *priv;
        int ret, i;
 
-       ret = nouveau_fifo_init(&priv->base);
+       ret = nouveau_fifo_create(parent, engine, oclass, 0,
+                                 impl->channels - 1, &priv);
+       *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       /* enable all available PSUBFIFOs */
-       nv_wr32(priv, 0x000204, 0xffffffff);
-       priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
-       nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+       for (i = 0; i < FIFO_ENGINE_NR; i++) {
+               ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+                                        0, &priv->engine[i].runlist[0]);
+               if (ret)
+                       return ret;
 
-       /* PSUBFIFO[n] */
-       for (i = 0; i < priv->spoon_nr; i++) {
-               nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-               nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-               nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+               ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
+                                        0, &priv->engine[i].runlist[1]);
+               if (ret)
+                       return ret;
        }
 
-       nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+       ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
+                                NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+       if (ret)
+               return ret;
 
-       nv_wr32(priv, 0x002a00, 0xffffffff);
-       nv_wr32(priv, 0x002100, 0xffffffff);
-       nv_wr32(priv, 0x002140, 0x3fffffff);
+       ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+                               &priv->user.bar);
+       if (ret)
+               return ret;
+
+       priv->base.uevent->enable = nve0_fifo_uevent_enable;
+       priv->base.uevent->disable = nve0_fifo_uevent_disable;
+       priv->base.uevent->priv = priv;
+
+       nv_subdev(priv)->unit = 0x00000100;
+       nv_subdev(priv)->intr = nve0_fifo_intr;
+       nv_engine(priv)->cclass = &nve0_fifo_cclass;
+       nv_engine(priv)->sclass = nve0_fifo_sclass;
        return 0;
 }
 
 struct nouveau_oclass *
-nve0_fifo_oclass = &(struct nouveau_oclass) {
-       .handle = NV_ENGINE(FIFO, 0xe0),
-       .ofuncs = &(struct nouveau_ofuncs) {
+nve0_fifo_oclass = &(struct nve0_fifo_impl) {
+       .base.handle = NV_ENGINE(FIFO, 0xe0),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_fifo_ctor,
                .dtor = nve0_fifo_dtor,
                .init = nve0_fifo_init,
-               .fini = _nouveau_fifo_fini,
+               .fini = nve0_fifo_fini,
        },
-};
+       .channels = 4096,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.h
new file mode 100644 (file)
index 0000000..014344e
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __NVKM_FIFO_NVE0_H__
+#define __NVKM_FIFO_NVE0_H__
+
+#include <engine/fifo.h>
+
+int  nve0_fifo_ctor(struct nouveau_object *, struct nouveau_object *,
+                   struct nouveau_oclass *, void *, u32,
+                   struct nouveau_object **);
+void nve0_fifo_dtor(struct nouveau_object *);
+int  nve0_fifo_init(struct nouveau_object *);
+
+struct nve0_fifo_impl {
+       struct nouveau_oclass base;
+       u32 channels;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
new file mode 100644 (file)
index 0000000..a86bd33
--- /dev/null
@@ -0,0 +1,1408 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+static struct nvc0_graph_init
+nv108_grctx_init_icmd[] = {
+       { 0x001000,   1, 0x01, 0x00000004 },
+       { 0x000039,   3, 0x01, 0x00000000 },
+       { 0x0000a9,   1, 0x01, 0x0000ffff },
+       { 0x000038,   1, 0x01, 0x0fac6881 },
+       { 0x00003d,   1, 0x01, 0x00000001 },
+       { 0x0000e8,   8, 0x01, 0x00000400 },
+       { 0x000078,   8, 0x01, 0x00000300 },
+       { 0x000050,   1, 0x01, 0x00000011 },
+       { 0x000058,   8, 0x01, 0x00000008 },
+       { 0x000208,   8, 0x01, 0x00000001 },
+       { 0x000081,   1, 0x01, 0x00000001 },
+       { 0x000085,   1, 0x01, 0x00000004 },
+       { 0x000088,   1, 0x01, 0x00000400 },
+       { 0x000090,   1, 0x01, 0x00000300 },
+       { 0x000098,   1, 0x01, 0x00001001 },
+       { 0x0000e3,   1, 0x01, 0x00000001 },
+       { 0x0000da,   1, 0x01, 0x00000001 },
+       { 0x0000f8,   1, 0x01, 0x00000003 },
+       { 0x0000fa,   1, 0x01, 0x00000001 },
+       { 0x00009f,   4, 0x01, 0x0000ffff },
+       { 0x0000b1,   1, 0x01, 0x00000001 },
+       { 0x0000ad,   1, 0x01, 0x0000013e },
+       { 0x0000e1,   1, 0x01, 0x00000010 },
+       { 0x000290,  16, 0x01, 0x00000000 },
+       { 0x0003b0,  16, 0x01, 0x00000000 },
+       { 0x0002a0,  16, 0x01, 0x00000000 },
+       { 0x000420,  16, 0x01, 0x00000000 },
+       { 0x0002b0,  16, 0x01, 0x00000000 },
+       { 0x000430,  16, 0x01, 0x00000000 },
+       { 0x0002c0,  16, 0x01, 0x00000000 },
+       { 0x0004d0,  16, 0x01, 0x00000000 },
+       { 0x000720,  16, 0x01, 0x00000000 },
+       { 0x0008c0,  16, 0x01, 0x00000000 },
+       { 0x000890,  16, 0x01, 0x00000000 },
+       { 0x0008e0,  16, 0x01, 0x00000000 },
+       { 0x0008a0,  16, 0x01, 0x00000000 },
+       { 0x0008f0,  16, 0x01, 0x00000000 },
+       { 0x00094c,   1, 0x01, 0x000000ff },
+       { 0x00094d,   1, 0x01, 0xffffffff },
+       { 0x00094e,   1, 0x01, 0x00000002 },
+       { 0x0002ec,   1, 0x01, 0x00000001 },
+       { 0x0002f2,   2, 0x01, 0x00000001 },
+       { 0x0002f5,   1, 0x01, 0x00000001 },
+       { 0x0002f7,   1, 0x01, 0x00000001 },
+       { 0x000303,   1, 0x01, 0x00000001 },
+       { 0x0002e6,   1, 0x01, 0x00000001 },
+       { 0x000466,   1, 0x01, 0x00000052 },
+       { 0x000301,   1, 0x01, 0x3f800000 },
+       { 0x000304,   1, 0x01, 0x30201000 },
+       { 0x000305,   1, 0x01, 0x70605040 },
+       { 0x000306,   1, 0x01, 0xb8a89888 },
+       { 0x000307,   1, 0x01, 0xf8e8d8c8 },
+       { 0x00030a,   1, 0x01, 0x00ffff00 },
+       { 0x00030b,   1, 0x01, 0x0000001a },
+       { 0x00030c,   1, 0x01, 0x00000001 },
+       { 0x000318,   1, 0x01, 0x00000001 },
+       { 0x000340,   1, 0x01, 0x00000000 },
+       { 0x000375,   1, 0x01, 0x00000001 },
+       { 0x00037d,   1, 0x01, 0x00000006 },
+       { 0x0003a0,   1, 0x01, 0x00000002 },
+       { 0x0003aa,   1, 0x01, 0x00000001 },
+       { 0x0003a9,   1, 0x01, 0x00000001 },
+       { 0x000380,   1, 0x01, 0x00000001 },
+       { 0x000383,   1, 0x01, 0x00000011 },
+       { 0x000360,   1, 0x01, 0x00000040 },
+       { 0x000366,   2, 0x01, 0x00000000 },
+       { 0x000368,   1, 0x01, 0x00000fff },
+       { 0x000370,   2, 0x01, 0x00000000 },
+       { 0x000372,   1, 0x01, 0x000fffff },
+       { 0x00037a,   1, 0x01, 0x00000012 },
+       { 0x000619,   1, 0x01, 0x00000003 },
+       { 0x000811,   1, 0x01, 0x00000003 },
+       { 0x000812,   1, 0x01, 0x00000004 },
+       { 0x000813,   1, 0x01, 0x00000006 },
+       { 0x000814,   1, 0x01, 0x00000008 },
+       { 0x000815,   1, 0x01, 0x0000000b },
+       { 0x000800,   6, 0x01, 0x00000001 },
+       { 0x000632,   1, 0x01, 0x00000001 },
+       { 0x000633,   1, 0x01, 0x00000002 },
+       { 0x000634,   1, 0x01, 0x00000003 },
+       { 0x000635,   1, 0x01, 0x00000004 },
+       { 0x000654,   1, 0x01, 0x3f800000 },
+       { 0x000657,   1, 0x01, 0x3f800000 },
+       { 0x000655,   2, 0x01, 0x3f800000 },
+       { 0x0006cd,   1, 0x01, 0x3f800000 },
+       { 0x0007f5,   1, 0x01, 0x3f800000 },
+       { 0x0007dc,   1, 0x01, 0x39291909 },
+       { 0x0007dd,   1, 0x01, 0x79695949 },
+       { 0x0007de,   1, 0x01, 0xb9a99989 },
+       { 0x0007df,   1, 0x01, 0xf9e9d9c9 },
+       { 0x0007e8,   1, 0x01, 0x00003210 },
+       { 0x0007e9,   1, 0x01, 0x00007654 },
+       { 0x0007ea,   1, 0x01, 0x00000098 },
+       { 0x0007ec,   1, 0x01, 0x39291909 },
+       { 0x0007ed,   1, 0x01, 0x79695949 },
+       { 0x0007ee,   1, 0x01, 0xb9a99989 },
+       { 0x0007ef,   1, 0x01, 0xf9e9d9c9 },
+       { 0x0007f0,   1, 0x01, 0x00003210 },
+       { 0x0007f1,   1, 0x01, 0x00007654 },
+       { 0x0007f2,   1, 0x01, 0x00000098 },
+       { 0x0005a5,   1, 0x01, 0x00000001 },
+       { 0x000980, 128, 0x01, 0x00000000 },
+       { 0x000468,   1, 0x01, 0x00000004 },
+       { 0x00046c,   1, 0x01, 0x00000001 },
+       { 0x000470,  96, 0x01, 0x00000000 },
+       { 0x000510,  16, 0x01, 0x3f800000 },
+       { 0x000520,   1, 0x01, 0x000002b6 },
+       { 0x000529,   1, 0x01, 0x00000001 },
+       { 0x000530,  16, 0x01, 0xffff0000 },
+       { 0x000585,   1, 0x01, 0x0000003f },
+       { 0x000576,   1, 0x01, 0x00000003 },
+       { 0x00057b,   1, 0x01, 0x00000059 },
+       { 0x000586,   1, 0x01, 0x00000040 },
+       { 0x000582,   2, 0x01, 0x00000080 },
+       { 0x0005c2,   1, 0x01, 0x00000001 },
+       { 0x000638,   2, 0x01, 0x00000001 },
+       { 0x00063a,   1, 0x01, 0x00000002 },
+       { 0x00063b,   2, 0x01, 0x00000001 },
+       { 0x00063d,   1, 0x01, 0x00000002 },
+       { 0x00063e,   1, 0x01, 0x00000001 },
+       { 0x0008b8,   8, 0x01, 0x00000001 },
+       { 0x000900,   8, 0x01, 0x00000001 },
+       { 0x000908,   8, 0x01, 0x00000002 },
+       { 0x000910,  16, 0x01, 0x00000001 },
+       { 0x000920,   8, 0x01, 0x00000002 },
+       { 0x000928,   8, 0x01, 0x00000001 },
+       { 0x000662,   1, 0x01, 0x00000001 },
+       { 0x000648,   9, 0x01, 0x00000001 },
+       { 0x000658,   1, 0x01, 0x0000000f },
+       { 0x0007ff,   1, 0x01, 0x0000000a },
+       { 0x00066a,   1, 0x01, 0x40000000 },
+       { 0x00066b,   1, 0x01, 0x10000000 },
+       { 0x00066c,   2, 0x01, 0xffff0000 },
+       { 0x0007af,   2, 0x01, 0x00000008 },
+       { 0x0007f6,   1, 0x01, 0x00000001 },
+       { 0x00080b,   1, 0x01, 0x00000002 },
+       { 0x0006b2,   1, 0x01, 0x00000055 },
+       { 0x0007ad,   1, 0x01, 0x00000003 },
+       { 0x000937,   1, 0x01, 0x00000001 },
+       { 0x000971,   1, 0x01, 0x00000008 },
+       { 0x000972,   1, 0x01, 0x00000040 },
+       { 0x000973,   1, 0x01, 0x0000012c },
+       { 0x00097c,   1, 0x01, 0x00000040 },
+       { 0x000979,   1, 0x01, 0x00000003 },
+       { 0x000975,   1, 0x01, 0x00000020 },
+       { 0x000976,   1, 0x01, 0x00000001 },
+       { 0x000977,   1, 0x01, 0x00000020 },
+       { 0x000978,   1, 0x01, 0x00000001 },
+       { 0x000957,   1, 0x01, 0x00000003 },
+       { 0x00095e,   1, 0x01, 0x20164010 },
+       { 0x00095f,   1, 0x01, 0x00000020 },
+       { 0x000a0d,   1, 0x01, 0x00000006 },
+       { 0x00097d,   1, 0x01, 0x00000020 },
+       { 0x000683,   1, 0x01, 0x00000006 },
+       { 0x000685,   1, 0x01, 0x003fffff },
+       { 0x000687,   1, 0x01, 0x003fffff },
+       { 0x0006a0,   1, 0x01, 0x00000005 },
+       { 0x000840,   1, 0x01, 0x00400008 },
+       { 0x000841,   1, 0x01, 0x08000080 },
+       { 0x000842,   1, 0x01, 0x00400008 },
+       { 0x000843,   1, 0x01, 0x08000080 },
+       { 0x0006aa,   1, 0x01, 0x00000001 },
+       { 0x0006ab,   1, 0x01, 0x00000002 },
+       { 0x0006ac,   1, 0x01, 0x00000080 },
+       { 0x0006ad,   2, 0x01, 0x00000100 },
+       { 0x0006b1,   1, 0x01, 0x00000011 },
+       { 0x0006bb,   1, 0x01, 0x000000cf },
+       { 0x0006ce,   1, 0x01, 0x2a712488 },
+       { 0x000739,   1, 0x01, 0x4085c000 },
+       { 0x00073a,   1, 0x01, 0x00000080 },
+       { 0x000786,   1, 0x01, 0x80000100 },
+       { 0x00073c,   1, 0x01, 0x00010100 },
+       { 0x00073d,   1, 0x01, 0x02800000 },
+       { 0x000787,   1, 0x01, 0x000000cf },
+       { 0x00078c,   1, 0x01, 0x00000008 },
+       { 0x000792,   1, 0x01, 0x00000001 },
+       { 0x000794,   3, 0x01, 0x00000001 },
+       { 0x000797,   1, 0x01, 0x000000cf },
+       { 0x000836,   1, 0x01, 0x00000001 },
+       { 0x00079a,   1, 0x01, 0x00000002 },
+       { 0x000833,   1, 0x01, 0x04444480 },
+       { 0x0007a1,   1, 0x01, 0x00000001 },
+       { 0x0007a3,   3, 0x01, 0x00000001 },
+       { 0x000831,   1, 0x01, 0x00000004 },
+       { 0x000b07,   1, 0x01, 0x00000002 },
+       { 0x000b08,   2, 0x01, 0x00000100 },
+       { 0x000b0a,   1, 0x01, 0x00000001 },
+       { 0x000a04,   1, 0x01, 0x000000ff },
+       { 0x000a0b,   1, 0x01, 0x00000040 },
+       { 0x00097f,   1, 0x01, 0x00000100 },
+       { 0x000a02,   1, 0x01, 0x00000001 },
+       { 0x000809,   1, 0x01, 0x00000007 },
+       { 0x00c221,   1, 0x01, 0x00000040 },
+       { 0x00c1b0,   8, 0x01, 0x0000000f },
+       { 0x00c1b8,   1, 0x01, 0x0fac6881 },
+       { 0x00c1b9,   1, 0x01, 0x00fac688 },
+       { 0x00c401,   1, 0x01, 0x00000001 },
+       { 0x00c402,   1, 0x01, 0x00010001 },
+       { 0x00c403,   2, 0x01, 0x00000001 },
+       { 0x00c40e,   1, 0x01, 0x00000020 },
+       { 0x00c500,   1, 0x01, 0x00000003 },
+       { 0x01e100,   1, 0x01, 0x00000001 },
+       { 0x001000,   1, 0x01, 0x00000002 },
+       { 0x0006aa,   1, 0x01, 0x00000001 },
+       { 0x0006ad,   2, 0x01, 0x00000100 },
+       { 0x0006b1,   1, 0x01, 0x00000011 },
+       { 0x00078c,   1, 0x01, 0x00000008 },
+       { 0x000792,   1, 0x01, 0x00000001 },
+       { 0x000794,   3, 0x01, 0x00000001 },
+       { 0x000797,   1, 0x01, 0x000000cf },
+       { 0x00079a,   1, 0x01, 0x00000002 },
+       { 0x0007a1,   1, 0x01, 0x00000001 },
+       { 0x0007a3,   3, 0x01, 0x00000001 },
+       { 0x000831,   1, 0x01, 0x00000004 },
+       { 0x01e100,   1, 0x01, 0x00000001 },
+       { 0x001000,   1, 0x01, 0x00000008 },
+       { 0x000039,   3, 0x01, 0x00000000 },
+       { 0x000380,   1, 0x01, 0x00000001 },
+       { 0x000366,   2, 0x01, 0x00000000 },
+       { 0x000368,   1, 0x01, 0x00000fff },
+       { 0x000370,   2, 0x01, 0x00000000 },
+       { 0x000372,   1, 0x01, 0x000fffff },
+       { 0x000813,   1, 0x01, 0x00000006 },
+       { 0x000814,   1, 0x01, 0x00000008 },
+       { 0x000957,   1, 0x01, 0x00000003 },
+       { 0x000b07,   1, 0x01, 0x00000002 },
+       { 0x000b08,   2, 0x01, 0x00000100 },
+       { 0x000b0a,   1, 0x01, 0x00000001 },
+       { 0x000a04,   1, 0x01, 0x000000ff },
+       { 0x000a0b,   1, 0x01, 0x00000040 },
+       { 0x00097f,   1, 0x01, 0x00000100 },
+       { 0x000a02,   1, 0x01, 0x00000001 },
+       { 0x000809,   1, 0x01, 0x00000007 },
+       { 0x00c221,   1, 0x01, 0x00000040 },
+       { 0x00c401,   1, 0x01, 0x00000001 },
+       { 0x00c402,   1, 0x01, 0x00010001 },
+       { 0x00c403,   2, 0x01, 0x00000001 },
+       { 0x00c40e,   1, 0x01, 0x00000020 },
+       { 0x00c500,   1, 0x01, 0x00000003 },
+       { 0x01e100,   1, 0x01, 0x00000001 },
+       { 0x001000,   1, 0x01, 0x00000001 },
+       { 0x000b07,   1, 0x01, 0x00000002 },
+       { 0x000b08,   2, 0x01, 0x00000100 },
+       { 0x000b0a,   1, 0x01, 0x00000001 },
+       { 0x01e100,   1, 0x01, 0x00000001 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_a197[] = {
+       { 0x000800,   1, 0x04, 0x00000000 },
+       { 0x000840,   1, 0x04, 0x00000000 },
+       { 0x000880,   1, 0x04, 0x00000000 },
+       { 0x0008c0,   1, 0x04, 0x00000000 },
+       { 0x000900,   1, 0x04, 0x00000000 },
+       { 0x000940,   1, 0x04, 0x00000000 },
+       { 0x000980,   1, 0x04, 0x00000000 },
+       { 0x0009c0,   1, 0x04, 0x00000000 },
+       { 0x000804,   1, 0x04, 0x00000000 },
+       { 0x000844,   1, 0x04, 0x00000000 },
+       { 0x000884,   1, 0x04, 0x00000000 },
+       { 0x0008c4,   1, 0x04, 0x00000000 },
+       { 0x000904,   1, 0x04, 0x00000000 },
+       { 0x000944,   1, 0x04, 0x00000000 },
+       { 0x000984,   1, 0x04, 0x00000000 },
+       { 0x0009c4,   1, 0x04, 0x00000000 },
+       { 0x000808,   1, 0x04, 0x00000400 },
+       { 0x000848,   1, 0x04, 0x00000400 },
+       { 0x000888,   1, 0x04, 0x00000400 },
+       { 0x0008c8,   1, 0x04, 0x00000400 },
+       { 0x000908,   1, 0x04, 0x00000400 },
+       { 0x000948,   1, 0x04, 0x00000400 },
+       { 0x000988,   1, 0x04, 0x00000400 },
+       { 0x0009c8,   1, 0x04, 0x00000400 },
+       { 0x00080c,   1, 0x04, 0x00000300 },
+       { 0x00084c,   1, 0x04, 0x00000300 },
+       { 0x00088c,   1, 0x04, 0x00000300 },
+       { 0x0008cc,   1, 0x04, 0x00000300 },
+       { 0x00090c,   1, 0x04, 0x00000300 },
+       { 0x00094c,   1, 0x04, 0x00000300 },
+       { 0x00098c,   1, 0x04, 0x00000300 },
+       { 0x0009cc,   1, 0x04, 0x00000300 },
+       { 0x000810,   1, 0x04, 0x000000cf },
+       { 0x000850,   1, 0x04, 0x00000000 },
+       { 0x000890,   1, 0x04, 0x00000000 },
+       { 0x0008d0,   1, 0x04, 0x00000000 },
+       { 0x000910,   1, 0x04, 0x00000000 },
+       { 0x000950,   1, 0x04, 0x00000000 },
+       { 0x000990,   1, 0x04, 0x00000000 },
+       { 0x0009d0,   1, 0x04, 0x00000000 },
+       { 0x000814,   1, 0x04, 0x00000040 },
+       { 0x000854,   1, 0x04, 0x00000040 },
+       { 0x000894,   1, 0x04, 0x00000040 },
+       { 0x0008d4,   1, 0x04, 0x00000040 },
+       { 0x000914,   1, 0x04, 0x00000040 },
+       { 0x000954,   1, 0x04, 0x00000040 },
+       { 0x000994,   1, 0x04, 0x00000040 },
+       { 0x0009d4,   1, 0x04, 0x00000040 },
+       { 0x000818,   1, 0x04, 0x00000001 },
+       { 0x000858,   1, 0x04, 0x00000001 },
+       { 0x000898,   1, 0x04, 0x00000001 },
+       { 0x0008d8,   1, 0x04, 0x00000001 },
+       { 0x000918,   1, 0x04, 0x00000001 },
+       { 0x000958,   1, 0x04, 0x00000001 },
+       { 0x000998,   1, 0x04, 0x00000001 },
+       { 0x0009d8,   1, 0x04, 0x00000001 },
+       { 0x00081c,   1, 0x04, 0x00000000 },
+       { 0x00085c,   1, 0x04, 0x00000000 },
+       { 0x00089c,   1, 0x04, 0x00000000 },
+       { 0x0008dc,   1, 0x04, 0x00000000 },
+       { 0x00091c,   1, 0x04, 0x00000000 },
+       { 0x00095c,   1, 0x04, 0x00000000 },
+       { 0x00099c,   1, 0x04, 0x00000000 },
+       { 0x0009dc,   1, 0x04, 0x00000000 },
+       { 0x000820,   1, 0x04, 0x00000000 },
+       { 0x000860,   1, 0x04, 0x00000000 },
+       { 0x0008a0,   1, 0x04, 0x00000000 },
+       { 0x0008e0,   1, 0x04, 0x00000000 },
+       { 0x000920,   1, 0x04, 0x00000000 },
+       { 0x000960,   1, 0x04, 0x00000000 },
+       { 0x0009a0,   1, 0x04, 0x00000000 },
+       { 0x0009e0,   1, 0x04, 0x00000000 },
+       { 0x001c00,   1, 0x04, 0x00000000 },
+       { 0x001c10,   1, 0x04, 0x00000000 },
+       { 0x001c20,   1, 0x04, 0x00000000 },
+       { 0x001c30,   1, 0x04, 0x00000000 },
+       { 0x001c40,   1, 0x04, 0x00000000 },
+       { 0x001c50,   1, 0x04, 0x00000000 },
+       { 0x001c60,   1, 0x04, 0x00000000 },
+       { 0x001c70,   1, 0x04, 0x00000000 },
+       { 0x001c80,   1, 0x04, 0x00000000 },
+       { 0x001c90,   1, 0x04, 0x00000000 },
+       { 0x001ca0,   1, 0x04, 0x00000000 },
+       { 0x001cb0,   1, 0x04, 0x00000000 },
+       { 0x001cc0,   1, 0x04, 0x00000000 },
+       { 0x001cd0,   1, 0x04, 0x00000000 },
+       { 0x001ce0,   1, 0x04, 0x00000000 },
+       { 0x001cf0,   1, 0x04, 0x00000000 },
+       { 0x001c04,   1, 0x04, 0x00000000 },
+       { 0x001c14,   1, 0x04, 0x00000000 },
+       { 0x001c24,   1, 0x04, 0x00000000 },
+       { 0x001c34,   1, 0x04, 0x00000000 },
+       { 0x001c44,   1, 0x04, 0x00000000 },
+       { 0x001c54,   1, 0x04, 0x00000000 },
+       { 0x001c64,   1, 0x04, 0x00000000 },
+       { 0x001c74,   1, 0x04, 0x00000000 },
+       { 0x001c84,   1, 0x04, 0x00000000 },
+       { 0x001c94,   1, 0x04, 0x00000000 },
+       { 0x001ca4,   1, 0x04, 0x00000000 },
+       { 0x001cb4,   1, 0x04, 0x00000000 },
+       { 0x001cc4,   1, 0x04, 0x00000000 },
+       { 0x001cd4,   1, 0x04, 0x00000000 },
+       { 0x001ce4,   1, 0x04, 0x00000000 },
+       { 0x001cf4,   1, 0x04, 0x00000000 },
+       { 0x001c08,   1, 0x04, 0x00000000 },
+       { 0x001c18,   1, 0x04, 0x00000000 },
+       { 0x001c28,   1, 0x04, 0x00000000 },
+       { 0x001c38,   1, 0x04, 0x00000000 },
+       { 0x001c48,   1, 0x04, 0x00000000 },
+       { 0x001c58,   1, 0x04, 0x00000000 },
+       { 0x001c68,   1, 0x04, 0x00000000 },
+       { 0x001c78,   1, 0x04, 0x00000000 },
+       { 0x001c88,   1, 0x04, 0x00000000 },
+       { 0x001c98,   1, 0x04, 0x00000000 },
+       { 0x001ca8,   1, 0x04, 0x00000000 },
+       { 0x001cb8,   1, 0x04, 0x00000000 },
+       { 0x001cc8,   1, 0x04, 0x00000000 },
+       { 0x001cd8,   1, 0x04, 0x00000000 },
+       { 0x001ce8,   1, 0x04, 0x00000000 },
+       { 0x001cf8,   1, 0x04, 0x00000000 },
+       { 0x001c0c,   1, 0x04, 0x00000000 },
+       { 0x001c1c,   1, 0x04, 0x00000000 },
+       { 0x001c2c,   1, 0x04, 0x00000000 },
+       { 0x001c3c,   1, 0x04, 0x00000000 },
+       { 0x001c4c,   1, 0x04, 0x00000000 },
+       { 0x001c5c,   1, 0x04, 0x00000000 },
+       { 0x001c6c,   1, 0x04, 0x00000000 },
+       { 0x001c7c,   1, 0x04, 0x00000000 },
+       { 0x001c8c,   1, 0x04, 0x00000000 },
+       { 0x001c9c,   1, 0x04, 0x00000000 },
+       { 0x001cac,   1, 0x04, 0x00000000 },
+       { 0x001cbc,   1, 0x04, 0x00000000 },
+       { 0x001ccc,   1, 0x04, 0x00000000 },
+       { 0x001cdc,   1, 0x04, 0x00000000 },
+       { 0x001cec,   1, 0x04, 0x00000000 },
+       { 0x001cfc,   2, 0x04, 0x00000000 },
+       { 0x001d10,   1, 0x04, 0x00000000 },
+       { 0x001d20,   1, 0x04, 0x00000000 },
+       { 0x001d30,   1, 0x04, 0x00000000 },
+       { 0x001d40,   1, 0x04, 0x00000000 },
+       { 0x001d50,   1, 0x04, 0x00000000 },
+       { 0x001d60,   1, 0x04, 0x00000000 },
+       { 0x001d70,   1, 0x04, 0x00000000 },
+       { 0x001d80,   1, 0x04, 0x00000000 },
+       { 0x001d90,   1, 0x04, 0x00000000 },
+       { 0x001da0,   1, 0x04, 0x00000000 },
+       { 0x001db0,   1, 0x04, 0x00000000 },
+       { 0x001dc0,   1, 0x04, 0x00000000 },
+       { 0x001dd0,   1, 0x04, 0x00000000 },
+       { 0x001de0,   1, 0x04, 0x00000000 },
+       { 0x001df0,   1, 0x04, 0x00000000 },
+       { 0x001d04,   1, 0x04, 0x00000000 },
+       { 0x001d14,   1, 0x04, 0x00000000 },
+       { 0x001d24,   1, 0x04, 0x00000000 },
+       { 0x001d34,   1, 0x04, 0x00000000 },
+       { 0x001d44,   1, 0x04, 0x00000000 },
+       { 0x001d54,   1, 0x04, 0x00000000 },
+       { 0x001d64,   1, 0x04, 0x00000000 },
+       { 0x001d74,   1, 0x04, 0x00000000 },
+       { 0x001d84,   1, 0x04, 0x00000000 },
+       { 0x001d94,   1, 0x04, 0x00000000 },
+       { 0x001da4,   1, 0x04, 0x00000000 },
+       { 0x001db4,   1, 0x04, 0x00000000 },
+       { 0x001dc4,   1, 0x04, 0x00000000 },
+       { 0x001dd4,   1, 0x04, 0x00000000 },
+       { 0x001de4,   1, 0x04, 0x00000000 },
+       { 0x001df4,   1, 0x04, 0x00000000 },
+       { 0x001d08,   1, 0x04, 0x00000000 },
+       { 0x001d18,   1, 0x04, 0x00000000 },
+       { 0x001d28,   1, 0x04, 0x00000000 },
+       { 0x001d38,   1, 0x04, 0x00000000 },
+       { 0x001d48,   1, 0x04, 0x00000000 },
+       { 0x001d58,   1, 0x04, 0x00000000 },
+       { 0x001d68,   1, 0x04, 0x00000000 },
+       { 0x001d78,   1, 0x04, 0x00000000 },
+       { 0x001d88,   1, 0x04, 0x00000000 },
+       { 0x001d98,   1, 0x04, 0x00000000 },
+       { 0x001da8,   1, 0x04, 0x00000000 },
+       { 0x001db8,   1, 0x04, 0x00000000 },
+       { 0x001dc8,   1, 0x04, 0x00000000 },
+       { 0x001dd8,   1, 0x04, 0x00000000 },
+       { 0x001de8,   1, 0x04, 0x00000000 },
+       { 0x001df8,   1, 0x04, 0x00000000 },
+       { 0x001d0c,   1, 0x04, 0x00000000 },
+       { 0x001d1c,   1, 0x04, 0x00000000 },
+       { 0x001d2c,   1, 0x04, 0x00000000 },
+       { 0x001d3c,   1, 0x04, 0x00000000 },
+       { 0x001d4c,   1, 0x04, 0x00000000 },
+       { 0x001d5c,   1, 0x04, 0x00000000 },
+       { 0x001d6c,   1, 0x04, 0x00000000 },
+       { 0x001d7c,   1, 0x04, 0x00000000 },
+       { 0x001d8c,   1, 0x04, 0x00000000 },
+       { 0x001d9c,   1, 0x04, 0x00000000 },
+       { 0x001dac,   1, 0x04, 0x00000000 },
+       { 0x001dbc,   1, 0x04, 0x00000000 },
+       { 0x001dcc,   1, 0x04, 0x00000000 },
+       { 0x001ddc,   1, 0x04, 0x00000000 },
+       { 0x001dec,   1, 0x04, 0x00000000 },
+       { 0x001dfc,   1, 0x04, 0x00000000 },
+       { 0x001f00,   1, 0x04, 0x00000000 },
+       { 0x001f08,   1, 0x04, 0x00000000 },
+       { 0x001f10,   1, 0x04, 0x00000000 },
+       { 0x001f18,   1, 0x04, 0x00000000 },
+       { 0x001f20,   1, 0x04, 0x00000000 },
+       { 0x001f28,   1, 0x04, 0x00000000 },
+       { 0x001f30,   1, 0x04, 0x00000000 },
+       { 0x001f38,   1, 0x04, 0x00000000 },
+       { 0x001f40,   1, 0x04, 0x00000000 },
+       { 0x001f48,   1, 0x04, 0x00000000 },
+       { 0x001f50,   1, 0x04, 0x00000000 },
+       { 0x001f58,   1, 0x04, 0x00000000 },
+       { 0x001f60,   1, 0x04, 0x00000000 },
+       { 0x001f68,   1, 0x04, 0x00000000 },
+       { 0x001f70,   1, 0x04, 0x00000000 },
+       { 0x001f78,   1, 0x04, 0x00000000 },
+       { 0x001f04,   1, 0x04, 0x00000000 },
+       { 0x001f0c,   1, 0x04, 0x00000000 },
+       { 0x001f14,   1, 0x04, 0x00000000 },
+       { 0x001f1c,   1, 0x04, 0x00000000 },
+       { 0x001f24,   1, 0x04, 0x00000000 },
+       { 0x001f2c,   1, 0x04, 0x00000000 },
+       { 0x001f34,   1, 0x04, 0x00000000 },
+       { 0x001f3c,   1, 0x04, 0x00000000 },
+       { 0x001f44,   1, 0x04, 0x00000000 },
+       { 0x001f4c,   1, 0x04, 0x00000000 },
+       { 0x001f54,   1, 0x04, 0x00000000 },
+       { 0x001f5c,   1, 0x04, 0x00000000 },
+       { 0x001f64,   1, 0x04, 0x00000000 },
+       { 0x001f6c,   1, 0x04, 0x00000000 },
+       { 0x001f74,   1, 0x04, 0x00000000 },
+       { 0x001f7c,   2, 0x04, 0x00000000 },
+       { 0x001f88,   1, 0x04, 0x00000000 },
+       { 0x001f90,   1, 0x04, 0x00000000 },
+       { 0x001f98,   1, 0x04, 0x00000000 },
+       { 0x001fa0,   1, 0x04, 0x00000000 },
+       { 0x001fa8,   1, 0x04, 0x00000000 },
+       { 0x001fb0,   1, 0x04, 0x00000000 },
+       { 0x001fb8,   1, 0x04, 0x00000000 },
+       { 0x001fc0,   1, 0x04, 0x00000000 },
+       { 0x001fc8,   1, 0x04, 0x00000000 },
+       { 0x001fd0,   1, 0x04, 0x00000000 },
+       { 0x001fd8,   1, 0x04, 0x00000000 },
+       { 0x001fe0,   1, 0x04, 0x00000000 },
+       { 0x001fe8,   1, 0x04, 0x00000000 },
+       { 0x001ff0,   1, 0x04, 0x00000000 },
+       { 0x001ff8,   1, 0x04, 0x00000000 },
+       { 0x001f84,   1, 0x04, 0x00000000 },
+       { 0x001f8c,   1, 0x04, 0x00000000 },
+       { 0x001f94,   1, 0x04, 0x00000000 },
+       { 0x001f9c,   1, 0x04, 0x00000000 },
+       { 0x001fa4,   1, 0x04, 0x00000000 },
+       { 0x001fac,   1, 0x04, 0x00000000 },
+       { 0x001fb4,   1, 0x04, 0x00000000 },
+       { 0x001fbc,   1, 0x04, 0x00000000 },
+       { 0x001fc4,   1, 0x04, 0x00000000 },
+       { 0x001fcc,   1, 0x04, 0x00000000 },
+       { 0x001fd4,   1, 0x04, 0x00000000 },
+       { 0x001fdc,   1, 0x04, 0x00000000 },
+       { 0x001fe4,   1, 0x04, 0x00000000 },
+       { 0x001fec,   1, 0x04, 0x00000000 },
+       { 0x001ff4,   1, 0x04, 0x00000000 },
+       { 0x001ffc,   2, 0x04, 0x00000000 },
+       { 0x002040,   1, 0x04, 0x00000011 },
+       { 0x002080,   1, 0x04, 0x00000020 },
+       { 0x0020c0,   1, 0x04, 0x00000030 },
+       { 0x002100,   1, 0x04, 0x00000040 },
+       { 0x002140,   1, 0x04, 0x00000051 },
+       { 0x00200c,   1, 0x04, 0x00000001 },
+       { 0x00204c,   1, 0x04, 0x00000001 },
+       { 0x00208c,   1, 0x04, 0x00000001 },
+       { 0x0020cc,   1, 0x04, 0x00000001 },
+       { 0x00210c,   1, 0x04, 0x00000001 },
+       { 0x00214c,   1, 0x04, 0x00000001 },
+       { 0x002010,   1, 0x04, 0x00000000 },
+       { 0x002050,   1, 0x04, 0x00000000 },
+       { 0x002090,   1, 0x04, 0x00000001 },
+       { 0x0020d0,   1, 0x04, 0x00000002 },
+       { 0x002110,   1, 0x04, 0x00000003 },
+       { 0x002150,   1, 0x04, 0x00000004 },
+       { 0x000380,   1, 0x04, 0x00000000 },
+       { 0x0003a0,   1, 0x04, 0x00000000 },
+       { 0x0003c0,   1, 0x04, 0x00000000 },
+       { 0x0003e0,   1, 0x04, 0x00000000 },
+       { 0x000384,   1, 0x04, 0x00000000 },
+       { 0x0003a4,   1, 0x04, 0x00000000 },
+       { 0x0003c4,   1, 0x04, 0x00000000 },
+       { 0x0003e4,   1, 0x04, 0x00000000 },
+       { 0x000388,   1, 0x04, 0x00000000 },
+       { 0x0003a8,   1, 0x04, 0x00000000 },
+       { 0x0003c8,   1, 0x04, 0x00000000 },
+       { 0x0003e8,   1, 0x04, 0x00000000 },
+       { 0x00038c,   1, 0x04, 0x00000000 },
+       { 0x0003ac,   1, 0x04, 0x00000000 },
+       { 0x0003cc,   1, 0x04, 0x00000000 },
+       { 0x0003ec,   1, 0x04, 0x00000000 },
+       { 0x000700,   1, 0x04, 0x00000000 },
+       { 0x000710,   1, 0x04, 0x00000000 },
+       { 0x000720,   1, 0x04, 0x00000000 },
+       { 0x000730,   1, 0x04, 0x00000000 },
+       { 0x000704,   1, 0x04, 0x00000000 },
+       { 0x000714,   1, 0x04, 0x00000000 },
+       { 0x000724,   1, 0x04, 0x00000000 },
+       { 0x000734,   1, 0x04, 0x00000000 },
+       { 0x000708,   1, 0x04, 0x00000000 },
+       { 0x000718,   1, 0x04, 0x00000000 },
+       { 0x000728,   1, 0x04, 0x00000000 },
+       { 0x000738,   1, 0x04, 0x00000000 },
+       { 0x002800, 128, 0x04, 0x00000000 },
+       { 0x000a00,   1, 0x04, 0x00000000 },
+       { 0x000a20,   1, 0x04, 0x00000000 },
+       { 0x000a40,   1, 0x04, 0x00000000 },
+       { 0x000a60,   1, 0x04, 0x00000000 },
+       { 0x000a80,   1, 0x04, 0x00000000 },
+       { 0x000aa0,   1, 0x04, 0x00000000 },
+       { 0x000ac0,   1, 0x04, 0x00000000 },
+       { 0x000ae0,   1, 0x04, 0x00000000 },
+       { 0x000b00,   1, 0x04, 0x00000000 },
+       { 0x000b20,   1, 0x04, 0x00000000 },
+       { 0x000b40,   1, 0x04, 0x00000000 },
+       { 0x000b60,   1, 0x04, 0x00000000 },
+       { 0x000b80,   1, 0x04, 0x00000000 },
+       { 0x000ba0,   1, 0x04, 0x00000000 },
+       { 0x000bc0,   1, 0x04, 0x00000000 },
+       { 0x000be0,   1, 0x04, 0x00000000 },
+       { 0x000a04,   1, 0x04, 0x00000000 },
+       { 0x000a24,   1, 0x04, 0x00000000 },
+       { 0x000a44,   1, 0x04, 0x00000000 },
+       { 0x000a64,   1, 0x04, 0x00000000 },
+       { 0x000a84,   1, 0x04, 0x00000000 },
+       { 0x000aa4,   1, 0x04, 0x00000000 },
+       { 0x000ac4,   1, 0x04, 0x00000000 },
+       { 0x000ae4,   1, 0x04, 0x00000000 },
+       { 0x000b04,   1, 0x04, 0x00000000 },
+       { 0x000b24,   1, 0x04, 0x00000000 },
+       { 0x000b44,   1, 0x04, 0x00000000 },
+       { 0x000b64,   1, 0x04, 0x00000000 },
+       { 0x000b84,   1, 0x04, 0x00000000 },
+       { 0x000ba4,   1, 0x04, 0x00000000 },
+       { 0x000bc4,   1, 0x04, 0x00000000 },
+       { 0x000be4,   1, 0x04, 0x00000000 },
+       { 0x000a08,   1, 0x04, 0x00000000 },
+       { 0x000a28,   1, 0x04, 0x00000000 },
+       { 0x000a48,   1, 0x04, 0x00000000 },
+       { 0x000a68,   1, 0x04, 0x00000000 },
+       { 0x000a88,   1, 0x04, 0x00000000 },
+       { 0x000aa8,   1, 0x04, 0x00000000 },
+       { 0x000ac8,   1, 0x04, 0x00000000 },
+       { 0x000ae8,   1, 0x04, 0x00000000 },
+       { 0x000b08,   1, 0x04, 0x00000000 },
+       { 0x000b28,   1, 0x04, 0x00000000 },
+       { 0x000b48,   1, 0x04, 0x00000000 },
+       { 0x000b68,   1, 0x04, 0x00000000 },
+       { 0x000b88,   1, 0x04, 0x00000000 },
+       { 0x000ba8,   1, 0x04, 0x00000000 },
+       { 0x000bc8,   1, 0x04, 0x00000000 },
+       { 0x000be8,   1, 0x04, 0x00000000 },
+       { 0x000a0c,   1, 0x04, 0x00000000 },
+       { 0x000a2c,   1, 0x04, 0x00000000 },
+       { 0x000a4c,   1, 0x04, 0x00000000 },
+       { 0x000a6c,   1, 0x04, 0x00000000 },
+       { 0x000a8c,   1, 0x04, 0x00000000 },
+       { 0x000aac,   1, 0x04, 0x00000000 },
+       { 0x000acc,   1, 0x04, 0x00000000 },
+       { 0x000aec,   1, 0x04, 0x00000000 },
+       { 0x000b0c,   1, 0x04, 0x00000000 },
+       { 0x000b2c,   1, 0x04, 0x00000000 },
+       { 0x000b4c,   1, 0x04, 0x00000000 },
+       { 0x000b6c,   1, 0x04, 0x00000000 },
+       { 0x000b8c,   1, 0x04, 0x00000000 },
+       { 0x000bac,   1, 0x04, 0x00000000 },
+       { 0x000bcc,   1, 0x04, 0x00000000 },
+       { 0x000bec,   1, 0x04, 0x00000000 },
+       { 0x000a10,   1, 0x04, 0x00000000 },
+       { 0x000a30,   1, 0x04, 0x00000000 },
+       { 0x000a50,   1, 0x04, 0x00000000 },
+       { 0x000a70,   1, 0x04, 0x00000000 },
+       { 0x000a90,   1, 0x04, 0x00000000 },
+       { 0x000ab0,   1, 0x04, 0x00000000 },
+       { 0x000ad0,   1, 0x04, 0x00000000 },
+       { 0x000af0,   1, 0x04, 0x00000000 },
+       { 0x000b10,   1, 0x04, 0x00000000 },
+       { 0x000b30,   1, 0x04, 0x00000000 },
+       { 0x000b50,   1, 0x04, 0x00000000 },
+       { 0x000b70,   1, 0x04, 0x00000000 },
+       { 0x000b90,   1, 0x04, 0x00000000 },
+       { 0x000bb0,   1, 0x04, 0x00000000 },
+       { 0x000bd0,   1, 0x04, 0x00000000 },
+       { 0x000bf0,   1, 0x04, 0x00000000 },
+       { 0x000a14,   1, 0x04, 0x00000000 },
+       { 0x000a34,   1, 0x04, 0x00000000 },
+       { 0x000a54,   1, 0x04, 0x00000000 },
+       { 0x000a74,   1, 0x04, 0x00000000 },
+       { 0x000a94,   1, 0x04, 0x00000000 },
+       { 0x000ab4,   1, 0x04, 0x00000000 },
+       { 0x000ad4,   1, 0x04, 0x00000000 },
+       { 0x000af4,   1, 0x04, 0x00000000 },
+       { 0x000b14,   1, 0x04, 0x00000000 },
+       { 0x000b34,   1, 0x04, 0x00000000 },
+       { 0x000b54,   1, 0x04, 0x00000000 },
+       { 0x000b74,   1, 0x04, 0x00000000 },
+       { 0x000b94,   1, 0x04, 0x00000000 },
+       { 0x000bb4,   1, 0x04, 0x00000000 },
+       { 0x000bd4,   1, 0x04, 0x00000000 },
+       { 0x000bf4,   1, 0x04, 0x00000000 },
+       { 0x000c00,   1, 0x04, 0x00000000 },
+       { 0x000c10,   1, 0x04, 0x00000000 },
+       { 0x000c20,   1, 0x04, 0x00000000 },
+       { 0x000c30,   1, 0x04, 0x00000000 },
+       { 0x000c40,   1, 0x04, 0x00000000 },
+       { 0x000c50,   1, 0x04, 0x00000000 },
+       { 0x000c60,   1, 0x04, 0x00000000 },
+       { 0x000c70,   1, 0x04, 0x00000000 },
+       { 0x000c80,   1, 0x04, 0x00000000 },
+       { 0x000c90,   1, 0x04, 0x00000000 },
+       { 0x000ca0,   1, 0x04, 0x00000000 },
+       { 0x000cb0,   1, 0x04, 0x00000000 },
+       { 0x000cc0,   1, 0x04, 0x00000000 },
+       { 0x000cd0,   1, 0x04, 0x00000000 },
+       { 0x000ce0,   1, 0x04, 0x00000000 },
+       { 0x000cf0,   1, 0x04, 0x00000000 },
+       { 0x000c04,   1, 0x04, 0x00000000 },
+       { 0x000c14,   1, 0x04, 0x00000000 },
+       { 0x000c24,   1, 0x04, 0x00000000 },
+       { 0x000c34,   1, 0x04, 0x00000000 },
+       { 0x000c44,   1, 0x04, 0x00000000 },
+       { 0x000c54,   1, 0x04, 0x00000000 },
+       { 0x000c64,   1, 0x04, 0x00000000 },
+       { 0x000c74,   1, 0x04, 0x00000000 },
+       { 0x000c84,   1, 0x04, 0x00000000 },
+       { 0x000c94,   1, 0x04, 0x00000000 },
+       { 0x000ca4,   1, 0x04, 0x00000000 },
+       { 0x000cb4,   1, 0x04, 0x00000000 },
+       { 0x000cc4,   1, 0x04, 0x00000000 },
+       { 0x000cd4,   1, 0x04, 0x00000000 },
+       { 0x000ce4,   1, 0x04, 0x00000000 },
+       { 0x000cf4,   1, 0x04, 0x00000000 },
+       { 0x000c08,   1, 0x04, 0x00000000 },
+       { 0x000c18,   1, 0x04, 0x00000000 },
+       { 0x000c28,   1, 0x04, 0x00000000 },
+       { 0x000c38,   1, 0x04, 0x00000000 },
+       { 0x000c48,   1, 0x04, 0x00000000 },
+       { 0x000c58,   1, 0x04, 0x00000000 },
+       { 0x000c68,   1, 0x04, 0x00000000 },
+       { 0x000c78,   1, 0x04, 0x00000000 },
+       { 0x000c88,   1, 0x04, 0x00000000 },
+       { 0x000c98,   1, 0x04, 0x00000000 },
+       { 0x000ca8,   1, 0x04, 0x00000000 },
+       { 0x000cb8,   1, 0x04, 0x00000000 },
+       { 0x000cc8,   1, 0x04, 0x00000000 },
+       { 0x000cd8,   1, 0x04, 0x00000000 },
+       { 0x000ce8,   1, 0x04, 0x00000000 },
+       { 0x000cf8,   1, 0x04, 0x00000000 },
+       { 0x000c0c,   1, 0x04, 0x3f800000 },
+       { 0x000c1c,   1, 0x04, 0x3f800000 },
+       { 0x000c2c,   1, 0x04, 0x3f800000 },
+       { 0x000c3c,   1, 0x04, 0x3f800000 },
+       { 0x000c4c,   1, 0x04, 0x3f800000 },
+       { 0x000c5c,   1, 0x04, 0x3f800000 },
+       { 0x000c6c,   1, 0x04, 0x3f800000 },
+       { 0x000c7c,   1, 0x04, 0x3f800000 },
+       { 0x000c8c,   1, 0x04, 0x3f800000 },
+       { 0x000c9c,   1, 0x04, 0x3f800000 },
+       { 0x000cac,   1, 0x04, 0x3f800000 },
+       { 0x000cbc,   1, 0x04, 0x3f800000 },
+       { 0x000ccc,   1, 0x04, 0x3f800000 },
+       { 0x000cdc,   1, 0x04, 0x3f800000 },
+       { 0x000cec,   1, 0x04, 0x3f800000 },
+       { 0x000cfc,   1, 0x04, 0x3f800000 },
+       { 0x000d00,   1, 0x04, 0xffff0000 },
+       { 0x000d08,   1, 0x04, 0xffff0000 },
+       { 0x000d10,   1, 0x04, 0xffff0000 },
+       { 0x000d18,   1, 0x04, 0xffff0000 },
+       { 0x000d20,   1, 0x04, 0xffff0000 },
+       { 0x000d28,   1, 0x04, 0xffff0000 },
+       { 0x000d30,   1, 0x04, 0xffff0000 },
+       { 0x000d38,   1, 0x04, 0xffff0000 },
+       { 0x000d04,   1, 0x04, 0xffff0000 },
+       { 0x000d0c,   1, 0x04, 0xffff0000 },
+       { 0x000d14,   1, 0x04, 0xffff0000 },
+       { 0x000d1c,   1, 0x04, 0xffff0000 },
+       { 0x000d24,   1, 0x04, 0xffff0000 },
+       { 0x000d2c,   1, 0x04, 0xffff0000 },
+       { 0x000d34,   1, 0x04, 0xffff0000 },
+       { 0x000d3c,   1, 0x04, 0xffff0000 },
+       { 0x000e00,   1, 0x04, 0x00000000 },
+       { 0x000e10,   1, 0x04, 0x00000000 },
+       { 0x000e20,   1, 0x04, 0x00000000 },
+       { 0x000e30,   1, 0x04, 0x00000000 },
+       { 0x000e40,   1, 0x04, 0x00000000 },
+       { 0x000e50,   1, 0x04, 0x00000000 },
+       { 0x000e60,   1, 0x04, 0x00000000 },
+       { 0x000e70,   1, 0x04, 0x00000000 },
+       { 0x000e80,   1, 0x04, 0x00000000 },
+       { 0x000e90,   1, 0x04, 0x00000000 },
+       { 0x000ea0,   1, 0x04, 0x00000000 },
+       { 0x000eb0,   1, 0x04, 0x00000000 },
+       { 0x000ec0,   1, 0x04, 0x00000000 },
+       { 0x000ed0,   1, 0x04, 0x00000000 },
+       { 0x000ee0,   1, 0x04, 0x00000000 },
+       { 0x000ef0,   1, 0x04, 0x00000000 },
+       { 0x000e04,   1, 0x04, 0xffff0000 },
+       { 0x000e14,   1, 0x04, 0xffff0000 },
+       { 0x000e24,   1, 0x04, 0xffff0000 },
+       { 0x000e34,   1, 0x04, 0xffff0000 },
+       { 0x000e44,   1, 0x04, 0xffff0000 },
+       { 0x000e54,   1, 0x04, 0xffff0000 },
+       { 0x000e64,   1, 0x04, 0xffff0000 },
+       { 0x000e74,   1, 0x04, 0xffff0000 },
+       { 0x000e84,   1, 0x04, 0xffff0000 },
+       { 0x000e94,   1, 0x04, 0xffff0000 },
+       { 0x000ea4,   1, 0x04, 0xffff0000 },
+       { 0x000eb4,   1, 0x04, 0xffff0000 },
+       { 0x000ec4,   1, 0x04, 0xffff0000 },
+       { 0x000ed4,   1, 0x04, 0xffff0000 },
+       { 0x000ee4,   1, 0x04, 0xffff0000 },
+       { 0x000ef4,   1, 0x04, 0xffff0000 },
+       { 0x000e08,   1, 0x04, 0xffff0000 },
+       { 0x000e18,   1, 0x04, 0xffff0000 },
+       { 0x000e28,   1, 0x04, 0xffff0000 },
+       { 0x000e38,   1, 0x04, 0xffff0000 },
+       { 0x000e48,   1, 0x04, 0xffff0000 },
+       { 0x000e58,   1, 0x04, 0xffff0000 },
+       { 0x000e68,   1, 0x04, 0xffff0000 },
+       { 0x000e78,   1, 0x04, 0xffff0000 },
+       { 0x000e88,   1, 0x04, 0xffff0000 },
+       { 0x000e98,   1, 0x04, 0xffff0000 },
+       { 0x000ea8,   1, 0x04, 0xffff0000 },
+       { 0x000eb8,   1, 0x04, 0xffff0000 },
+       { 0x000ec8,   1, 0x04, 0xffff0000 },
+       { 0x000ed8,   1, 0x04, 0xffff0000 },
+       { 0x000ee8,   1, 0x04, 0xffff0000 },
+       { 0x000ef8,   1, 0x04, 0xffff0000 },
+       { 0x000d40,   1, 0x04, 0x00000000 },
+       { 0x000d48,   1, 0x04, 0x00000000 },
+       { 0x000d50,   1, 0x04, 0x00000000 },
+       { 0x000d58,   1, 0x04, 0x00000000 },
+       { 0x000d44,   1, 0x04, 0x00000000 },
+       { 0x000d4c,   1, 0x04, 0x00000000 },
+       { 0x000d54,   1, 0x04, 0x00000000 },
+       { 0x000d5c,   1, 0x04, 0x00000000 },
+       { 0x001e00,   1, 0x04, 0x00000001 },
+       { 0x001e20,   1, 0x04, 0x00000001 },
+       { 0x001e40,   1, 0x04, 0x00000001 },
+       { 0x001e60,   1, 0x04, 0x00000001 },
+       { 0x001e80,   1, 0x04, 0x00000001 },
+       { 0x001ea0,   1, 0x04, 0x00000001 },
+       { 0x001ec0,   1, 0x04, 0x00000001 },
+       { 0x001ee0,   1, 0x04, 0x00000001 },
+       { 0x001e04,   1, 0x04, 0x00000001 },
+       { 0x001e24,   1, 0x04, 0x00000001 },
+       { 0x001e44,   1, 0x04, 0x00000001 },
+       { 0x001e64,   1, 0x04, 0x00000001 },
+       { 0x001e84,   1, 0x04, 0x00000001 },
+       { 0x001ea4,   1, 0x04, 0x00000001 },
+       { 0x001ec4,   1, 0x04, 0x00000001 },
+       { 0x001ee4,   1, 0x04, 0x00000001 },
+       { 0x001e08,   1, 0x04, 0x00000002 },
+       { 0x001e28,   1, 0x04, 0x00000002 },
+       { 0x001e48,   1, 0x04, 0x00000002 },
+       { 0x001e68,   1, 0x04, 0x00000002 },
+       { 0x001e88,   1, 0x04, 0x00000002 },
+       { 0x001ea8,   1, 0x04, 0x00000002 },
+       { 0x001ec8,   1, 0x04, 0x00000002 },
+       { 0x001ee8,   1, 0x04, 0x00000002 },
+       { 0x001e0c,   1, 0x04, 0x00000001 },
+       { 0x001e2c,   1, 0x04, 0x00000001 },
+       { 0x001e4c,   1, 0x04, 0x00000001 },
+       { 0x001e6c,   1, 0x04, 0x00000001 },
+       { 0x001e8c,   1, 0x04, 0x00000001 },
+       { 0x001eac,   1, 0x04, 0x00000001 },
+       { 0x001ecc,   1, 0x04, 0x00000001 },
+       { 0x001eec,   1, 0x04, 0x00000001 },
+       { 0x001e10,   1, 0x04, 0x00000001 },
+       { 0x001e30,   1, 0x04, 0x00000001 },
+       { 0x001e50,   1, 0x04, 0x00000001 },
+       { 0x001e70,   1, 0x04, 0x00000001 },
+       { 0x001e90,   1, 0x04, 0x00000001 },
+       { 0x001eb0,   1, 0x04, 0x00000001 },
+       { 0x001ed0,   1, 0x04, 0x00000001 },
+       { 0x001ef0,   1, 0x04, 0x00000001 },
+       { 0x001e14,   1, 0x04, 0x00000002 },
+       { 0x001e34,   1, 0x04, 0x00000002 },
+       { 0x001e54,   1, 0x04, 0x00000002 },
+       { 0x001e74,   1, 0x04, 0x00000002 },
+       { 0x001e94,   1, 0x04, 0x00000002 },
+       { 0x001eb4,   1, 0x04, 0x00000002 },
+       { 0x001ed4,   1, 0x04, 0x00000002 },
+       { 0x001ef4,   1, 0x04, 0x00000002 },
+       { 0x001e18,   1, 0x04, 0x00000001 },
+       { 0x001e38,   1, 0x04, 0x00000001 },
+       { 0x001e58,   1, 0x04, 0x00000001 },
+       { 0x001e78,   1, 0x04, 0x00000001 },
+       { 0x001e98,   1, 0x04, 0x00000001 },
+       { 0x001eb8,   1, 0x04, 0x00000001 },
+       { 0x001ed8,   1, 0x04, 0x00000001 },
+       { 0x001ef8,   1, 0x04, 0x00000001 },
+       { 0x003400, 128, 0x04, 0x00000000 },
+       { 0x00030c,   1, 0x04, 0x00000001 },
+       { 0x001944,   1, 0x04, 0x00000000 },
+       { 0x001514,   1, 0x04, 0x00000000 },
+       { 0x000d68,   1, 0x04, 0x0000ffff },
+       { 0x00121c,   1, 0x04, 0x0fac6881 },
+       { 0x000fac,   1, 0x04, 0x00000001 },
+       { 0x001538,   1, 0x04, 0x00000001 },
+       { 0x000fe0,   2, 0x04, 0x00000000 },
+       { 0x000fe8,   1, 0x04, 0x00000014 },
+       { 0x000fec,   1, 0x04, 0x00000040 },
+       { 0x000ff0,   1, 0x04, 0x00000000 },
+       { 0x00179c,   1, 0x04, 0x00000000 },
+       { 0x001228,   1, 0x04, 0x00000400 },
+       { 0x00122c,   1, 0x04, 0x00000300 },
+       { 0x001230,   1, 0x04, 0x00010001 },
+       { 0x0007f8,   1, 0x04, 0x00000000 },
+       { 0x0015b4,   1, 0x04, 0x00000001 },
+       { 0x0015cc,   1, 0x04, 0x00000000 },
+       { 0x001534,   1, 0x04, 0x00000000 },
+       { 0x000fb0,   1, 0x04, 0x00000000 },
+       { 0x0015d0,   1, 0x04, 0x00000000 },
+       { 0x00153c,   1, 0x04, 0x00000000 },
+       { 0x0016b4,   1, 0x04, 0x00000003 },
+       { 0x000fbc,   4, 0x04, 0x0000ffff },
+       { 0x000df8,   2, 0x04, 0x00000000 },
+       { 0x001948,   1, 0x04, 0x00000000 },
+       { 0x001970,   1, 0x04, 0x00000001 },
+       { 0x00161c,   1, 0x04, 0x000009f0 },
+       { 0x000dcc,   1, 0x04, 0x00000010 },
+       { 0x00163c,   1, 0x04, 0x00000000 },
+       { 0x0015e4,   1, 0x04, 0x00000000 },
+       { 0x001160,  32, 0x04, 0x25e00040 },
+       { 0x001880,  32, 0x04, 0x00000000 },
+       { 0x000f84,   2, 0x04, 0x00000000 },
+       { 0x0017c8,   2, 0x04, 0x00000000 },
+       { 0x0017d0,   1, 0x04, 0x000000ff },
+       { 0x0017d4,   1, 0x04, 0xffffffff },
+       { 0x0017d8,   1, 0x04, 0x00000002 },
+       { 0x0017dc,   1, 0x04, 0x00000000 },
+       { 0x0015f4,   2, 0x04, 0x00000000 },
+       { 0x001434,   2, 0x04, 0x00000000 },
+       { 0x000d74,   1, 0x04, 0x00000000 },
+       { 0x000dec,   1, 0x04, 0x00000001 },
+       { 0x0013a4,   1, 0x04, 0x00000000 },
+       { 0x001318,   1, 0x04, 0x00000001 },
+       { 0x001644,   1, 0x04, 0x00000000 },
+       { 0x000748,   1, 0x04, 0x00000000 },
+       { 0x000de8,   1, 0x04, 0x00000000 },
+       { 0x001648,   1, 0x04, 0x00000000 },
+       { 0x0012a4,   1, 0x04, 0x00000000 },
+       { 0x001120,   4, 0x04, 0x00000000 },
+       { 0x001118,   1, 0x04, 0x00000000 },
+       { 0x00164c,   1, 0x04, 0x00000000 },
+       { 0x001658,   1, 0x04, 0x00000000 },
+       { 0x001910,   1, 0x04, 0x00000290 },
+       { 0x001518,   1, 0x04, 0x00000000 },
+       { 0x00165c,   1, 0x04, 0x00000001 },
+       { 0x001520,   1, 0x04, 0x00000000 },
+       { 0x001604,   1, 0x04, 0x00000000 },
+       { 0x001570,   1, 0x04, 0x00000000 },
+       { 0x0013b0,   2, 0x04, 0x3f800000 },
+       { 0x00020c,   1, 0x04, 0x00000000 },
+       { 0x001670,   1, 0x04, 0x30201000 },
+       { 0x001674,   1, 0x04, 0x70605040 },
+       { 0x001678,   1, 0x04, 0xb8a89888 },
+       { 0x00167c,   1, 0x04, 0xf8e8d8c8 },
+       { 0x00166c,   1, 0x04, 0x00000000 },
+       { 0x001680,   1, 0x04, 0x00ffff00 },
+       { 0x0012d0,   1, 0x04, 0x00000003 },
+       { 0x0012d4,   1, 0x04, 0x00000002 },
+       { 0x001684,   2, 0x04, 0x00000000 },
+       { 0x000dac,   2, 0x04, 0x00001b02 },
+       { 0x000db4,   1, 0x04, 0x00000000 },
+       { 0x00168c,   1, 0x04, 0x00000000 },
+       { 0x0015bc,   1, 0x04, 0x00000000 },
+       { 0x00156c,   1, 0x04, 0x00000000 },
+       { 0x00187c,   1, 0x04, 0x00000000 },
+       { 0x001110,   1, 0x04, 0x00000001 },
+       { 0x000dc0,   3, 0x04, 0x00000000 },
+       { 0x001234,   1, 0x04, 0x00000000 },
+       { 0x001690,   1, 0x04, 0x00000000 },
+       { 0x0012ac,   1, 0x04, 0x00000001 },
+       { 0x0002c4,   1, 0x04, 0x00000000 },
+       { 0x000790,   5, 0x04, 0x00000000 },
+       { 0x00077c,   1, 0x04, 0x00000000 },
+       { 0x001000,   1, 0x04, 0x00000010 },
+       { 0x0010fc,   1, 0x04, 0x00000000 },
+       { 0x001290,   1, 0x04, 0x00000000 },
+       { 0x000218,   1, 0x04, 0x00000010 },
+       { 0x0012d8,   1, 0x04, 0x00000000 },
+       { 0x0012dc,   1, 0x04, 0x00000010 },
+       { 0x000d94,   1, 0x04, 0x00000001 },
+       { 0x00155c,   2, 0x04, 0x00000000 },
+       { 0x001564,   1, 0x04, 0x00000fff },
+       { 0x001574,   2, 0x04, 0x00000000 },
+       { 0x00157c,   1, 0x04, 0x000fffff },
+       { 0x001354,   1, 0x04, 0x00000000 },
+       { 0x001610,   1, 0x04, 0x00000012 },
+       { 0x001608,   2, 0x04, 0x00000000 },
+       { 0x00260c,   1, 0x04, 0x00000000 },
+       { 0x0007ac,   1, 0x04, 0x00000000 },
+       { 0x00162c,   1, 0x04, 0x00000003 },
+       { 0x000210,   1, 0x04, 0x00000000 },
+       { 0x000320,   1, 0x04, 0x00000000 },
+       { 0x000324,   6, 0x04, 0x3f800000 },
+       { 0x000750,   1, 0x04, 0x00000000 },
+       { 0x000760,   1, 0x04, 0x39291909 },
+       { 0x000764,   1, 0x04, 0x79695949 },
+       { 0x000768,   1, 0x04, 0xb9a99989 },
+       { 0x00076c,   1, 0x04, 0xf9e9d9c9 },
+       { 0x000770,   1, 0x04, 0x30201000 },
+       { 0x000774,   1, 0x04, 0x70605040 },
+       { 0x000778,   1, 0x04, 0x00009080 },
+       { 0x000780,   1, 0x04, 0x39291909 },
+       { 0x000784,   1, 0x04, 0x79695949 },
+       { 0x000788,   1, 0x04, 0xb9a99989 },
+       { 0x00078c,   1, 0x04, 0xf9e9d9c9 },
+       { 0x0007d0,   1, 0x04, 0x30201000 },
+       { 0x0007d4,   1, 0x04, 0x70605040 },
+       { 0x0007d8,   1, 0x04, 0x00009080 },
+       { 0x00037c,   1, 0x04, 0x00000001 },
+       { 0x000740,   2, 0x04, 0x00000000 },
+       { 0x002600,   1, 0x04, 0x00000000 },
+       { 0x001918,   1, 0x04, 0x00000000 },
+       { 0x00191c,   1, 0x04, 0x00000900 },
+       { 0x001920,   1, 0x04, 0x00000405 },
+       { 0x001308,   1, 0x04, 0x00000001 },
+       { 0x001924,   1, 0x04, 0x00000000 },
+       { 0x0013ac,   1, 0x04, 0x00000000 },
+       { 0x00192c,   1, 0x04, 0x00000001 },
+       { 0x00193c,   1, 0x04, 0x00002c1c },
+       { 0x000d7c,   1, 0x04, 0x00000000 },
+       { 0x000f8c,   1, 0x04, 0x00000000 },
+       { 0x0002c0,   1, 0x04, 0x00000001 },
+       { 0x001510,   1, 0x04, 0x00000000 },
+       { 0x001940,   1, 0x04, 0x00000000 },
+       { 0x000ff4,   2, 0x04, 0x00000000 },
+       { 0x00194c,   2, 0x04, 0x00000000 },
+       { 0x001968,   1, 0x04, 0x00000000 },
+       { 0x001590,   1, 0x04, 0x0000003f },
+       { 0x0007e8,   4, 0x04, 0x00000000 },
+       { 0x00196c,   1, 0x04, 0x00000011 },
+       { 0x0002e4,   1, 0x04, 0x0000b001 },
+       { 0x00036c,   2, 0x04, 0x00000000 },
+       { 0x00197c,   1, 0x04, 0x00000000 },
+       { 0x000fcc,   2, 0x04, 0x00000000 },
+       { 0x0002d8,   1, 0x04, 0x00000040 },
+       { 0x001980,   1, 0x04, 0x00000080 },
+       { 0x001504,   1, 0x04, 0x00000080 },
+       { 0x001984,   1, 0x04, 0x00000000 },
+       { 0x000300,   1, 0x04, 0x00000001 },
+       { 0x0013a8,   1, 0x04, 0x00000000 },
+       { 0x0012ec,   1, 0x04, 0x00000000 },
+       { 0x001310,   1, 0x04, 0x00000000 },
+       { 0x001314,   1, 0x04, 0x00000001 },
+       { 0x001380,   1, 0x04, 0x00000000 },
+       { 0x001384,   4, 0x04, 0x00000001 },
+       { 0x001394,   1, 0x04, 0x00000000 },
+       { 0x00139c,   1, 0x04, 0x00000000 },
+       { 0x001398,   1, 0x04, 0x00000000 },
+       { 0x001594,   1, 0x04, 0x00000000 },
+       { 0x001598,   4, 0x04, 0x00000001 },
+       { 0x000f54,   3, 0x04, 0x00000000 },
+       { 0x0019bc,   1, 0x04, 0x00000000 },
+       { 0x000f9c,   2, 0x04, 0x00000000 },
+       { 0x0012cc,   1, 0x04, 0x00000000 },
+       { 0x0012e8,   1, 0x04, 0x00000000 },
+       { 0x00130c,   1, 0x04, 0x00000001 },
+       { 0x001360,   8, 0x04, 0x00000000 },
+       { 0x00133c,   2, 0x04, 0x00000001 },
+       { 0x001344,   1, 0x04, 0x00000002 },
+       { 0x001348,   2, 0x04, 0x00000001 },
+       { 0x001350,   1, 0x04, 0x00000002 },
+       { 0x001358,   1, 0x04, 0x00000001 },
+       { 0x0012e4,   1, 0x04, 0x00000000 },
+       { 0x00131c,   4, 0x04, 0x00000000 },
+       { 0x0019c0,   1, 0x04, 0x00000000 },
+       { 0x001140,   1, 0x04, 0x00000000 },
+       { 0x0019c4,   1, 0x04, 0x00000000 },
+       { 0x0019c8,   1, 0x04, 0x00001500 },
+       { 0x00135c,   1, 0x04, 0x00000000 },
+       { 0x000f90,   1, 0x04, 0x00000000 },
+       { 0x0019e0,   8, 0x04, 0x00000001 },
+       { 0x0019cc,   1, 0x04, 0x00000001 },
+       { 0x0015b8,   1, 0x04, 0x00000000 },
+       { 0x001a00,   1, 0x04, 0x00001111 },
+       { 0x001a04,   7, 0x04, 0x00000000 },
+       { 0x000d6c,   2, 0x04, 0xffff0000 },
+       { 0x0010f8,   1, 0x04, 0x00001010 },
+       { 0x000d80,   5, 0x04, 0x00000000 },
+       { 0x000da0,   1, 0x04, 0x00000000 },
+       { 0x0007a4,   2, 0x04, 0x00000000 },
+       { 0x001508,   1, 0x04, 0x80000000 },
+       { 0x00150c,   1, 0x04, 0x40000000 },
+       { 0x001668,   1, 0x04, 0x00000000 },
+       { 0x000318,   2, 0x04, 0x00000008 },
+       { 0x000d9c,   1, 0x04, 0x00000001 },
+       { 0x000ddc,   1, 0x04, 0x00000002 },
+       { 0x000374,   1, 0x04, 0x00000000 },
+       { 0x000378,   1, 0x04, 0x00000020 },
+       { 0x0007dc,   1, 0x04, 0x00000000 },
+       { 0x00074c,   1, 0x04, 0x00000055 },
+       { 0x001420,   1, 0x04, 0x00000003 },
+       { 0x0017bc,   2, 0x04, 0x00000000 },
+       { 0x0017c4,   1, 0x04, 0x00000001 },
+       { 0x001008,   1, 0x04, 0x00000008 },
+       { 0x00100c,   1, 0x04, 0x00000040 },
+       { 0x001010,   1, 0x04, 0x0000012c },
+       { 0x000d60,   1, 0x04, 0x00000040 },
+       { 0x00075c,   1, 0x04, 0x00000003 },
+       { 0x001018,   1, 0x04, 0x00000020 },
+       { 0x00101c,   1, 0x04, 0x00000001 },
+       { 0x001020,   1, 0x04, 0x00000020 },
+       { 0x001024,   1, 0x04, 0x00000001 },
+       { 0x001444,   3, 0x04, 0x00000000 },
+       { 0x000360,   1, 0x04, 0x20164010 },
+       { 0x000364,   1, 0x04, 0x00000020 },
+       { 0x000368,   1, 0x04, 0x00000000 },
+       { 0x000de4,   1, 0x04, 0x00000000 },
+       { 0x000204,   1, 0x04, 0x00000006 },
+       { 0x000208,   1, 0x04, 0x00000000 },
+       { 0x0002cc,   2, 0x04, 0x003fffff },
+       { 0x001220,   1, 0x04, 0x00000005 },
+       { 0x000fdc,   1, 0x04, 0x00000000 },
+       { 0x000f98,   1, 0x04, 0x00400008 },
+       { 0x001284,   1, 0x04, 0x08000080 },
+       { 0x001450,   1, 0x04, 0x00400008 },
+       { 0x001454,   1, 0x04, 0x08000080 },
+       { 0x000214,   1, 0x04, 0x00000000 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk40xx[] = {
+       { 0x404004,   8, 0x04, 0x00000000 },
+       { 0x404024,   1, 0x04, 0x0000e000 },
+       { 0x404028,   8, 0x04, 0x00000000 },
+       { 0x4040a8,   8, 0x04, 0x00000000 },
+       { 0x4040c8,   1, 0x04, 0xf800008f },
+       { 0x4040d0,   6, 0x04, 0x00000000 },
+       { 0x4040e8,   1, 0x04, 0x00001000 },
+       { 0x4040f8,   1, 0x04, 0x00000000 },
+       { 0x404100,  10, 0x04, 0x00000000 },
+       { 0x404130,   2, 0x04, 0x00000000 },
+       { 0x404138,   1, 0x04, 0x20000040 },
+       { 0x404150,   1, 0x04, 0x0000002e },
+       { 0x404154,   1, 0x04, 0x00000400 },
+       { 0x404158,   1, 0x04, 0x00000200 },
+       { 0x404164,   1, 0x04, 0x00000055 },
+       { 0x40417c,   2, 0x04, 0x00000000 },
+       { 0x404194,   1, 0x04, 0x01000700 },
+       { 0x4041a0,   4, 0x04, 0x00000000 },
+       { 0x404200,   1, 0x04, 0x0000a197 },
+       { 0x404204,   1, 0x04, 0x0000a1c0 },
+       { 0x404208,   1, 0x04, 0x0000a140 },
+       { 0x40420c,   1, 0x04, 0x0000902d },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk58xx[] = {
+       { 0x405800,   1, 0x04, 0x0f8000bf },
+       { 0x405830,   1, 0x04, 0x02180648 },
+       { 0x405834,   1, 0x04, 0x08000000 },
+       { 0x405838,   1, 0x04, 0x00000000 },
+       { 0x405854,   1, 0x04, 0x00000000 },
+       { 0x405870,   4, 0x04, 0x00000001 },
+       { 0x405a00,   2, 0x04, 0x00000000 },
+       { 0x405a18,   1, 0x04, 0x00000000 },
+       { 0x405a1c,   1, 0x04, 0x000000ff },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk64xx[] = {
+       { 0x4064a8,   1, 0x04, 0x00000000 },
+       { 0x4064ac,   1, 0x04, 0x00003fff },
+       { 0x4064b0,   3, 0x04, 0x00000000 },
+       { 0x4064c0,   1, 0x04, 0x802000f0 },
+       { 0x4064c4,   1, 0x04, 0x0192ffff },
+       { 0x4064c8,   1, 0x04, 0x00c20200 },
+       { 0x4064cc,   9, 0x04, 0x00000000 },
+       { 0x4064fc,   1, 0x04, 0x0000022a },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk78xx[] = {
+       { 0x407804,   1, 0x04, 0x00000063 },
+       { 0x40780c,   1, 0x04, 0x0a418820 },
+       { 0x407810,   1, 0x04, 0x062080e6 },
+       { 0x407814,   1, 0x04, 0x020398a4 },
+       { 0x407818,   1, 0x04, 0x0e629062 },
+       { 0x40781c,   1, 0x04, 0x0a418820 },
+       { 0x407820,   1, 0x04, 0x000000e6 },
+       { 0x4078bc,   1, 0x04, 0x00000103 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk88xx[] = {
+       { 0x408800,   1, 0x04, 0x32802a3c },
+       { 0x408804,   1, 0x04, 0x00000040 },
+       { 0x408808,   1, 0x04, 0x1003e005 },
+       { 0x408840,   1, 0x04, 0x0000000b },
+       { 0x408900,   1, 0x04, 0xb080b801 },
+       { 0x408904,   1, 0x04, 0x62000001 },
+       { 0x408908,   1, 0x04, 0x02c8102f },
+       { 0x408980,   1, 0x04, 0x0000011d },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_gpc_0[] = {
+       { 0x418380,   1, 0x04, 0x00000016 },
+       { 0x418400,   1, 0x04, 0x38005e00 },
+       { 0x418404,   1, 0x04, 0x71e0ffff },
+       { 0x41840c,   1, 0x04, 0x00001008 },
+       { 0x418410,   1, 0x04, 0x0fff0fff },
+       { 0x418414,   1, 0x04, 0x02200fff },
+       { 0x418450,   6, 0x04, 0x00000000 },
+       { 0x418468,   1, 0x04, 0x00000001 },
+       { 0x41846c,   2, 0x04, 0x00000000 },
+       { 0x418600,   1, 0x04, 0x0000007f },
+       { 0x418684,   1, 0x04, 0x0000001f },
+       { 0x418700,   1, 0x04, 0x00000002 },
+       { 0x418704,   2, 0x04, 0x00000080 },
+       { 0x41870c,   2, 0x04, 0x00000000 },
+       { 0x418800,   1, 0x04, 0x7006863a },
+       { 0x418808,   1, 0x04, 0x00000000 },
+       { 0x41880c,   1, 0x04, 0x00000030 },
+       { 0x418810,   1, 0x04, 0x00000000 },
+       { 0x418828,   1, 0x04, 0x00000044 },
+       { 0x418830,   1, 0x04, 0x10000001 },
+       { 0x4188d8,   1, 0x04, 0x00000008 },
+       { 0x4188e0,   1, 0x04, 0x01000000 },
+       { 0x4188e8,   5, 0x04, 0x00000000 },
+       { 0x4188fc,   1, 0x04, 0x20100058 },
+       { 0x41891c,   1, 0x04, 0x00ff00ff },
+       { 0x418924,   1, 0x04, 0x00000000 },
+       { 0x418928,   1, 0x04, 0x00ffff00 },
+       { 0x41892c,   1, 0x04, 0x0000ff00 },
+       { 0x418b00,   1, 0x04, 0x0000001e },
+       { 0x418b08,   1, 0x04, 0x0a418820 },
+       { 0x418b0c,   1, 0x04, 0x062080e6 },
+       { 0x418b10,   1, 0x04, 0x020398a4 },
+       { 0x418b14,   1, 0x04, 0x0e629062 },
+       { 0x418b18,   1, 0x04, 0x0a418820 },
+       { 0x418b1c,   1, 0x04, 0x000000e6 },
+       { 0x418bb8,   1, 0x04, 0x00000103 },
+       { 0x418c08,   1, 0x04, 0x00000001 },
+       { 0x418c10,   8, 0x04, 0x00000000 },
+       { 0x418c40,   1, 0x04, 0xffffffff },
+       { 0x418c6c,   1, 0x04, 0x00000001 },
+       { 0x418c80,   1, 0x04, 0x2020000c },
+       { 0x418c8c,   1, 0x04, 0x00000001 },
+       { 0x418d24,   1, 0x04, 0x00000000 },
+       { 0x419000,   1, 0x04, 0x00000780 },
+       { 0x419004,   2, 0x04, 0x00000000 },
+       { 0x419014,   1, 0x04, 0x00000004 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_tpc[] = {
+       { 0x419848,   1, 0x04, 0x00000000 },
+       { 0x419864,   1, 0x04, 0x00000129 },
+       { 0x419888,   1, 0x04, 0x00000000 },
+       { 0x419a00,   1, 0x04, 0x000100f0 },
+       { 0x419a04,   1, 0x04, 0x00000001 },
+       { 0x419a08,   1, 0x04, 0x00000421 },
+       { 0x419a0c,   1, 0x04, 0x00120000 },
+       { 0x419a10,   1, 0x04, 0x00000000 },
+       { 0x419a14,   1, 0x04, 0x00000200 },
+       { 0x419a1c,   1, 0x04, 0x0000c000 },
+       { 0x419a20,   1, 0x04, 0x00000800 },
+       { 0x419a30,   1, 0x04, 0x00000001 },
+       { 0x419ac4,   1, 0x04, 0x0037f440 },
+       { 0x419c00,   1, 0x04, 0x0000001a },
+       { 0x419c04,   1, 0x04, 0x80000006 },
+       { 0x419c08,   1, 0x04, 0x00000002 },
+       { 0x419c20,   1, 0x04, 0x00000000 },
+       { 0x419c24,   1, 0x04, 0x00084210 },
+       { 0x419c28,   1, 0x04, 0x3efbefbe },
+       { 0x419ce8,   1, 0x04, 0x00000000 },
+       { 0x419cf4,   1, 0x04, 0x00000203 },
+       { 0x419e04,   1, 0x04, 0x00000000 },
+       { 0x419e08,   1, 0x04, 0x0000001d },
+       { 0x419e0c,   1, 0x04, 0x00000000 },
+       { 0x419e10,   1, 0x04, 0x00001c02 },
+       { 0x419e44,   1, 0x04, 0x0013eff2 },
+       { 0x419e48,   1, 0x04, 0x00000000 },
+       { 0x419e4c,   1, 0x04, 0x0000007f },
+       { 0x419e50,   2, 0x04, 0x00000000 },
+       { 0x419e58,   1, 0x04, 0x00000001 },
+       { 0x419e5c,   3, 0x04, 0x00000000 },
+       { 0x419e68,   1, 0x04, 0x00000002 },
+       { 0x419e6c,  12, 0x04, 0x00000000 },
+       { 0x419eac,   1, 0x04, 0x00001f8f },
+       { 0x419eb0,   1, 0x04, 0x0db00da0 },
+       { 0x419eb8,   1, 0x04, 0x00000000 },
+       { 0x419ec8,   1, 0x04, 0x0001304f },
+       { 0x419f30,   4, 0x04, 0x00000000 },
+       { 0x419f40,   1, 0x04, 0x00000018 },
+       { 0x419f44,   3, 0x04, 0x00000000 },
+       { 0x419f58,   1, 0x04, 0x00000020 },
+       { 0x419f70,   1, 0x04, 0x00000000 },
+       { 0x419f78,   1, 0x04, 0x000001eb },
+       { 0x419f7c,   1, 0x04, 0x00000404 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_grctx_init_unk[] = {
+       { 0x41be24,   1, 0x04, 0x00000006 },
+       { 0x41bec0,   1, 0x04, 0x10000000 },
+       { 0x41bec4,   1, 0x04, 0x00037f7f },
+       { 0x41bee4,   1, 0x04, 0x00000000 },
+       { 0x41bef0,   1, 0x04, 0x000003ff },
+       { 0x41bf00,   1, 0x04, 0x0a418820 },
+       { 0x41bf04,   1, 0x04, 0x062080e6 },
+       { 0x41bf08,   1, 0x04, 0x020398a4 },
+       { 0x41bf0c,   1, 0x04, 0x0e629062 },
+       { 0x41bf10,   1, 0x04, 0x0a418820 },
+       { 0x41bf14,   1, 0x04, 0x000000e6 },
+       { 0x41bfd0,   1, 0x04, 0x00900103 },
+       { 0x41bfe0,   1, 0x04, 0x00400001 },
+       { 0x41bfe4,   1, 0x04, 0x00000000 },
+       {}
+};
+
+static void
+nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+       u32 magic[GPC_MAX][2];
+       u32 offset;
+       int gpc;
+
+       mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+       mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+       mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+       mmio_list(0x40800c, 0x00000000,  8, 1);
+       mmio_list(0x408010, 0x80000000,  0, 0);
+       mmio_list(0x419004, 0x00000000,  8, 1);
+       mmio_list(0x419008, 0x00000000,  0, 0);
+       mmio_list(0x408004, 0x00000000,  8, 0);
+       mmio_list(0x408008, 0x80000030,  0, 0);
+       mmio_list(0x418808, 0x00000000,  8, 0);
+       mmio_list(0x41880c, 0x80000030,  0, 0);
+       mmio_list(0x418810, 0x80000000, 12, 2);
+       mmio_list(0x419848, 0x10000000, 12, 2);
+
+       mmio_list(0x405830, 0x02180648,  0, 0);
+       mmio_list(0x4064c4, 0x0192ffff,  0, 0);
+
+       for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+               u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+               u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+               magic[gpc][0]  = 0x10000000 | (magic0 << 16) | offset;
+               magic[gpc][1]  = 0x00000000 | (magic1 << 16);
+               offset += 0x0324 * priv->tpc_nr[gpc];
+       }
+
+       for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+               mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+               mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+               offset += 0x07ff * priv->tpc_nr[gpc];
+       }
+
+       mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
+       mmio_list(0x17e920, 0x00090d08, 0, 0);
+}
+
+static struct nvc0_graph_init *
+nv108_grctx_init_hub[] = {
+       nvc0_grctx_init_base,
+       nv108_grctx_init_unk40xx,
+       nvf0_grctx_init_unk44xx,
+       nve4_grctx_init_unk46xx,
+       nve4_grctx_init_unk47xx,
+       nv108_grctx_init_unk58xx,
+       nvf0_grctx_init_unk5bxx,
+       nvf0_grctx_init_unk60xx,
+       nv108_grctx_init_unk64xx,
+       nv108_grctx_init_unk78xx,
+       nve4_grctx_init_unk80xx,
+       nv108_grctx_init_unk88xx,
+       NULL
+};
+
+struct nvc0_graph_init *
+nv108_grctx_init_gpc[] = {
+       nv108_grctx_init_gpc_0,
+       nvc0_grctx_init_gpc_1,
+       nv108_grctx_init_tpc,
+       nv108_grctx_init_unk,
+       NULL
+};
+
+struct nvc0_graph_init
+nv108_grctx_init_mthd_magic[] = {
+       { 0x3410, 1, 0x04, 0x8e0e2006 },
+       { 0x3414, 1, 0x04, 0x00000038 },
+       {}
+};
+
+static struct nvc0_graph_mthd
+nv108_grctx_init_mthd[] = {
+       { 0xa197, nv108_grctx_init_a197, },
+       { 0x902d, nvc0_grctx_init_902d, },
+       { 0x902d, nv108_grctx_init_mthd_magic, },
+       {}
+};
+
+struct nouveau_oclass *
+nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
+       .base.handle = NV_ENGCTX(GR, 0x08),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_graph_context_ctor,
+               .dtor = nvc0_graph_context_dtor,
+               .init = _nouveau_graph_context_init,
+               .fini = _nouveau_graph_context_fini,
+               .rd32 = _nouveau_graph_context_rd32,
+               .wr32 = _nouveau_graph_context_wr32,
+       },
+       .main = nve4_grctx_generate_main,
+       .mods = nv108_grctx_generate_mods,
+       .unkn = nve4_grctx_generate_unkn,
+       .hub  = nv108_grctx_init_hub,
+       .gpc  = nv108_grctx_init_gpc,
+       .icmd = nv108_grctx_init_icmd,
+       .mthd = nv108_grctx_init_mthd,
+}.base;
index dcb2ebb8c29d93ac211bebd3d79f6224b48604b8..44012c3da53832c325e2286bafcc191208d9e946 100644 (file)
@@ -50,7 +50,7 @@ nvf0_grctx_init_unk40xx[] = {
        {}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_grctx_init_unk44xx[] = {
        { 0x404404,  12, 0x04, 0x00000000 },
        { 0x404438,   1, 0x04, 0x00000000 },
@@ -62,7 +62,7 @@ nvf0_grctx_init_unk44xx[] = {
        {}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_grctx_init_unk5bxx[] = {
        { 0x405b00,   1, 0x04, 0x00000000 },
        { 0x405b10,   1, 0x04, 0x00001000 },
@@ -70,7 +70,7 @@ nvf0_grctx_init_unk5bxx[] = {
        {}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_grctx_init_unk60xx[] = {
        { 0x406020,   1, 0x04, 0x034103c1 },
        { 0x406028,   4, 0x04, 0x00000001 },
@@ -286,7 +286,6 @@ nvf0_grctx_init_hub[] = {
        nvf0_grctx_init_unk64xx,
        nve4_grctx_init_unk80xx,
        nvf0_grctx_init_unk88xx,
-       nvd9_grctx_init_rop,
        NULL
 };
 
index 5d24b6de16cce636e3db922c7c81d09018d9f2cc..e148961b8075e96be5ab2f1b86ebc7170676db4e 100644 (file)
@@ -38,7 +38,7 @@ queue_put:
        cmpu b32 $r8 $r9
        bra ne #queue_put_next
                mov $r15 E_CMD_OVERFLOW
-               call #error
+               call(error)
                ret
 
        // store cmd/data on queue
@@ -92,18 +92,16 @@ queue_get_done:
 // Out: $r15 value
 //
 nv_rd32:
-       mov $r11 0x728
-       shl b32 $r11 6
        mov b32 $r12 $r14
        bset $r12 31                    // MMIO_CTRL_PENDING
-       iowr I[$r11 + 0x000] $r12       // MMIO_CTRL
+       nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
        nv_rd32_wait:
-               iord $r12 I[$r11 + 0x000]
+               nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
                xbit $r12 $r12 31
                bra ne #nv_rd32_wait
        mov $r10 6                      // DONE_MMIO_RD
-       call #wait_doneo
-       iord $r15 I[$r11 + 0x100]       // MMIO_RDVAL
+       call(wait_doneo)
+       nv_iord($r15, NV_PGRAPH_FECS_MMIO_RDVAL, 0)
        ret
 
 // nv_wr32 - write 32-bit value to nv register
@@ -112,37 +110,17 @@ nv_rd32:
 //      $r15 value
 //
 nv_wr32:
-       mov $r11 0x728
-       shl b32 $r11 6
-       iowr I[$r11 + 0x200] $r15       // MMIO_WRVAL
+       nv_iowr(NV_PGRAPH_FECS_MMIO_WRVAL, 0, $r15)
        mov b32 $r12 $r14
        bset $r12 31                    // MMIO_CTRL_PENDING
        bset $r12 30                    // MMIO_CTRL_WRITE
-       iowr I[$r11 + 0x000] $r12       // MMIO_CTRL
+       nv_iowr(NV_PGRAPH_FECS_MMIO_CTRL, 0, $r12)
        nv_wr32_wait:
-               iord $r12 I[$r11 + 0x000]
+               nv_iord($r12, NV_PGRAPH_FECS_MMIO_CTRL, 0)
                xbit $r12 $r12 31
                bra ne #nv_wr32_wait
        ret
 
-// (re)set watchdog timer
-//
-// In : $r15 timeout
-//
-watchdog_reset:
-       mov $r8 0x430
-       shl b32 $r8 6
-       bset $r15 31
-       iowr I[$r8 + 0x000] $r15
-       ret
-
-// clear watchdog timer
-watchdog_clear:
-       mov $r8 0x430
-       shl b32 $r8 6
-       iowr I[$r8 + 0x000] $r0
-       ret
-
 // wait_donez - wait on FUC_DONE bit to become clear
 //
 // In : $r10 bit to wait on
@@ -163,13 +141,9 @@ wait_donez:
 //
 wait_doneo:
        trace_set(T_WAIT);
-       mov $r8 0x818
-       shl b32 $r8 6
-       iowr I[$r8 + 0x000] $r10
+       nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(6), 0, $r10)
        wait_doneo_e:
-               mov $r8 0x400
-               shl b32 $r8 6
-               iord $r8 I[$r8 + 0x000]
+               nv_iord($r8, NV_PGRAPH_FECS_SIGNAL, 0)
                xbit $r8 $r8 $r10
                bra e #wait_doneo_e
        trace_clr(T_WAIT)
@@ -209,21 +183,18 @@ mmctx_size:
 //
 mmctx_xfer:
        trace_set(T_MMCTX)
-       mov $r8 0x710
-       shl b32 $r8 6
        clear b32 $r9
        or $r11 $r11
        bra e #mmctx_base_disabled
-               iowr I[$r8 + 0x000] $r11        // MMCTX_BASE
+               nv_iowr(NV_PGRAPH_FECS_MMCTX_BASE, 0, $r11)
                bset $r9 0                      // BASE_EN
        mmctx_base_disabled:
        or $r14 $r14
        bra e #mmctx_multi_disabled
-               iowr I[$r8 + 0x200] $r14        // MMCTX_MULTI_STRIDE
-               iowr I[$r8 + 0x300] $r15        // MMCTX_MULTI_MASK
+               nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE, 0, $r14)
+               nv_iowr(NV_PGRAPH_FECS_MMCTX_MULTI_MASK, 0, $r15)
                bset $r9 1                      // MULTI_EN
        mmctx_multi_disabled:
-       add b32 $r8 0x100
 
        xbit $r11 $r10 0
        shl b32 $r11 16                 // DIR
@@ -231,20 +202,20 @@ mmctx_xfer:
        xbit $r14 $r10 1
        shl b32 $r14 17
        or $r11 $r14                    // START_TRIGGER
-       iowr I[$r8 + 0x000] $r11        // MMCTX_CTRL
+       nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
 
        // loop over the mmio list, and send requests to the hw
        mmctx_exec_loop:
                // wait for space in mmctx queue
                mmctx_wait_free:
-                       iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+                       nv_iord($r14, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
                        and $r14 0x1f
                        bra e #mmctx_wait_free
 
                // queue up an entry
                ld b32 $r14 D[$r12]
                or $r14 $r9
-               iowr I[$r8 + 0x300] $r14
+               nv_iowr(NV_PGRAPH_FECS_MMCTX_QUEUE, 0, $r14)
                add b32 $r12 4
                cmpu b32 $r12 $r13
                bra ne #mmctx_exec_loop
@@ -253,22 +224,22 @@ mmctx_xfer:
        bra ne #mmctx_stop
                // wait for queue to empty
                mmctx_fini_wait:
-                       iord $r11 I[$r8 + 0x000]        // MMCTX_CTRL
+                       nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
                        and $r11 0x1f
                        cmpu b32 $r11 0x10
                        bra ne #mmctx_fini_wait
                mov $r10 2                              // DONE_MMCTX
-               call #wait_donez
+               call(wait_donez)
                bra #mmctx_done
        mmctx_stop:
                xbit $r11 $r10 0
                shl b32 $r11 16                 // DIR
                bset $r11 12                    // QLIMIT = 0x10
                bset $r11 18                    // STOP_TRIGGER
-               iowr I[$r8 + 0x000] $r11        // MMCTX_CTRL
+               nv_iowr(NV_PGRAPH_FECS_MMCTX_CTRL, 0, $r11)
                mmctx_stop_wait:
                        // wait for STOP_TRIGGER to clear
-                       iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+                       nv_iord($r11, NV_PGRAPH_FECS_MMCTX_CTRL, 0)
                        xbit $r11 $r11 18
                        bra ne #mmctx_stop_wait
        mmctx_done:
@@ -280,28 +251,24 @@ mmctx_xfer:
 strand_wait:
        push $r10
        mov $r10 2
-       call #wait_donez
+       call(wait_donez)
        pop $r10
        ret
 
 // unknown - call before issuing strand commands
 //
 strand_pre:
-       mov $r8 0x4afc
-       sethi $r8 0x20000
-       mov $r9 0xc
-       iowr I[$r8] $r9
-       call #strand_wait
+       mov $r9 NV_PGRAPH_FECS_STRAND_CMD_ENABLE
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+       call(strand_wait)
        ret
 
 // unknown - call after issuing strand commands
 //
 strand_post:
-       mov $r8 0x4afc
-       sethi $r8 0x20000
-       mov $r9 0xd
-       iowr I[$r8] $r9
-       call #strand_wait
+       mov $r9 NV_PGRAPH_FECS_STRAND_CMD_DISABLE
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r9)
+       call(strand_wait)
        ret
 
 // Selects strand set?!
@@ -309,18 +276,14 @@ strand_post:
 // In: $r14 id
 //
 strand_set:
-       mov $r10 0x4ffc
-       sethi $r10 0x20000
-       sub b32 $r11 $r10 0x500
        mov $r12 0xf
-       iowr I[$r10 + 0x000] $r12               // 0x93c = 0xf
-       mov $r12 0xb
-       iowr I[$r11 + 0x000] $r12               // 0x928 = 0xb
-       call #strand_wait
-       iowr I[$r10 + 0x000] $r14               // 0x93c = <id>
-       mov $r12 0xa
-       iowr I[$r11 + 0x000] $r12               // 0x928 = 0xa
-       call #strand_wait
+       nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r12)
+       mov $r12 NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+       nv_iowr(NV_PGRAPH_FECS_STRAND_FILTER, 0x3f, $r14)
+       mov $r12 NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+       call(strand_wait)
        ret
 
 // Initialise strand context data
@@ -332,30 +295,27 @@ strand_set:
 //
 strand_ctx_init:
        trace_set(T_STRINIT)
-       call #strand_pre
+       call(strand_pre)
        mov $r14 3
-       call #strand_set
-       mov $r10 0x46fc
-       sethi $r10 0x20000
-       add b32 $r11 $r10 0x400
-       iowr I[$r10 + 0x100] $r0        // STRAND_FIRST_GENE = 0
-       mov $r12 1
-       iowr I[$r11 + 0x000] $r12       // STRAND_CMD = LATCH_FIRST_GENE
-       call #strand_wait
+       call(strand_set)
+
+       clear b32 $r12
+       nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r12)
+       mov $r12 NV_PGRAPH_FECS_STRAND_CMD_SEEK
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+       call(strand_wait)
        sub b32 $r12 $r0 1
-       iowr I[$r10 + 0x000] $r12       // STRAND_GENE_CNT = 0xffffffff
-       mov $r12 2
-       iowr I[$r11 + 0x000] $r12       // STRAND_CMD = LATCH_GENE_CNT
-       call #strand_wait
-       call #strand_post
+       nv_iowr(NV_PGRAPH_FECS_STRAND_DATA, 0x3f, $r12)
+       mov $r12 NV_PGRAPH_FECS_STRAND_CMD_GET_INFO
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r12)
+       call(strand_wait)
+       call(strand_post)
 
        // read the size of each strand, poke the context offset of
        // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
        // about it later then.
-       mov $r8 0x880
-       shl b32 $r8 6
-       iord $r9 I[$r8 + 0x000]         // STRANDS
-       add b32 $r8 0x2200
+       nv_mkio($r8, NV_PGRAPH_FECS_STRAND_SAVE_SWBASE, 0x00)
+       nv_iord($r9, NV_PGRAPH_FECS_STRANDS_CNT, 0x00)
        shr b32 $r14 $r15 8
        ctx_init_strand_loop:
                iowr I[$r8 + 0x000] $r14        // STRAND_SAVE_SWBASE
index 5547c1b3f4f29414a1334e2a7d5cae9f7452e998..96cbcea3b2c96aa71f2c7ef3d59b3d3184a67e17 100644 (file)
@@ -58,12 +58,9 @@ mmio_list_base:
 //
 error:
        push $r14
-       mov $r14 -0x67ec        // 0x9814
-       sethi $r14 0x400000
-       call #nv_wr32           // HUB_CTXCTL_CC_SCRATCH[5] = error code
-       add b32 $r14 0x41c
+       nv_wr32(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), $r15)
        mov $r15 1
-       call #nv_wr32           // HUB_CTXCTL_INTR_UP_SET
+       nv_wr32(NV_PGRAPH_FECS_INTR_UP_SET, $r15)
        pop $r14
        ret
 
@@ -84,46 +81,40 @@ init:
        mov $sp $r0
 
        // enable fifo access
-       mov $r1 0x1200
-       mov $r2 2
-       iowr I[$r1 + 0x000] $r2         // FIFO_ENABLE
+       mov $r2 NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_ACCESS, 0, $r2)
 
        // setup i0 handler, and route all interrupts to it
        mov $r1 #ih
        mov $iv0 $r1
-       mov $r1 0x400
-       iowr I[$r1 + 0x300] $r0         // INTR_DISPATCH
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE, 0, $r0)
 
        // enable fifo interrupt
-       mov $r2 4
-       iowr I[$r1 + 0x000] $r2         // INTR_EN_SET
+       mov $r2 NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET, 0, $r2)
 
        // enable interrupts
        bset $flags ie0
 
        // figure out which GPC we are, and how many TPCs we have
-       mov $r1 0x608
-       shl b32 $r1 6
-       iord $r2 I[$r1 + 0x000]         // UNITS
+       nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_UNITS, 0)
        mov $r3 1
        and $r2 0x1f
        shl b32 $r3 $r2
        sub b32 $r3 1
        st b32 D[$r0 + #tpc_count] $r2
        st b32 D[$r0 + #tpc_mask] $r3
-       add b32 $r1 0x400
-       iord $r2 I[$r1 + 0x000]         // MYINDEX
+       nv_iord($r2, NV_PGRAPH_GPCX_GPCCS_MYINDEX, 0)
        st b32 D[$r0 + #gpc_id] $r2
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
        // figure out which, and how many, UNKs are actually present
-       mov $r14 0x0c30
-       sethi $r14 0x500000
+       imm32($r14, 0x500c30)
        clear b32 $r2
        clear b32 $r3
        clear b32 $r4
        init_unk_loop:
-               call #nv_rd32
+               call(nv_rd32)
                cmp b32 $r15 0
                bra z #init_unk_next
                        mov $r15 1
@@ -146,23 +137,21 @@ init:
 
        // set mmctx base addresses now so we don't have to do it later,
        // they don't currently ever change
-       mov $r4 0x700
-       shl b32 $r4 6
        shr b32 $r5 $r2 8
-       iowr I[$r4 + 0x000] $r5         // MMCTX_SAVE_SWBASE
-       iowr I[$r4 + 0x100] $r5         // MMCTX_LOAD_SWBASE
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE, 0, $r5)
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE, 0, $r5)
 
        // calculate GPC mmio context size
        ld b32 $r14 D[$r0 + #gpc_mmio_list_head]
        ld b32 $r15 D[$r0 + #gpc_mmio_list_tail]
-       call #mmctx_size
+       call(mmctx_size)
        add b32 $r2 $r15
        add b32 $r3 $r15
 
        // calculate per-TPC mmio context size
        ld b32 $r14 D[$r0 + #tpc_mmio_list_head]
        ld b32 $r15 D[$r0 + #tpc_mmio_list_tail]
-       call #mmctx_size
+       call(mmctx_size)
        ld b32 $r14 D[$r0 + #tpc_count]
        mulu $r14 $r15
        add b32 $r2 $r14
@@ -172,7 +161,7 @@ init:
        // calculate per-UNK mmio context size
        ld b32 $r14 D[$r0 + #unk_mmio_list_head]
        ld b32 $r15 D[$r0 + #unk_mmio_list_tail]
-       call #mmctx_size
+       call(mmctx_size)
        ld b32 $r14 D[$r0 + #unk_count]
        mulu $r14 $r15
        add b32 $r2 $r14
@@ -180,9 +169,8 @@ init:
 #endif
 
        // round up base/size to 256 byte boundary (for strand SWBASE)
-       add b32 $r4 0x1300
        shr b32 $r3 2
-       iowr I[$r4 + 0x000] $r3         // MMCTX_LOAD_COUNT, wtf for?!?
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT, 0, $r3) // wtf for?!
        shr b32 $r2 8
        shr b32 $r3 6
        add b32 $r2 1
@@ -192,7 +180,7 @@ init:
 
        // calculate size of strand context data
        mov b32 $r15 $r2
-       call #strand_ctx_init
+       call(strand_ctx_init)
        add b32 $r3 $r15
 
        // save context size, and tell HUB we're done
@@ -208,7 +196,7 @@ main:
        bset $flags $p0
        sleep $p0
        mov $r13 #cmd_queue
-       call #queue_get
+       call(queue_get)
        bra $p1 #main
 
        // 0x0000-0x0003 are all context transfers
@@ -224,13 +212,13 @@ main:
                or $r1 $r14
                mov $flags $r1
                // transfer context data
-               call #ctx_xfer
+               call(ctx_xfer)
                bra #main
 
        main_not_ctx_xfer:
        shl b32 $r15 $r14 16
        or $r15 E_BAD_COMMAND
-       call #error
+       call(error)
        bra #main
 
 // interrupt handler
@@ -247,22 +235,20 @@ ih:
        clear b32 $r0
 
        // incoming fifo command?
-       iord $r10 I[$r0 + 0x200]        // INTR
-       and $r11 $r10 0x00000004
+       nv_iord($r10, NV_PGRAPH_GPCX_GPCCS_INTR, 0)
+       and $r11 $r10 NV_PGRAPH_GPCX_GPCCS_INTR_FIFO
        bra e #ih_no_fifo
                // queue incoming fifo command for later processing
-               mov $r11 0x1900
                mov $r13 #cmd_queue
-               iord $r14 I[$r11 + 0x100]       // FIFO_CMD
-               iord $r15 I[$r11 + 0x000]       // FIFO_DATA
-               call #queue_put
-               add b32 $r11 0x400
+               nv_iord($r14, NV_PGRAPH_GPCX_GPCCS_FIFO_CMD, 0)
+               nv_iord($r15, NV_PGRAPH_GPCX_GPCCS_FIFO_DATA, 0)
+               call(queue_put)
                mov $r14 1
-               iowr I[$r11 + 0x000] $r14       // FIFO_ACK
+               nv_iowr(NV_PGRAPH_GPCX_GPCCS_FIFO_ACK, 0, $r14)
 
        // ack, and wake up main()
        ih_no_fifo:
-       iowr I[$r0 + 0x100] $r10        // INTR_ACK
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_INTR_ACK, 0, $r10)
 
        pop $r15
        pop $r14
@@ -283,9 +269,7 @@ hub_barrier_done:
        mov $r15 1
        ld b32 $r14 D[$r0 + #gpc_id]
        shl b32 $r15 $r14
-       mov $r14 -0x6be8        // 0x409418 - HUB_BAR_SET
-       sethi $r14 0x400000
-       call #nv_wr32
+       nv_wr32(0x409418, $r15) // 0x409418 - HUB_BAR_SET
        ret
 
 // Disables various things, waits a bit, and re-enables them..
@@ -295,16 +279,15 @@ hub_barrier_done:
 // funny things happen.
 //
 ctx_redswitch:
-       mov $r14 0x614
-       shl b32 $r14 6
-       mov $r15 0x020
-       iowr I[$r14] $r15       // GPC_RED_SWITCH = POWER
-       mov $r15 8
+       mov $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
+       mov $r14 8
        ctx_redswitch_delay:
-               sub b32 $r15 1
+               sub b32 $r14 1
                bra ne #ctx_redswitch_delay
-       mov $r15 0xa20
-       iowr I[$r14] $r15       // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+       or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11
+       or $r15 NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_RED_SWITCH, 0, $r15)
        ret
 
 // Transfer GPC context data between GPU and storage area
@@ -317,46 +300,37 @@ ctx_redswitch:
 //
 ctx_xfer:
        // set context base address
-       mov $r1 0xa04
-       shl b32 $r1 6
-       iowr I[$r1 + 0x000] $r15// MEM_BASE
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_MEM_BASE, 0, $r15)
        bra not $p1 #ctx_xfer_not_load
-               call #ctx_redswitch
+               call(ctx_redswitch)
        ctx_xfer_not_load:
 
        // strands
-       mov $r1 0x4afc
-       sethi $r1 0x20000
-       mov $r2 0xc
-       iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0c
-       call #strand_wait
-       mov $r2 0x47fc
-       sethi $r2 0x20000
-       iowr I[$r2] $r0         // STRAND_FIRST_GENE(0x3f) = 0x00
-       xbit $r2 $flags $p1
-       add b32 $r2 3
-       iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+       call(strand_pre)
+       clear b32 $r2
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT, 0x3f, $r2)
+       xbit $r2 $flags $p1     // SAVE/LOAD
+       add b32 $r2 NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE
+       nv_iowr(NV_PGRAPH_GPCX_GPCCS_STRAND_CMD, 0x3f, $r2)
 
        // mmio context
        xbit $r10 $flags $p1    // direction
        or $r10 2               // first
-       mov $r11 0x0000
-       sethi $r11 0x500000
+       imm32($r11,0x500000)
        ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn
        ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
        ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
        mov $r14 0              // not multi
-       call #mmctx_xfer
+       call(mmctx_xfer)
 
        // per-TPC mmio context
        xbit $r10 $flags $p1    // direction
 #if !NV_PGRAPH_GPCX_UNK__SIZE
        or $r10 4               // last
 #endif
-       mov $r11 0x4000
-       sethi $r11 0x500000     // base = NV_PGRAPH_GPC0_TPC0
+       imm32($r11, 0x504000)
        ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn_TPC0
@@ -364,14 +338,13 @@ ctx_xfer:
        ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
        ld b32 $r15 D[$r0 + #tpc_mask]
        mov $r14 0x800          // stride = 0x800
-       call #mmctx_xfer
+       call(mmctx_xfer)
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
        // per-UNK mmio context
        xbit $r10 $flags $p1    // direction
        or $r10 4               // last
-       mov $r11 0x3000
-       sethi $r11 0x500000     // base = NV_PGRAPH_GPC0_UNK0
+       imm32($r11, 0x503000)
        ld b32 $r12 D[$r0 + #gpc_id]
        shl b32 $r12 15
        add b32 $r11 $r12       // base = NV_PGRAPH_GPCn_UNK0
@@ -379,11 +352,11 @@ ctx_xfer:
        ld b32 $r13 D[$r0 + #unk_mmio_list_tail]
        ld b32 $r15 D[$r0 + #unk_mask]
        mov $r14 0x200          // stride = 0x200
-       call #mmctx_xfer
+       call(mmctx_xfer)
 #endif
 
        // wait for strands to finish
-       call #strand_wait
+       call(strand_wait)
 
        // if load, or a save without a load following, do some
        // unknown stuff that's done after finishing a block of
@@ -391,14 +364,10 @@ ctx_xfer:
        bra $p1 #ctx_xfer_post
        bra not $p2 #ctx_xfer_done
        ctx_xfer_post:
-               mov $r1 0x4afc
-               sethi $r1 0x20000
-               mov $r2 0xd
-               iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0d
-               call #strand_wait
+               call(strand_post)
 
        // mark completion in HUB's barrier
        ctx_xfer_done:
-       call #hub_barrier_done
+       call(hub_barrier_done)
        ret
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5
new file mode 100644 (file)
index 0000000..bd30262
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define NV_PGRAPH_GPCX_UNK__SIZE                                     0x00000001
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grgpc_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "gpc.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grgpc_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "gpc.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnv108.fuc5.h
new file mode 100644 (file)
index 0000000..27dc128
--- /dev/null
@@ -0,0 +1,473 @@
+uint32_t nv108_grgpc_data[] = {
+/* 0x0000: gpc_mmio_list_head */
+       0x0000006c,
+/* 0x0004: gpc_mmio_list_tail */
+/* 0x0004: tpc_mmio_list_head */
+       0x0000006c,
+/* 0x0008: tpc_mmio_list_tail */
+/* 0x0008: unk_mmio_list_head */
+       0x0000006c,
+/* 0x000c: unk_mmio_list_tail */
+       0x0000006c,
+/* 0x0010: gpc_id */
+       0x00000000,
+/* 0x0014: tpc_count */
+       0x00000000,
+/* 0x0018: tpc_mask */
+       0x00000000,
+/* 0x001c: unk_count */
+       0x00000000,
+/* 0x0020: unk_mask */
+       0x00000000,
+/* 0x0024: cmd_queue */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+};
+
+uint32_t nv108_grgpc_code[] = {
+       0x03140ef5,
+/* 0x0004: queue_put */
+       0x9800d898,
+       0x86f001d9,
+       0xf489a408,
+       0x020f0b1b,
+       0x0002f87e,
+/* 0x001a: queue_put_next */
+       0x98c400f8,
+       0x0384b607,
+       0xb6008dbb,
+       0x8eb50880,
+       0x018fb500,
+       0xf00190b6,
+       0xd9b50f94,
+/* 0x0037: queue_get */
+       0xf400f801,
+       0xd8980131,
+       0x01d99800,
+       0x0bf489a4,
+       0x0789c421,
+       0xbb0394b6,
+       0x90b6009d,
+       0x009e9808,
+       0xb6019f98,
+       0x84f00180,
+       0x00d8b50f,
+/* 0x0063: queue_get_done */
+       0xf80132f4,
+/* 0x0065: nv_rd32 */
+       0xf0ecb200,
+       0x00801fc9,
+       0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+       0x8c04bd00,
+       0xcf01ca00,
+       0xccc800cc,
+       0xf61bf41f,
+       0xec7e060a,
+       0x008f0000,
+       0xffcf01cb,
+/* 0x008f: nv_wr32 */
+       0x8000f800,
+       0xf601cc00,
+       0x04bd000f,
+       0xc9f0ecb2,
+       0x1ec9f01f,
+       0x01ca0080,
+       0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+       0xca008c04,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f61b,
+/* 0x00b8: wait_donez */
+       0x99f094bd,
+       0x37008000,
+       0x0009f602,
+       0x008004bd,
+       0x0af60206,
+/* 0x00cf: wait_donez_ne */
+       0x8804bd00,
+       0xcf010000,
+       0x8aff0088,
+       0xf61bf488,
+       0x99f094bd,
+       0x17008000,
+       0x0009f602,
+       0x00f804bd,
+/* 0x00ec: wait_doneo */
+       0x99f094bd,
+       0x37008000,
+       0x0009f602,
+       0x008004bd,
+       0x0af60206,
+/* 0x0103: wait_doneo_e */
+       0x8804bd00,
+       0xcf010000,
+       0x8aff0088,
+       0xf60bf488,
+       0x99f094bd,
+       0x17008000,
+       0x0009f602,
+       0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0x1bf4efa4,
+       0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+       0xf094bd00,
+       0x00800199,
+       0x09f60237,
+       0xbd04bd00,
+       0x05bbfd94,
+       0x800f0bf4,
+       0xf601c400,
+       0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0xc6008018,
+       0x000ef601,
+       0x008004bd,
+       0x0ff601c7,
+       0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+       0xabc80199,
+       0x10b4b600,
+       0xc80cb9f0,
+       0xe4b601ae,
+       0x05befd11,
+       0x01c50080,
+       0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+       0xc5008e04,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f60b,
+       0x05e9fd00,
+       0x01c80080,
+       0xbd000ef6,
+       0x04c0b604,
+       0x1bf4cda4,
+       0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+       0x8b1c1bf4,
+       0xcf01c500,
+       0xb4f000bb,
+       0x10b4b01f,
+       0x0af31bf4,
+       0x00b87e02,
+       0x250ef400,
+/* 0x01d8: mmctx_stop */
+       0xb600abc8,
+       0xb9f010b4,
+       0x12b9f00c,
+       0x01c50080,
+       0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+       0xc5008b04,
+       0x00bbcf01,
+       0xf412bbc8,
+/* 0x01fa: mmctx_done */
+       0x94bdf61b,
+       0x800199f0,
+       0xf6021700,
+       0x04bd0009,
+/* 0x020a: strand_wait */
+       0xa0f900f8,
+       0xb87e020a,
+       0xa0fc0000,
+/* 0x0216: strand_pre */
+       0x0c0900f8,
+       0x024afc80,
+       0xbd0009f6,
+       0x020a7e04,
+/* 0x0227: strand_post */
+       0x0900f800,
+       0x4afc800d,
+       0x0009f602,
+       0x0a7e04bd,
+       0x00f80002,
+/* 0x0238: strand_set */
+       0xfc800f0c,
+       0x0cf6024f,
+       0x0c04bd00,
+       0x4afc800b,
+       0x000cf602,
+       0xfc8004bd,
+       0x0ef6024f,
+       0x0c04bd00,
+       0x4afc800a,
+       0x000cf602,
+       0x0a7e04bd,
+       0x00f80002,
+/* 0x0268: strand_ctx_init */
+       0x99f094bd,
+       0x37008003,
+       0x0009f602,
+       0x167e04bd,
+       0x030e0002,
+       0x0002387e,
+       0xfc80c4bd,
+       0x0cf60247,
+       0x0c04bd00,
+       0x4afc8001,
+       0x000cf602,
+       0x0a7e04bd,
+       0x0c920002,
+       0x46fc8001,
+       0x000cf602,
+       0x020c04bd,
+       0x024afc80,
+       0xbd000cf6,
+       0x020a7e04,
+       0x02277e00,
+       0x42008800,
+       0x20008902,
+       0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+       0xf608fe95,
+       0x8ef6008e,
+       0x808acf40,
+       0xb606a5b6,
+       0xeabb01a0,
+       0x0480b600,
+       0xf40192b6,
+       0xe4b6e81b,
+       0xf2efbc08,
+       0x99f094bd,
+       0x17008003,
+       0x0009f602,
+       0x00f804bd,
+/* 0x02f8: error */
+       0xffb2e0f9,
+       0x4098148e,
+       0x00008f7e,
+       0xffb2010f,
+       0x409c1c8e,
+       0x00008f7e,
+       0x00f8e0fc,
+/* 0x0314: init */
+       0x04fe04bd,
+       0x40020200,
+       0x02f61200,
+       0x4104bd00,
+       0x10fe0465,
+       0x07004000,
+       0xbd0000f6,
+       0x40040204,
+       0x02f60400,
+       0xf404bd00,
+       0x00821031,
+       0x22cf0182,
+       0xf0010300,
+       0x32bb1f24,
+       0x0132b604,
+       0xb50502b5,
+       0x00820603,
+       0x22cf0186,
+       0x0402b500,
+       0x500c308e,
+       0x34bd24bd,
+/* 0x036a: init_unk_loop */
+       0x657e44bd,
+       0xf6b00000,
+       0x0e0bf400,
+       0xf2bb010f,
+       0x054ffd04,
+/* 0x037f: init_unk_next */
+       0xb60130b6,
+       0xe0b60120,
+       0x0126b004,
+/* 0x038b: init_unk_done */
+       0xb5e21bf4,
+       0x04b50703,
+       0x01008208,
+       0x0022cf02,
+       0x259534bd,
+       0xc0008008,
+       0x0005f601,
+       0x008004bd,
+       0x05f601c1,
+       0x9804bd00,
+       0x0f98000e,
+       0x01207e01,
+       0x002fbb00,
+       0x98003fbb,
+       0x0f98010e,
+       0x01207e02,
+       0x050e9800,
+       0xbb00effd,
+       0x3ebb002e,
+       0x020e9800,
+       0x7e030f98,
+       0x98000120,
+       0xeffd070e,
+       0x002ebb00,
+       0xb6003ebb,
+       0x00800235,
+       0x03f601d3,
+       0xb604bd00,
+       0x35b60825,
+       0x0120b606,
+       0xb60130b6,
+       0x34b60824,
+       0x7e2fb208,
+       0xbb000268,
+       0x0080003f,
+       0x03f60201,
+       0xbd04bd00,
+       0x1f29f024,
+       0x02300080,
+       0xbd0002f6,
+/* 0x0429: main */
+       0x0031f404,
+       0x0d0028f4,
+       0x00377e24,
+       0xf401f400,
+       0xf404e4b0,
+       0x81fe1d18,
+       0xbd060201,
+       0x0412fd20,
+       0xfd01e4b6,
+       0x18fe051e,
+       0x04fc7e00,
+       0xd40ef400,
+/* 0x0458: main_not_ctx_xfer */
+       0xf010ef94,
+       0xf87e01f5,
+       0x0ef40002,
+/* 0x0465: ih */
+       0xfe80f9c7,
+       0x80f90188,
+       0xa0f990f9,
+       0xd0f9b0f9,
+       0xf0f9e0f9,
+       0x004a04bd,
+       0x00aacf02,
+       0xf404abc4,
+       0x240d1f0b,
+       0xcf1a004e,
+       0x004f00ee,
+       0x00ffcf19,
+       0x0000047e,
+       0x0040010e,
+       0x000ef61d,
+/* 0x04a2: ih_no_fifo */
+       0x004004bd,
+       0x000af601,
+       0xf0fc04bd,
+       0xd0fce0fc,
+       0xa0fcb0fc,
+       0x80fc90fc,
+       0xfc0088fe,
+       0x0032f480,
+/* 0x04c2: hub_barrier_done */
+       0x010f01f8,
+       0xbb040e98,
+       0xffb204fe,
+       0x4094188e,
+       0x00008f7e,
+/* 0x04d6: ctx_redswitch */
+       0x200f00f8,
+       0x01850080,
+       0xbd000ff6,
+/* 0x04e3: ctx_redswitch_delay */
+       0xb6080e04,
+       0x1bf401e2,
+       0x00f5f1fd,
+       0x00f5f108,
+       0x85008002,
+       0x000ff601,
+       0x00f804bd,
+/* 0x04fc: ctx_xfer */
+       0x02810080,
+       0xbd000ff6,
+       0x0711f404,
+       0x0004d67e,
+/* 0x050c: ctx_xfer_not_load */
+       0x0002167e,
+       0xfc8024bd,
+       0x02f60247,
+       0xf004bd00,
+       0x20b6012c,
+       0x4afc8003,
+       0x0002f602,
+       0xacf004bd,
+       0x02a5f001,
+       0x5000008b,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x000c9800,
+       0x0e010d98,
+       0x013d7e00,
+       0x01acf000,
+       0x5040008b,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x010c9800,
+       0x98020d98,
+       0x004e060f,
+       0x013d7e08,
+       0x01acf000,
+       0x8b04a5f0,
+       0x98503000,
+       0xc4b6040c,
+       0x00bcbb0f,
+       0x98020c98,
+       0x0f98030d,
+       0x02004e08,
+       0x00013d7e,
+       0x00020a7e,
+       0xf40601f4,
+/* 0x0596: ctx_xfer_post */
+       0x277e0712,
+/* 0x059a: ctx_xfer_done */
+       0xc27e0002,
+       0x00f80004,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+};
index f2b0dea80116fd7b42de0a2d1ff8fad4dbd26b96..0e7b01efae8d2979cdffac5bbf4e4a4030580d2d 100644 (file)
@@ -37,14 +37,14 @@ uint32_t nvc0_grgpc_data[] = {
 };
 
 uint32_t nvc0_grgpc_code[] = {
-       0x03180ef5,
+       0x03a10ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -68,184 +68,214 @@ uint32_t nvc0_grgpc_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f00f00,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f00f,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf00f0007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f00f00,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -259,167 +289,199 @@ uint32_t nvc0_grgpc_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0xe0f900f8,
-       0x9814e7f1,
-       0xf440e3f0,
-       0xe0b78d21,
-       0xf7f0041c,
-       0x8d21f401,
-       0x00f8e0fc,
-/* 0x0318: init */
-       0x04fe04bd,
-       0x0017f100,
-       0x0227f012,
-       0xf10012d0,
-       0xfe042617,
-       0x17f10010,
-       0x10d00400,
-       0x0427f0c0,
-       0xf40012d0,
-       0x17f11031,
-       0x14b60608,
-       0x0012cf06,
+       0xf102ffb9,
+       0xf09814e7,
+       0x21f440e3,
+       0x01f7f09d,
+       0xf102ffb9,
+       0xf09c1ce7,
+       0x21f440e3,
+       0xf8e0fc9d,
+/* 0x03a1: init */
+       0xfe04bd00,
+       0x27f00004,
+       0x0007f102,
+       0x0003f012,
+       0xbd0002d0,
+       0xd517f104,
+       0x0010fe04,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0000,
+       0xf10427f0,
+       0xf0040007,
+       0x02d00003,
+       0xf404bd00,
+       0x27f11031,
+       0x23f08200,
+       0x0022cf01,
        0xf00137f0,
        0x32bb1f24,
        0x0132b604,
        0x80050280,
-       0x10b70603,
-       0x12cf0400,
-       0x04028000,
-       0x010027f1,
-       0xcf0223f0,
-       0x34bd0022,
-       0x070047f1,
-       0x950644b6,
-       0x45d00825,
-       0x4045d000,
-       0x98000e98,
-       0x21f5010f,
-       0x2fbb0147,
-       0x003fbb00,
-       0x98010e98,
-       0x21f5020f,
-       0x0e980147,
-       0x00effd05,
-       0xbb002ebb,
-       0x40b7003e,
-       0x35b61300,
-       0x0043d002,
-       0xb60825b6,
-       0x20b60635,
-       0x0130b601,
-       0xb60824b6,
-       0x2fb90834,
-       0x7121f502,
-       0x003fbb02,
-       0x010007f1,
+       0x27f10603,
+       0x23f08600,
+       0x0022cf01,
+       0xf1040280,
+       0xf0010027,
+       0x22cf0223,
+       0x9534bd00,
+       0x07f10825,
+       0x03f0c000,
+       0x0005d001,
+       0x07f104bd,
+       0x03f0c100,
+       0x0005d001,
+       0x0e9804bd,
+       0x010f9800,
+       0x015021f5,
+       0xbb002fbb,
+       0x0e98003f,
+       0x020f9801,
+       0x015021f5,
+       0xfd050e98,
+       0x2ebb00ef,
+       0x003ebb00,
+       0xf10235b6,
+       0xf0d30007,
+       0x03d00103,
+       0xb604bd00,
+       0x35b60825,
+       0x0120b606,
+       0xb60130b6,
+       0x34b60824,
+       0x022fb908,
+       0x02d321f5,
+       0xf1003fbb,
+       0xf0010007,
+       0x03d00203,
+       0xbd04bd00,
+       0x1f29f024,
+       0x080007f1,
        0xd00203f0,
-       0x04bd0003,
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f008,
-       0xbd0002d0,
-/* 0x03e9: main */
-       0x0031f404,
-       0xf00028f4,
-       0x21f41cd7,
-       0xf401f439,
-       0xf404e4b0,
-       0x81fe1e18,
-       0x0627f001,
-       0x12fd20bd,
-       0x01e4b604,
-       0xfe051efd,
-       0x21f50018,
-       0x0ef404ad,
-/* 0x0419: main_not_ctx_xfer */
-       0x10ef94d3,
-       0xf501f5f0,
-       0xf402fe21,
-/* 0x0426: ih */
-       0x80f9c60e,
-       0xf90188fe,
-       0xf990f980,
-       0xf9b0f9a0,
-       0xf9e0f9d0,
-       0xcf04bdf0,
-       0xabc4800a,
-       0x1d0bf404,
-       0x1900b7f1,
-       0xcf1cd7f0,
-       0xbfcf40be,
+       0x04bd0002,
+/* 0x0498: main */
+       0xf40031f4,
+       0xd7f00028,
+       0x3921f41c,
+       0xb0f401f4,
+       0x18f404e4,
+       0x0181fe1e,
+       0xbd0627f0,
+       0x0412fd20,
+       0xfd01e4b6,
+       0x18fe051e,
+       0x8d21f500,
+       0xd30ef405,
+/* 0x04c8: main_not_ctx_xfer */
+       0xf010ef94,
+       0x21f501f5,
+       0x0ef4037e,
+/* 0x04d5: ih */
+       0xfe80f9c6,
+       0x80f90188,
+       0xa0f990f9,
+       0xd0f9b0f9,
+       0xf0f9e0f9,
+       0xa7f104bd,
+       0xa3f00200,
+       0x00aacf00,
+       0xf404abc4,
+       0xd7f02c0b,
+       0x00e7f11c,
+       0x00e3f01a,
+       0xf100eecf,
+       0xf01900f7,
+       0xffcf00f3,
        0x0421f400,
-       0x0400b0b7,
-       0xd001e7f0,
-/* 0x045e: ih_no_fifo */
-       0x0ad000be,
-       0xfcf0fc40,
-       0xfcd0fce0,
-       0xfca0fcb0,
-       0xfe80fc90,
-       0x80fc0088,
-       0xf80032f4,
-/* 0x0479: hub_barrier_done */
-       0x01f7f001,
-       0xbb040e98,
-       0xe7f104fe,
-       0xe3f09418,
-       0x8d21f440,
-/* 0x048e: ctx_redswitch */
-       0xe7f100f8,
-       0xe4b60614,
-       0x20f7f006,
-       0xf000efd0,
-/* 0x049e: ctx_redswitch_delay */
-       0xf2b608f7,
-       0xfd1bf401,
-       0x0a20f7f1,
-       0xf800efd0,
-/* 0x04ad: ctx_xfer */
-       0x0417f100,
-       0x0614b60a,
-       0xf4001fd0,
-       0x21f50711,
-/* 0x04be: ctx_xfer_not_load */
-       0x17f1048e,
-       0x13f04afc,
-       0x0c27f002,
-       0xf50012d0,
-       0xf1021521,
-       0xf047fc27,
-       0x20d00223,
-       0x012cf000,
-       0xd00320b6,
-       0xacf00012,
-       0x02a5f001,
-       0xf000b7f0,
-       0x0c9850b3,
-       0x0fc4b604,
-       0x9800bcbb,
-       0x0d98000c,
-       0x00e7f001,
-       0x016621f5,
+       0xf101e7f0,
+       0xf01d0007,
+       0x0ed00003,
+/* 0x0523: ih_no_fifo */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
+       0xfce0fcf0,
+       0xfcb0fcd0,
+       0xfc90fca0,
+       0x0088fe80,
+       0x32f480fc,
+/* 0x0547: hub_barrier_done */
+       0xf001f800,
+       0x0e9801f7,
+       0x04febb04,
+       0xf102ffb9,
+       0xf09418e7,
+       0x21f440e3,
+/* 0x055f: ctx_redswitch */
+       0xf000f89d,
+       0x07f120f7,
+       0x03f08500,
+       0x000fd001,
+       0xe7f004bd,
+/* 0x0571: ctx_redswitch_delay */
+       0x01e2b608,
+       0xf1fd1bf4,
+       0xf10800f5,
+       0xf10200f5,
+       0xf0850007,
+       0x0fd00103,
+       0xf804bd00,
+/* 0x058d: ctx_xfer */
+       0x0007f100,
+       0x0203f081,
+       0xbd000fd0,
+       0x0711f404,
+       0x055f21f5,
+/* 0x05a0: ctx_xfer_not_load */
+       0x026a21f5,
+       0x07f124bd,
+       0x03f047fc,
+       0x0002d002,
+       0x2cf004bd,
+       0x0320b601,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd0002,
        0xf001acf0,
-       0xb7f104a5,
-       0xb3f04000,
+       0xb7f102a5,
+       0xb3f00000,
        0x040c9850,
        0xbb0fc4b6,
        0x0c9800bc,
-       0x020d9801,
-       0xf1060f98,
-       0xf50800e7,
-       0xf5016621,
-       0xf4021521,
-       0x12f40601,
-/* 0x0535: ctx_xfer_post */
-       0xfc17f114,
-       0x0213f04a,
-       0xd00d27f0,
-       0x21f50012,
-/* 0x0546: ctx_xfer_done */
-       0x21f50215,
-       0x00f80479,
+       0x010d9800,
+       0xf500e7f0,
+       0xf0016f21,
+       0xa5f001ac,
+       0x00b7f104,
+       0x50b3f040,
+       0xb6040c98,
+       0xbcbb0fc4,
+       0x010c9800,
+       0x98020d98,
+       0xe7f1060f,
+       0x21f50800,
+       0x21f5016f,
+       0x01f4025e,
+       0x0712f406,
+/* 0x0618: ctx_xfer_post */
+       0x027f21f5,
+/* 0x061c: ctx_xfer_done */
+       0x054721f5,
+       0x000000f8,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index dd346c2a16245849b773e382bad75499a85aa00a..84dd32db28a02cff0118eec2bae837ddf56dcc90 100644 (file)
@@ -41,14 +41,14 @@ uint32_t nvd7_grgpc_data[] = {
 };
 
 uint32_t nvd7_grgpc_code[] = {
-       0x03180ef5,
+       0x03a10ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nvd7_grgpc_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f00f00,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f00f,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf00f0007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f00f00,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nvd7_grgpc_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0xe0f900f8,
-       0x9814e7f1,
-       0xf440e3f0,
-       0xe0b78d21,
-       0xf7f0041c,
-       0x8d21f401,
-       0x00f8e0fc,
-/* 0x0318: init */
-       0x04fe04bd,
-       0x0017f100,
-       0x0227f012,
-       0xf10012d0,
-       0xfe047017,
-       0x17f10010,
-       0x10d00400,
-       0x0427f0c0,
-       0xf40012d0,
-       0x17f11031,
-       0x14b60608,
-       0x0012cf06,
+       0xf102ffb9,
+       0xf09814e7,
+       0x21f440e3,
+       0x01f7f09d,
+       0xf102ffb9,
+       0xf09c1ce7,
+       0x21f440e3,
+       0xf8e0fc9d,
+/* 0x03a1: init */
+       0xfe04bd00,
+       0x27f00004,
+       0x0007f102,
+       0x0003f012,
+       0xbd0002d0,
+       0x1f17f104,
+       0x0010fe05,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0000,
+       0xf10427f0,
+       0xf0040007,
+       0x02d00003,
+       0xf404bd00,
+       0x27f11031,
+       0x23f08200,
+       0x0022cf01,
        0xf00137f0,
        0x32bb1f24,
        0x0132b604,
        0x80050280,
-       0x10b70603,
-       0x12cf0400,
-       0x04028000,
-       0x0c30e7f1,
-       0xbd50e3f0,
-       0xbd34bd24,
-/* 0x0371: init_unk_loop */
-       0x6821f444,
-       0xf400f6b0,
-       0xf7f00f0b,
-       0x04f2bb01,
-       0xb6054ffd,
-/* 0x0386: init_unk_next */
-       0x20b60130,
-       0x04e0b601,
-       0xf40126b0,
-/* 0x0392: init_unk_done */
-       0x0380e21b,
-       0x08048007,
-       0x010027f1,
-       0xcf0223f0,
-       0x34bd0022,
-       0x070047f1,
-       0x950644b6,
-       0x45d00825,
-       0x4045d000,
-       0x98000e98,
-       0x21f5010f,
-       0x2fbb0147,
-       0x003fbb00,
-       0x98010e98,
-       0x21f5020f,
-       0x0e980147,
-       0x00effd05,
-       0xbb002ebb,
-       0x0e98003e,
-       0x030f9802,
-       0x014721f5,
-       0xfd070e98,
+       0x27f10603,
+       0x23f08600,
+       0x0022cf01,
+       0xf1040280,
+       0xf00c30e7,
+       0x24bd50e3,
+       0x44bd34bd,
+/* 0x0410: init_unk_loop */
+       0xb06821f4,
+       0x0bf400f6,
+       0x01f7f00f,
+       0xfd04f2bb,
+       0x30b6054f,
+/* 0x0425: init_unk_next */
+       0x0120b601,
+       0xb004e0b6,
+       0x1bf40126,
+/* 0x0431: init_unk_done */
+       0x070380e2,
+       0xf1080480,
+       0xf0010027,
+       0x22cf0223,
+       0x9534bd00,
+       0x07f10825,
+       0x03f0c000,
+       0x0005d001,
+       0x07f104bd,
+       0x03f0c100,
+       0x0005d001,
+       0x0e9804bd,
+       0x010f9800,
+       0x015021f5,
+       0xbb002fbb,
+       0x0e98003f,
+       0x020f9801,
+       0x015021f5,
+       0xfd050e98,
        0x2ebb00ef,
        0x003ebb00,
-       0x130040b7,
-       0xd00235b6,
-       0x25b60043,
-       0x0635b608,
-       0xb60120b6,
-       0x24b60130,
-       0x0834b608,
-       0xf5022fb9,
-       0xbb027121,
-       0x07f1003f,
-       0x03f00100,
-       0x0003d002,
-       0x24bd04bd,
-       0xf11f29f0,
-       0xf0080007,
-       0x02d00203,
-/* 0x0433: main */
+       0x98020e98,
+       0x21f5030f,
+       0x0e980150,
+       0x00effd07,
+       0xbb002ebb,
+       0x35b6003e,
+       0x0007f102,
+       0x0103f0d3,
+       0xbd0003d0,
+       0x0825b604,
+       0xb60635b6,
+       0x30b60120,
+       0x0824b601,
+       0xb90834b6,
+       0x21f5022f,
+       0x3fbb02d3,
+       0x0007f100,
+       0x0203f001,
+       0xbd0003d0,
+       0xf024bd04,
+       0x07f11f29,
+       0x03f00800,
+       0x0002d002,
+/* 0x04e2: main */
+       0x31f404bd,
+       0x0028f400,
+       0xf424d7f0,
+       0x01f43921,
+       0x04e4b0f4,
+       0xfe1e18f4,
+       0x27f00181,
+       0xfd20bd06,
+       0xe4b60412,
+       0x051efd01,
+       0xf50018fe,
+       0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+       0xef94d30e,
+       0x01f5f010,
+       0x037e21f5,
+/* 0x051f: ih */
+       0xf9c60ef4,
+       0x0188fe80,
+       0x90f980f9,
+       0xb0f9a0f9,
+       0xe0f9d0f9,
+       0x04bdf0f9,
+       0x0200a7f1,
+       0xcf00a3f0,
+       0xabc400aa,
+       0x2c0bf404,
+       0xf124d7f0,
+       0xf01a00e7,
+       0xeecf00e3,
+       0x00f7f100,
+       0x00f3f019,
+       0xf400ffcf,
+       0xe7f00421,
+       0x0007f101,
+       0x0003f01d,
+       0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+       0x0007f104,
+       0x0003f001,
+       0xbd000ad0,
+       0xfcf0fc04,
+       0xfcd0fce0,
+       0xfca0fcb0,
+       0xfe80fc90,
+       0x80fc0088,
+       0xf80032f4,
+/* 0x0591: hub_barrier_done */
+       0x01f7f001,
+       0xbb040e98,
+       0xffb904fe,
+       0x18e7f102,
+       0x40e3f094,
+       0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+       0x20f7f000,
+       0x850007f1,
+       0xd00103f0,
+       0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+       0xb608e7f0,
+       0x1bf401e2,
+       0x00f5f1fd,
+       0x00f5f108,
+       0x0007f102,
+       0x0103f085,
+       0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+       0xf100f804,
+       0xf0810007,
+       0x0fd00203,
        0xf404bd00,
-       0x28f40031,
-       0x24d7f000,
-       0xf43921f4,
-       0xe4b0f401,
-       0x1e18f404,
-       0xf00181fe,
-       0x20bd0627,
-       0xb60412fd,
-       0x1efd01e4,
-       0x0018fe05,
-       0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
-       0x94d30ef4,
-       0xf5f010ef,
-       0xfe21f501,
-       0xc60ef402,
-/* 0x0470: ih */
-       0x88fe80f9,
-       0xf980f901,
-       0xf9a0f990,
-       0xf9d0f9b0,
-       0xbdf0f9e0,
-       0x800acf04,
-       0xf404abc4,
-       0xb7f11d0b,
-       0xd7f01900,
-       0x40becf24,
-       0xf400bfcf,
-       0xb0b70421,
-       0xe7f00400,
-       0x00bed001,
-/* 0x04a8: ih_no_fifo */
-       0xfc400ad0,
-       0xfce0fcf0,
-       0xfcb0fcd0,
-       0xfc90fca0,
-       0x0088fe80,
-       0x32f480fc,
-/* 0x04c3: hub_barrier_done */
-       0xf001f800,
-       0x0e9801f7,
-       0x04febb04,
-       0x9418e7f1,
-       0xf440e3f0,
-       0x00f88d21,
-/* 0x04d8: ctx_redswitch */
-       0x0614e7f1,
-       0xf006e4b6,
-       0xefd020f7,
-       0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
-       0xf401f2b6,
-       0xf7f1fd1b,
-       0xefd00a20,
-/* 0x04f7: ctx_xfer */
-       0xf100f800,
-       0xb60a0417,
-       0x1fd00614,
-       0x0711f400,
-       0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
-       0x4afc17f1,
-       0xf00213f0,
-       0x12d00c27,
-       0x1521f500,
-       0xfc27f102,
-       0x0223f047,
-       0xf00020d0,
-       0x20b6012c,
-       0x0012d003,
-       0xf001acf0,
-       0xb7f002a5,
-       0x50b3f000,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x000c9800,
-       0xf0010d98,
-       0x21f500e7,
-       0xacf00166,
-       0x00b7f101,
-       0x50b3f040,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x010c9800,
-       0x98020d98,
-       0xe7f1060f,
-       0x21f50800,
-       0xacf00166,
-       0x04a5f001,
-       0x3000b7f1,
+       0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+       0x21f505a9,
+       0x24bd026a,
+       0x47fc07f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xb6012cf0,
+       0x07f10320,
+       0x03f04afc,
+       0x0002d002,
+       0xacf004bd,
+       0x02a5f001,
+       0x0000b7f1,
        0x9850b3f0,
        0xc4b6040c,
        0x00bcbb0f,
-       0x98020c98,
-       0x0f98030d,
-       0x00e7f108,
-       0x6621f502,
-       0x1521f501,
-       0x0601f402,
-/* 0x05a3: ctx_xfer_post */
-       0xf11412f4,
-       0xf04afc17,
-       0x27f00213,
-       0x0012d00d,
-       0x021521f5,
-/* 0x05b4: ctx_xfer_done */
-       0x04c321f5,
-       0x000000f8,
+       0x98000c98,
+       0xe7f0010d,
+       0x6f21f500,
+       0x01acf001,
+       0x4000b7f1,
+       0x9850b3f0,
+       0xc4b6040c,
+       0x00bcbb0f,
+       0x98010c98,
+       0x0f98020d,
+       0x00e7f106,
+       0x6f21f508,
+       0x01acf001,
+       0xf104a5f0,
+       0xf03000b7,
+       0x0c9850b3,
+       0x0fc4b604,
+       0x9800bcbb,
+       0x0d98020c,
+       0x080f9803,
+       0x0200e7f1,
+       0x016f21f5,
+       0x025e21f5,
+       0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+       0x21f50712,
+/* 0x068a: ctx_xfer_done */
+       0x21f5027f,
+       0x00f80591,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index 7ff5ef6b08048e0d0b12d8b1cf788f97d4f01143..b6da800ee9c2ddbd2baae4cf5812c681c09fccc6 100644 (file)
@@ -41,14 +41,14 @@ uint32_t nve0_grgpc_data[] = {
 };
 
 uint32_t nve0_grgpc_code[] = {
-       0x03180ef5,
+       0x03a10ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nve0_grgpc_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f00f00,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f00f,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf00f0007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f00f00,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nve0_grgpc_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0xe0f900f8,
-       0x9814e7f1,
-       0xf440e3f0,
-       0xe0b78d21,
-       0xf7f0041c,
-       0x8d21f401,
-       0x00f8e0fc,
-/* 0x0318: init */
-       0x04fe04bd,
-       0x0017f100,
-       0x0227f012,
-       0xf10012d0,
-       0xfe047017,
-       0x17f10010,
-       0x10d00400,
-       0x0427f0c0,
-       0xf40012d0,
-       0x17f11031,
-       0x14b60608,
-       0x0012cf06,
+       0xf102ffb9,
+       0xf09814e7,
+       0x21f440e3,
+       0x01f7f09d,
+       0xf102ffb9,
+       0xf09c1ce7,
+       0x21f440e3,
+       0xf8e0fc9d,
+/* 0x03a1: init */
+       0xfe04bd00,
+       0x27f00004,
+       0x0007f102,
+       0x0003f012,
+       0xbd0002d0,
+       0x1f17f104,
+       0x0010fe05,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0000,
+       0xf10427f0,
+       0xf0040007,
+       0x02d00003,
+       0xf404bd00,
+       0x27f11031,
+       0x23f08200,
+       0x0022cf01,
        0xf00137f0,
        0x32bb1f24,
        0x0132b604,
        0x80050280,
-       0x10b70603,
-       0x12cf0400,
-       0x04028000,
-       0x0c30e7f1,
-       0xbd50e3f0,
-       0xbd34bd24,
-/* 0x0371: init_unk_loop */
-       0x6821f444,
-       0xf400f6b0,
-       0xf7f00f0b,
-       0x04f2bb01,
-       0xb6054ffd,
-/* 0x0386: init_unk_next */
-       0x20b60130,
-       0x04e0b601,
-       0xf40126b0,
-/* 0x0392: init_unk_done */
-       0x0380e21b,
-       0x08048007,
-       0x010027f1,
-       0xcf0223f0,
-       0x34bd0022,
-       0x070047f1,
-       0x950644b6,
-       0x45d00825,
-       0x4045d000,
-       0x98000e98,
-       0x21f5010f,
-       0x2fbb0147,
-       0x003fbb00,
-       0x98010e98,
-       0x21f5020f,
-       0x0e980147,
-       0x00effd05,
-       0xbb002ebb,
-       0x0e98003e,
-       0x030f9802,
-       0x014721f5,
-       0xfd070e98,
+       0x27f10603,
+       0x23f08600,
+       0x0022cf01,
+       0xf1040280,
+       0xf00c30e7,
+       0x24bd50e3,
+       0x44bd34bd,
+/* 0x0410: init_unk_loop */
+       0xb06821f4,
+       0x0bf400f6,
+       0x01f7f00f,
+       0xfd04f2bb,
+       0x30b6054f,
+/* 0x0425: init_unk_next */
+       0x0120b601,
+       0xb004e0b6,
+       0x1bf40126,
+/* 0x0431: init_unk_done */
+       0x070380e2,
+       0xf1080480,
+       0xf0010027,
+       0x22cf0223,
+       0x9534bd00,
+       0x07f10825,
+       0x03f0c000,
+       0x0005d001,
+       0x07f104bd,
+       0x03f0c100,
+       0x0005d001,
+       0x0e9804bd,
+       0x010f9800,
+       0x015021f5,
+       0xbb002fbb,
+       0x0e98003f,
+       0x020f9801,
+       0x015021f5,
+       0xfd050e98,
        0x2ebb00ef,
        0x003ebb00,
-       0x130040b7,
-       0xd00235b6,
-       0x25b60043,
-       0x0635b608,
-       0xb60120b6,
-       0x24b60130,
-       0x0834b608,
-       0xf5022fb9,
-       0xbb027121,
-       0x07f1003f,
-       0x03f00100,
-       0x0003d002,
-       0x24bd04bd,
-       0xf11f29f0,
-       0xf0080007,
-       0x02d00203,
-/* 0x0433: main */
+       0x98020e98,
+       0x21f5030f,
+       0x0e980150,
+       0x00effd07,
+       0xbb002ebb,
+       0x35b6003e,
+       0x0007f102,
+       0x0103f0d3,
+       0xbd0003d0,
+       0x0825b604,
+       0xb60635b6,
+       0x30b60120,
+       0x0824b601,
+       0xb90834b6,
+       0x21f5022f,
+       0x3fbb02d3,
+       0x0007f100,
+       0x0203f001,
+       0xbd0003d0,
+       0xf024bd04,
+       0x07f11f29,
+       0x03f00800,
+       0x0002d002,
+/* 0x04e2: main */
+       0x31f404bd,
+       0x0028f400,
+       0xf424d7f0,
+       0x01f43921,
+       0x04e4b0f4,
+       0xfe1e18f4,
+       0x27f00181,
+       0xfd20bd06,
+       0xe4b60412,
+       0x051efd01,
+       0xf50018fe,
+       0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+       0xef94d30e,
+       0x01f5f010,
+       0x037e21f5,
+/* 0x051f: ih */
+       0xf9c60ef4,
+       0x0188fe80,
+       0x90f980f9,
+       0xb0f9a0f9,
+       0xe0f9d0f9,
+       0x04bdf0f9,
+       0x0200a7f1,
+       0xcf00a3f0,
+       0xabc400aa,
+       0x2c0bf404,
+       0xf124d7f0,
+       0xf01a00e7,
+       0xeecf00e3,
+       0x00f7f100,
+       0x00f3f019,
+       0xf400ffcf,
+       0xe7f00421,
+       0x0007f101,
+       0x0003f01d,
+       0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+       0x0007f104,
+       0x0003f001,
+       0xbd000ad0,
+       0xfcf0fc04,
+       0xfcd0fce0,
+       0xfca0fcb0,
+       0xfe80fc90,
+       0x80fc0088,
+       0xf80032f4,
+/* 0x0591: hub_barrier_done */
+       0x01f7f001,
+       0xbb040e98,
+       0xffb904fe,
+       0x18e7f102,
+       0x40e3f094,
+       0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+       0x20f7f000,
+       0x850007f1,
+       0xd00103f0,
+       0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+       0xb608e7f0,
+       0x1bf401e2,
+       0x00f5f1fd,
+       0x00f5f108,
+       0x0007f102,
+       0x0103f085,
+       0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+       0xf100f804,
+       0xf0810007,
+       0x0fd00203,
        0xf404bd00,
-       0x28f40031,
-       0x24d7f000,
-       0xf43921f4,
-       0xe4b0f401,
-       0x1e18f404,
-       0xf00181fe,
-       0x20bd0627,
-       0xb60412fd,
-       0x1efd01e4,
-       0x0018fe05,
-       0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
-       0x94d30ef4,
-       0xf5f010ef,
-       0xfe21f501,
-       0xc60ef402,
-/* 0x0470: ih */
-       0x88fe80f9,
-       0xf980f901,
-       0xf9a0f990,
-       0xf9d0f9b0,
-       0xbdf0f9e0,
-       0x800acf04,
-       0xf404abc4,
-       0xb7f11d0b,
-       0xd7f01900,
-       0x40becf24,
-       0xf400bfcf,
-       0xb0b70421,
-       0xe7f00400,
-       0x00bed001,
-/* 0x04a8: ih_no_fifo */
-       0xfc400ad0,
-       0xfce0fcf0,
-       0xfcb0fcd0,
-       0xfc90fca0,
-       0x0088fe80,
-       0x32f480fc,
-/* 0x04c3: hub_barrier_done */
-       0xf001f800,
-       0x0e9801f7,
-       0x04febb04,
-       0x9418e7f1,
-       0xf440e3f0,
-       0x00f88d21,
-/* 0x04d8: ctx_redswitch */
-       0x0614e7f1,
-       0xf006e4b6,
-       0xefd020f7,
-       0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
-       0xf401f2b6,
-       0xf7f1fd1b,
-       0xefd00a20,
-/* 0x04f7: ctx_xfer */
-       0xf100f800,
-       0xb60a0417,
-       0x1fd00614,
-       0x0711f400,
-       0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
-       0x4afc17f1,
-       0xf00213f0,
-       0x12d00c27,
-       0x1521f500,
-       0xfc27f102,
-       0x0223f047,
-       0xf00020d0,
-       0x20b6012c,
-       0x0012d003,
-       0xf001acf0,
-       0xb7f002a5,
-       0x50b3f000,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x000c9800,
-       0xf0010d98,
-       0x21f500e7,
-       0xacf00166,
-       0x00b7f101,
-       0x50b3f040,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x010c9800,
-       0x98020d98,
-       0xe7f1060f,
-       0x21f50800,
-       0xacf00166,
-       0x04a5f001,
-       0x3000b7f1,
+       0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+       0x21f505a9,
+       0x24bd026a,
+       0x47fc07f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xb6012cf0,
+       0x07f10320,
+       0x03f04afc,
+       0x0002d002,
+       0xacf004bd,
+       0x02a5f001,
+       0x0000b7f1,
        0x9850b3f0,
        0xc4b6040c,
        0x00bcbb0f,
-       0x98020c98,
-       0x0f98030d,
-       0x00e7f108,
-       0x6621f502,
-       0x1521f501,
-       0x0601f402,
-/* 0x05a3: ctx_xfer_post */
-       0xf11412f4,
-       0xf04afc17,
-       0x27f00213,
-       0x0012d00d,
-       0x021521f5,
-/* 0x05b4: ctx_xfer_done */
-       0x04c321f5,
-       0x000000f8,
+       0x98000c98,
+       0xe7f0010d,
+       0x6f21f500,
+       0x01acf001,
+       0x4000b7f1,
+       0x9850b3f0,
+       0xc4b6040c,
+       0x00bcbb0f,
+       0x98010c98,
+       0x0f98020d,
+       0x00e7f106,
+       0x6f21f508,
+       0x01acf001,
+       0xf104a5f0,
+       0xf03000b7,
+       0x0c9850b3,
+       0x0fc4b604,
+       0x9800bcbb,
+       0x0d98020c,
+       0x080f9803,
+       0x0200e7f1,
+       0x016f21f5,
+       0x025e21f5,
+       0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+       0x21f50712,
+/* 0x068a: ctx_xfer_done */
+       0x21f5027f,
+       0x00f80591,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index f870507be88019cd9167d82a2fd85b3626f820d3..6316ebaf5d9a581dae8ac9e5a6d0464d0f8ec478 100644 (file)
@@ -41,14 +41,14 @@ uint32_t nvf0_grgpc_data[] = {
 };
 
 uint32_t nvf0_grgpc_code[] = {
-       0x03180ef5,
+       0x03a10ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -72,184 +72,214 @@ uint32_t nvf0_grgpc_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f03700,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f037,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f03700,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f037,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf0370007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x370007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f03700,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x370007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -263,198 +293,230 @@ uint32_t nvf0_grgpc_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0xe0f900f8,
-       0x9814e7f1,
-       0xf440e3f0,
-       0xe0b78d21,
-       0xf7f0041c,
-       0x8d21f401,
-       0x00f8e0fc,
-/* 0x0318: init */
-       0x04fe04bd,
-       0x0017f100,
-       0x0227f012,
-       0xf10012d0,
-       0xfe047017,
-       0x17f10010,
-       0x10d00400,
-       0x0427f0c0,
-       0xf40012d0,
-       0x17f11031,
-       0x14b60608,
-       0x0012cf06,
+       0xf102ffb9,
+       0xf09814e7,
+       0x21f440e3,
+       0x01f7f09d,
+       0xf102ffb9,
+       0xf09c1ce7,
+       0x21f440e3,
+       0xf8e0fc9d,
+/* 0x03a1: init */
+       0xfe04bd00,
+       0x27f00004,
+       0x0007f102,
+       0x0003f012,
+       0xbd0002d0,
+       0x1f17f104,
+       0x0010fe05,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0000,
+       0xf10427f0,
+       0xf0040007,
+       0x02d00003,
+       0xf404bd00,
+       0x27f11031,
+       0x23f08200,
+       0x0022cf01,
        0xf00137f0,
        0x32bb1f24,
        0x0132b604,
        0x80050280,
-       0x10b70603,
-       0x12cf0400,
-       0x04028000,
-       0x0c30e7f1,
-       0xbd50e3f0,
-       0xbd34bd24,
-/* 0x0371: init_unk_loop */
-       0x6821f444,
-       0xf400f6b0,
-       0xf7f00f0b,
-       0x04f2bb01,
-       0xb6054ffd,
-/* 0x0386: init_unk_next */
-       0x20b60130,
-       0x04e0b601,
-       0xf40226b0,
-/* 0x0392: init_unk_done */
-       0x0380e21b,
-       0x08048007,
-       0x010027f1,
-       0xcf0223f0,
-       0x34bd0022,
-       0x070047f1,
-       0x950644b6,
-       0x45d00825,
-       0x4045d000,
-       0x98000e98,
-       0x21f5010f,
-       0x2fbb0147,
-       0x003fbb00,
-       0x98010e98,
-       0x21f5020f,
-       0x0e980147,
-       0x00effd05,
-       0xbb002ebb,
-       0x0e98003e,
-       0x030f9802,
-       0x014721f5,
-       0xfd070e98,
+       0x27f10603,
+       0x23f08600,
+       0x0022cf01,
+       0xf1040280,
+       0xf00c30e7,
+       0x24bd50e3,
+       0x44bd34bd,
+/* 0x0410: init_unk_loop */
+       0xb06821f4,
+       0x0bf400f6,
+       0x01f7f00f,
+       0xfd04f2bb,
+       0x30b6054f,
+/* 0x0425: init_unk_next */
+       0x0120b601,
+       0xb004e0b6,
+       0x1bf40226,
+/* 0x0431: init_unk_done */
+       0x070380e2,
+       0xf1080480,
+       0xf0010027,
+       0x22cf0223,
+       0x9534bd00,
+       0x07f10825,
+       0x03f0c000,
+       0x0005d001,
+       0x07f104bd,
+       0x03f0c100,
+       0x0005d001,
+       0x0e9804bd,
+       0x010f9800,
+       0x015021f5,
+       0xbb002fbb,
+       0x0e98003f,
+       0x020f9801,
+       0x015021f5,
+       0xfd050e98,
        0x2ebb00ef,
        0x003ebb00,
-       0x130040b7,
-       0xd00235b6,
-       0x25b60043,
-       0x0635b608,
-       0xb60120b6,
-       0x24b60130,
-       0x0834b608,
-       0xf5022fb9,
-       0xbb027121,
-       0x07f1003f,
-       0x03f00100,
-       0x0003d002,
-       0x24bd04bd,
-       0xf11f29f0,
-       0xf0300007,
-       0x02d00203,
-/* 0x0433: main */
+       0x98020e98,
+       0x21f5030f,
+       0x0e980150,
+       0x00effd07,
+       0xbb002ebb,
+       0x35b6003e,
+       0x0007f102,
+       0x0103f0d3,
+       0xbd0003d0,
+       0x0825b604,
+       0xb60635b6,
+       0x30b60120,
+       0x0824b601,
+       0xb90834b6,
+       0x21f5022f,
+       0x3fbb02d3,
+       0x0007f100,
+       0x0203f001,
+       0xbd0003d0,
+       0xf024bd04,
+       0x07f11f29,
+       0x03f03000,
+       0x0002d002,
+/* 0x04e2: main */
+       0x31f404bd,
+       0x0028f400,
+       0xf424d7f0,
+       0x01f43921,
+       0x04e4b0f4,
+       0xfe1e18f4,
+       0x27f00181,
+       0xfd20bd06,
+       0xe4b60412,
+       0x051efd01,
+       0xf50018fe,
+       0xf405d721,
+/* 0x0512: main_not_ctx_xfer */
+       0xef94d30e,
+       0x01f5f010,
+       0x037e21f5,
+/* 0x051f: ih */
+       0xf9c60ef4,
+       0x0188fe80,
+       0x90f980f9,
+       0xb0f9a0f9,
+       0xe0f9d0f9,
+       0x04bdf0f9,
+       0x0200a7f1,
+       0xcf00a3f0,
+       0xabc400aa,
+       0x2c0bf404,
+       0xf124d7f0,
+       0xf01a00e7,
+       0xeecf00e3,
+       0x00f7f100,
+       0x00f3f019,
+       0xf400ffcf,
+       0xe7f00421,
+       0x0007f101,
+       0x0003f01d,
+       0xbd000ed0,
+/* 0x056d: ih_no_fifo */
+       0x0007f104,
+       0x0003f001,
+       0xbd000ad0,
+       0xfcf0fc04,
+       0xfcd0fce0,
+       0xfca0fcb0,
+       0xfe80fc90,
+       0x80fc0088,
+       0xf80032f4,
+/* 0x0591: hub_barrier_done */
+       0x01f7f001,
+       0xbb040e98,
+       0xffb904fe,
+       0x18e7f102,
+       0x40e3f094,
+       0xf89d21f4,
+/* 0x05a9: ctx_redswitch */
+       0x20f7f000,
+       0x850007f1,
+       0xd00103f0,
+       0x04bd000f,
+/* 0x05bb: ctx_redswitch_delay */
+       0xb608e7f0,
+       0x1bf401e2,
+       0x00f5f1fd,
+       0x00f5f108,
+       0x0007f102,
+       0x0103f085,
+       0xbd000fd0,
+/* 0x05d7: ctx_xfer */
+       0xf100f804,
+       0xf0810007,
+       0x0fd00203,
        0xf404bd00,
-       0x28f40031,
-       0x24d7f000,
-       0xf43921f4,
-       0xe4b0f401,
-       0x1e18f404,
-       0xf00181fe,
-       0x20bd0627,
-       0xb60412fd,
-       0x1efd01e4,
-       0x0018fe05,
-       0x04f721f5,
-/* 0x0463: main_not_ctx_xfer */
-       0x94d30ef4,
-       0xf5f010ef,
-       0xfe21f501,
-       0xc60ef402,
-/* 0x0470: ih */
-       0x88fe80f9,
-       0xf980f901,
-       0xf9a0f990,
-       0xf9d0f9b0,
-       0xbdf0f9e0,
-       0x800acf04,
-       0xf404abc4,
-       0xb7f11d0b,
-       0xd7f01900,
-       0x40becf24,
-       0xf400bfcf,
-       0xb0b70421,
-       0xe7f00400,
-       0x00bed001,
-/* 0x04a8: ih_no_fifo */
-       0xfc400ad0,
-       0xfce0fcf0,
-       0xfcb0fcd0,
-       0xfc90fca0,
-       0x0088fe80,
-       0x32f480fc,
-/* 0x04c3: hub_barrier_done */
-       0xf001f800,
-       0x0e9801f7,
-       0x04febb04,
-       0x9418e7f1,
-       0xf440e3f0,
-       0x00f88d21,
-/* 0x04d8: ctx_redswitch */
-       0x0614e7f1,
-       0xf006e4b6,
-       0xefd020f7,
-       0x08f7f000,
-/* 0x04e8: ctx_redswitch_delay */
-       0xf401f2b6,
-       0xf7f1fd1b,
-       0xefd00a20,
-/* 0x04f7: ctx_xfer */
-       0xf100f800,
-       0xb60a0417,
-       0x1fd00614,
-       0x0711f400,
-       0x04d821f5,
-/* 0x0508: ctx_xfer_not_load */
-       0x4afc17f1,
-       0xf00213f0,
-       0x12d00c27,
-       0x1521f500,
-       0xfc27f102,
-       0x0223f047,
-       0xf00020d0,
-       0x20b6012c,
-       0x0012d003,
-       0xf001acf0,
-       0xb7f002a5,
-       0x50b3f000,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x000c9800,
-       0xf0010d98,
-       0x21f500e7,
-       0xacf00166,
-       0x00b7f101,
-       0x50b3f040,
-       0xb6040c98,
-       0xbcbb0fc4,
-       0x010c9800,
-       0x98020d98,
-       0xe7f1060f,
-       0x21f50800,
-       0xacf00166,
-       0x04a5f001,
-       0x3000b7f1,
+       0x21f50711,
+/* 0x05ea: ctx_xfer_not_load */
+       0x21f505a9,
+       0x24bd026a,
+       0x47fc07f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xb6012cf0,
+       0x07f10320,
+       0x03f04afc,
+       0x0002d002,
+       0xacf004bd,
+       0x02a5f001,
+       0x0000b7f1,
        0x9850b3f0,
        0xc4b6040c,
        0x00bcbb0f,
-       0x98020c98,
-       0x0f98030d,
-       0x00e7f108,
-       0x6621f502,
-       0x1521f501,
-       0x0601f402,
-/* 0x05a3: ctx_xfer_post */
-       0xf11412f4,
-       0xf04afc17,
-       0x27f00213,
-       0x0012d00d,
-       0x021521f5,
-/* 0x05b4: ctx_xfer_done */
-       0x04c321f5,
-       0x000000f8,
+       0x98000c98,
+       0xe7f0010d,
+       0x6f21f500,
+       0x01acf001,
+       0x4000b7f1,
+       0x9850b3f0,
+       0xc4b6040c,
+       0x00bcbb0f,
+       0x98010c98,
+       0x0f98020d,
+       0x00e7f106,
+       0x6f21f508,
+       0x01acf001,
+       0xf104a5f0,
+       0xf03000b7,
+       0x0c9850b3,
+       0x0fc4b604,
+       0x9800bcbb,
+       0x0d98020c,
+       0x080f9803,
+       0x0200e7f1,
+       0x016f21f5,
+       0x025e21f5,
+       0xf40601f4,
+/* 0x0686: ctx_xfer_post */
+       0x21f50712,
+/* 0x068a: ctx_xfer_done */
+       0x21f5027f,
+       0x00f80591,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index b82d2ae8991742e7eb58902f4935663590fbe795..c8ddb8d71b915c668c7b116f1b7f9cf9547289b3 100644 (file)
@@ -68,60 +68,57 @@ error:
 //
 init:
        clear b32 $r0
-       mov $sp $r0
        mov $xdbase $r0
 
+       // setup stack
+       nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
+       extr $r1 $r1 9:17
+       shl b32 $r1 8
+       mov $sp $r1
+
        // enable fifo access
-       mov $r1 0x1200
-       mov $r2 2
-       iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+       mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
+       nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)
 
        // setup i0 handler, and route all interrupts to it
        mov $r1 #ih
        mov $iv0 $r1
-       mov $r1 0x400
-       iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
 
-       // route HUB_CHANNEL_SWITCH to fuc interrupt 8
-       mov $r3 0x404
-       shl b32 $r3 6
-       mov $r2 0x2003          // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
-       iowr I[$r3 + 0x000] $r2
+       clear b32 $r2
+       nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)
+
+       // route HUB_CHSW_PULSE to fuc interrupt 8
+       mov $r2 0x2003          // { HUB_CHSW_PULSE, ZERO } -> intr 8
+       nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)
 
        // not sure what these are, route them because NVIDIA does, and
        // the IRQ handler will signal the host if we ever get one.. we
        // may find out if/why we need to handle these if so..
        //
-       mov $r2 0x2004
-       iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
-       mov $r2 0x200b
-       iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
-       mov $r2 0x200c
-       iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+       mov $r2 0x2004          // { 0x04, ZERO } -> intr 9
+       nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
+       mov $r2 0x200b          // { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
+       nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
+       mov $r2 0x200c          // { 0x0c, ZERO } -> intr 15
+       nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)
 
        // enable all INTR_UP interrupts
-       mov $r2 0xc24
-       shl b32 $r2 6
-       not b32 $r3 $r0
-       iowr I[$r2] $r3
+       sub b32 $r3 $r0 1
+       nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)
 
-       // enable fifo, ctxsw, 9, 10, 15 interrupts
-       mov $r2 -0x78fc         // 0x8704
-       sethi $r2 0
-       iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+       // enable fifo, ctxsw, 9, fwmthd, 15 interrupts
+       imm32($r2, 0x8704)
+       nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)
 
        // fifo level triggered, rest edge
-       sub b32 $r1 0x100
-       mov $r2 4
-       iowr I[$r1] $r2
+       mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
+       nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)
 
        // enable interrupts
        bset $flags ie0
 
        // fetch enabled GPC/ROP counts
-       mov $r14 -0x69fc        // 0x409604
-       sethi $r14 0x400000
-       call #nv_rd32
+       nv_rd32($r14, 0x409604)
        extr $r1 $r15 16:20
        st b32 D[$r0 + #rop_count] $r1
        and $r15 0x1f
@@ -131,37 +128,40 @@ init:
        mov $r1 1
        shl b32 $r1 $r15
        sub b32 $r1 1
-       mov $r2 0x40c
-       shl b32 $r2 6
-       iowr I[$r2 + 0x000] $r1
-       iowr I[$r2 + 0x100] $r1
+       nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
+       nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)
 
        // context size calculation, reserve first 256 bytes for use by fuc
        mov $r1 256
 
+       //
+       mov $r15 2
+       call(ctx_4170s)
+       call(ctx_4170w)
+       mov $r15 0x10
+       call(ctx_86c)
+
        // calculate size of mmio context data
        ld b32 $r14 D[$r0 + #hub_mmio_list_head]
        ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
-       call #mmctx_size
+       call(mmctx_size)
 
        // set mmctx base addresses now so we don't have to do it later,
        // they don't (currently) ever change
-       mov $r3 0x700
-       shl b32 $r3 6
        shr b32 $r4 $r1 8
-       iowr I[$r3 + 0x000] $r4         // MMCTX_SAVE_SWBASE
-       iowr I[$r3 + 0x100] $r4         // MMCTX_LOAD_SWBASE
+       nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
+       nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
        add b32 $r3 0x1300
        add b32 $r1 $r15
        shr b32 $r15 2
-       iowr I[$r3 + 0x000] $r15        // MMCTX_LOAD_COUNT, wtf for?!?
+       nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??
 
        // strands, base offset needs to be aligned to 256 bytes
        shr b32 $r1 8
        add b32 $r1 1
        shl b32 $r1 8
        mov b32 $r15 $r1
-       call #strand_ctx_init
+       call(strand_ctx_init)
        add b32 $r1 $r15
 
        // initialise each GPC in sequence by passing in the offset of its
@@ -173,30 +173,29 @@ init:
        // in GPCn_CC_SCRATCH[1]
        //
        ld b32 $r3 D[$r0 + #gpc_count]
-       mov $r4 0x2000
-       sethi $r4 0x500000
+       imm32($r4, 0x502000)
        init_gpc:
                // setup, and start GPC ucode running
                add b32 $r14 $r4 0x804
                mov b32 $r15 $r1
-               call #nv_wr32                   // CC_SCRATCH[1] = ctx offset
+               call(nv_wr32)                   // CC_SCRATCH[1] = ctx offset
                add b32 $r14 $r4 0x10c
                clear b32 $r15
-               call #nv_wr32
+               call(nv_wr32)
                add b32 $r14 $r4 0x104
-               call #nv_wr32                   // ENTRY
+               call(nv_wr32)                   // ENTRY
                add b32 $r14 $r4 0x100
                mov $r15 2                      // CTRL_START_TRIGGER
-               call #nv_wr32                   // CTRL
+               call(nv_wr32)                   // CTRL
 
                // wait for it to complete, and adjust context size
                add b32 $r14 $r4 0x800
                init_gpc_wait:
-                       call #nv_rd32
+                       call(nv_rd32)
                        xbit $r15 $r15 31
                        bra e #init_gpc_wait
                add b32 $r14 $r4 0x804
-               call #nv_rd32
+               call(nv_rd32)
                add b32 $r1 $r15
 
                // next!
@@ -204,6 +203,12 @@ init:
                sub b32 $r3 1
                bra ne #init_gpc
 
+       //
+       mov $r15 0
+       call(ctx_86c)
+       mov $r15 0
+       call(ctx_4170s)
+
        // save context size, and tell host we're ready
        nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
        clear b32 $r1
@@ -218,17 +223,15 @@ main:
        bset $flags $p0
        sleep $p0
        mov $r13 #cmd_queue
-       call #queue_get
+       call(queue_get)
        bra $p1 #main
 
        // context switch, requested by GPU?
        cmpu b32 $r14 0x4001
        bra ne #main_not_ctx_switch
                trace_set(T_AUTO)
-               mov $r1 0xb00
-               shl b32 $r1 6
-               iord $r2 I[$r1 + 0x100]         // CHAN_NEXT
-               iord $r1 I[$r1 + 0x000]         // CHAN_CUR
+               nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
+               nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)
 
                xbit $r3 $r1 31
                bra e #chsw_no_prev
@@ -239,12 +242,12 @@ main:
                                trace_set(T_SAVE)
                                bclr $flags $p1
                                bset $flags $p2
-                               call #ctx_xfer
+                               call(ctx_xfer)
                                trace_clr(T_SAVE);
                                pop $r2
                                trace_set(T_LOAD);
                                bset $flags $p1
-                               call #ctx_xfer
+                               call(ctx_xfer)
                                trace_clr(T_LOAD);
                                bra #chsw_done
                        chsw_prev_no_next:
@@ -252,25 +255,21 @@ main:
                                mov b32 $r2 $r1
                                bclr $flags $p1
                                bclr $flags $p2
-                               call #ctx_xfer
+                               call(ctx_xfer)
                                pop $r2
-                               mov $r1 0xb00
-                               shl b32 $r1 6
-                               iowr I[$r1] $r2
+                               nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
                                bra #chsw_done
                chsw_no_prev:
                        xbit $r3 $r2 31
                        bra e #chsw_done
                                bset $flags $p1
                                bclr $flags $p2
-                               call #ctx_xfer
+                               call(ctx_xfer)
 
                // ack the context switch request
                chsw_done:
-               mov $r1 0xb0c
-               shl b32 $r1 6
-               mov $r2 1
-               iowr I[$r1 + 0x000] $r2         // 0x409b0c
+               mov $r2 NV_PGRAPH_FECS_CHSW_ACK
+               nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
                trace_clr(T_AUTO)
                bra #main
 
@@ -279,7 +278,7 @@ main:
        cmpu b32 $r14 0x0001
        bra ne #main_not_ctx_chan
                mov b32 $r2 $r15
-               call #ctx_chan
+               call(ctx_chan)
                bra #main_done
 
        // request to store current channel context?
@@ -289,14 +288,14 @@ main:
                trace_set(T_SAVE)
                bclr $flags $p1
                bclr $flags $p2
-               call #ctx_xfer
+               call(ctx_xfer)
                trace_clr(T_SAVE)
                bra #main_done
 
        main_not_ctx_save:
                shl b32 $r15 $r14 16
                or $r15 E_BAD_COMMAND
-               call #error
+               call(error)
                bra #main
 
        main_done:
@@ -319,41 +318,46 @@ ih:
        clear b32 $r0
 
        // incoming fifo command?
-       iord $r10 I[$r0 + 0x200]        // INTR
-       and $r11 $r10 0x00000004
+       nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
+       and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
        bra e #ih_no_fifo
                // queue incoming fifo command for later processing
-               mov $r11 0x1900
                mov $r13 #cmd_queue
-               iord $r14 I[$r11 + 0x100]       // FIFO_CMD
-               iord $r15 I[$r11 + 0x000]       // FIFO_DATA
-               call #queue_put
+               nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
+               nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
+               call(queue_put)
                add b32 $r11 0x400
                mov $r14 1
-               iowr I[$r11 + 0x000] $r14       // FIFO_ACK
+               nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)
 
        // context switch request?
        ih_no_fifo:
-       and $r11 $r10 0x00000100
+       and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
        bra e #ih_no_ctxsw
                // enqueue a context switch for later processing
                mov $r13 #cmd_queue
                mov $r14 0x4001
-               call #queue_put
+               call(queue_put)
 
-       // anything we didn't handle, bring it to the host's attention
+       // firmware method?
        ih_no_ctxsw:
-       mov $r11 0x104
+       and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
+       bra e #ih_no_fwmthd
+               // none we handle, ack, and fall-through to unhandled
+               mov $r11 0x100
+               nv_wr32(0x400144, $r11)
+
+       // anything we didn't handle, bring it to the host's attention
+       ih_no_fwmthd:
+       mov $r11 0x104 // FIFO | CHSW
        not b32 $r11
        and $r11 $r10 $r11
        bra e #ih_no_other
-               mov $r10 0xc1c
-               shl b32 $r10 6
-               iowr I[$r10] $r11       // INTR_UP_SET
+               nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)
 
        // ack, and wake up main()
        ih_no_other:
-       iowr I[$r0 + 0x100] $r10        // INTR_ACK
+       nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)
 
        pop $r15
        pop $r14
@@ -370,12 +374,10 @@ ih:
 #if CHIPSET < GK100
 // Not real sure, but, MEM_CMD 7 will hang forever if this isn't done
 ctx_4160s:
-       mov $r14 0x4160
-       sethi $r14 0x400000
        mov $r15 1
-       call #nv_wr32
+       nv_wr32(0x404160, $r15)
        ctx_4160s_wait:
-               call #nv_rd32
+               nv_rd32($r15, 0x404160)
                xbit $r15 $r15 4
                bra e #ctx_4160s_wait
        ret
@@ -384,10 +386,8 @@ ctx_4160s:
 // to hang with STATUS=0x00000007 until it's cleared.. fbcon can
 // still function with it set however...
 ctx_4160c:
-       mov $r14 0x4160
-       sethi $r14 0x400000
        clear b32 $r15
-       call #nv_wr32
+       nv_wr32(0x404160, $r15)
        ret
 #endif
 
@@ -396,18 +396,14 @@ ctx_4160c:
 // In: $r15 value to set 0x404170 to
 //
 ctx_4170s:
-       mov $r14 0x4170
-       sethi $r14 0x400000
        or $r15 0x10
-       call #nv_wr32
+       nv_wr32(0x404170, $r15)
        ret
 
 // Waits for a ctx_4170s() call to complete
 //
 ctx_4170w:
-       mov $r14 0x4170
-       sethi $r14 0x400000
-       call #nv_rd32
+       nv_rd32($r15, 0x404170)
        and $r15 0x10
        bra ne #ctx_4170w
        ret
@@ -419,16 +415,18 @@ ctx_4170w:
 // funny things happen.
 //
 ctx_redswitch:
-       mov $r14 0x614
-       shl b32 $r14 6
-       mov $r15 0x270
-       iowr I[$r14] $r15       // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+       mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
+       or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
+       or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
+       or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
+       nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
        mov $r15 8
        ctx_redswitch_delay:
                sub b32 $r15 1
                bra ne #ctx_redswitch_delay
-       mov $r15 0x770
-       iowr I[$r14] $r15       // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+       or  $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
+       or  $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
+       nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
        ret
 
 // Not a clue what this is for, except that unless the value is 0x10, the
@@ -437,15 +435,18 @@ ctx_redswitch:
 // In: $r15 value to set to (0x00/0x10 are used)
 //
 ctx_86c:
-       mov $r14 0x86c
-       shl b32 $r14 6
-       iowr I[$r14] $r15       // HUB(0x86c) = val
-       mov $r14 -0x75ec
-       sethi $r14 0x400000
-       call #nv_wr32           // ROP(0xa14) = val
-       mov $r14 -0x5794
-       sethi $r14 0x410000
-       call #nv_wr32           // GPC(0x86c) = val
+       nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
+       nv_wr32(0x408a14, $r15)
+       nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
+       ret
+
+// In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
+ctx_mem:
+       nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
+       ctx_mem_wait:
+               nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
+               or $r15 $r15
+               bra ne #ctx_mem_wait
        ret
 
 // ctx_load - load's a channel's ctxctl data, and selects its vm
@@ -457,23 +458,14 @@ ctx_load:
 
        // switch to channel, somewhat magic in parts..
        mov $r10 12             // DONE_UNK12
-       call #wait_donez
-       mov $r1 0xa24
-       shl b32 $r1 6
-       iowr I[$r1 + 0x000] $r0 // 0x409a24
-       mov $r3 0xb00
-       shl b32 $r3 6
-       iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
-       mov $r1 0xa0c
-       shl b32 $r1 6
-       mov $r4 7
-       iowr I[$r1 + 0x000] $r2 // MEM_CHAN
-       iowr I[$r1 + 0x100] $r4 // MEM_CMD
-       ctx_chan_wait_0:
-               iord $r4 I[$r1 + 0x100]
-               and $r4 0x1f
-               bra ne #ctx_chan_wait_0
-       iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+       call(wait_donez)
+       clear b32 $r15
+       nv_iowr(0x409a24, 0, $r15)
+       nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
+       nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
+       mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
+       call(ctx_mem)
+       nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
 
        // load channel header, fetch PGRAPH context pointer
        mov $xtargets $r0
@@ -482,14 +474,10 @@ ctx_load:
        add b32 $r2 2
 
        trace_set(T_LCHAN)
-       mov $r1 0xa04
-       shl b32 $r1 6
-       iowr I[$r1 + 0x000] $r2         // MEM_BASE
-       mov $r1 0xa20
-       shl b32 $r1 6
-       mov $r2 0x0002
-       sethi $r2 0x80000000
-       iowr I[$r1 + 0x000] $r2         // MEM_TARGET = vram
+       nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
+       imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
+       or  $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
+       nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
        mov $r1 0x10                    // chan + 0x0210
        mov $r2 #xfer_data
        sethi $r2 0x00020000            // 16 bytes
@@ -507,13 +495,9 @@ ctx_load:
 
        // set transfer base to start of context, and fetch context header
        trace_set(T_LCTXH)
-       mov $r2 0xa04
-       shl b32 $r2 6
-       iowr I[$r2 + 0x000] $r1         // MEM_BASE
-       mov $r2 1
-       mov $r1 0xa20
-       shl b32 $r1 6
-       iowr I[$r1 + 0x000] $r2         // MEM_TARGET = vm
+       nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
+       mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
+       nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
        mov $r1 #chan_data
        sethi $r1 0x00060000            // 256 bytes
        xdld $r0 $r1
@@ -532,21 +516,15 @@ ctx_load:
 //
 ctx_chan:
 #if CHIPSET < GK100
-       call #ctx_4160s
+       call(ctx_4160s)
 #endif
-       call #ctx_load
+       call(ctx_load)
        mov $r10 12                     // DONE_UNK12
-       call #wait_donez
-       mov $r1 0xa10
-       shl b32 $r1 6
-       mov $r2 5
-       iowr I[$r1 + 0x000] $r2         // MEM_CMD = 5 (???)
-       ctx_chan_wait:
-               iord $r2 I[$r1 + 0x000]
-               or $r2 $r2
-               bra ne #ctx_chan_wait
+       call(wait_donez)
+       mov $r15 5 // MEM_CMD 5 ???
+       call(ctx_mem)
 #if CHIPSET < GK100
-       call #ctx_4160c
+       call(ctx_4160c)
 #endif
        ret
 
@@ -562,9 +540,7 @@ ctx_chan:
 ctx_mmio_exec:
        // set transfer base to be the mmio list
        ld b32 $r3 D[$r0 + #chan_mmio_address]
-       mov $r2 0xa04
-       shl b32 $r2 6
-       iowr I[$r2 + 0x000] $r3         // MEM_BASE
+       nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
 
        clear b32 $r3
        ctx_mmio_loop:
@@ -580,7 +556,7 @@ ctx_mmio_exec:
                ctx_mmio_pull:
                ld b32 $r14 D[$r4 + #xfer_data + 0x00]
                ld b32 $r15 D[$r4 + #xfer_data + 0x04]
-               call #nv_wr32
+               call(nv_wr32)
 
                // next!
                add b32 $r3 8
@@ -590,7 +566,7 @@ ctx_mmio_exec:
        // set transfer base back to the current context
        ctx_mmio_done:
        ld b32 $r3 D[$r0 + #ctx_current]
-       iowr I[$r2 + 0x000] $r3         // MEM_BASE
+       nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
 
        // disable the mmio list now, we don't need/want to execute it again
        st b32 D[$r0 + #chan_mmio_count] $r0
@@ -610,12 +586,10 @@ ctx_mmio_exec:
 //
 ctx_xfer:
        // according to mwk, some kind of wait for idle
-       mov $r15 0xc00
-       shl b32 $r15 6
        mov $r14 4
-       iowr I[$r15 + 0x200] $r14
+       nv_iowr(0x409c08, 0, $r14)
        ctx_xfer_idle:
-               iord $r14 I[$r15 + 0x000]
+               nv_iord($r14, 0x409c00, 0)
                and $r14 0x2000
                bra ne #ctx_xfer_idle
 
@@ -623,50 +597,42 @@ ctx_xfer:
        bra $p2 #ctx_xfer_pre_load
        ctx_xfer_pre:
                mov $r15 0x10
-               call #ctx_86c
+               call(ctx_86c)
 #if CHIPSET < GK100
-               call #ctx_4160s
+               call(ctx_4160s)
 #endif
                bra not $p1 #ctx_xfer_exec
 
        ctx_xfer_pre_load:
                mov $r15 2
-               call #ctx_4170s
-               call #ctx_4170w
-               call #ctx_redswitch
+               call(ctx_4170s)
+               call(ctx_4170w)
+               call(ctx_redswitch)
                clear b32 $r15
-               call #ctx_4170s
-               call #ctx_load
+               call(ctx_4170s)
+               call(ctx_load)
 
        // fetch context pointer, and initiate xfer on all GPCs
        ctx_xfer_exec:
        ld b32 $r1 D[$r0 + #ctx_current]
-       mov $r2 0x414
-       shl b32 $r2 6
-       iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
-       mov $r14 -0x5b00
-       sethi $r14 0x410000
-       mov b32 $r15 $r1
-       call #nv_wr32           // GPC_BCAST_WRCMD_DATA = ctx pointer
-       add b32 $r14 4
+
+       clear b32 $r2
+       nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)
+
+       nv_wr32(0x41a500, $r1)  // GPC_BCAST_WRCMD_DATA = ctx pointer
        xbit $r15 $flags $p1
        xbit $r2 $flags $p2
        shl b32 $r2 1
        or $r15 $r2
-       call #nv_wr32           // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+       nv_wr32(0x41a504, $r15) // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
 
        // strands
-       mov $r1 0x4afc
-       sethi $r1 0x20000
-       mov $r2 0xc
-       iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x0c
-       call #strand_wait
-       mov $r2 0x47fc
-       sethi $r2 0x20000
-       iowr I[$r2] $r0         // STRAND_FIRST_GENE(0x3f) = 0x00
-       xbit $r2 $flags $p1
-       add b32 $r2 3
-       iowr I[$r1] $r2         // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+       call(strand_pre)
+       clear b32 $r2
+       nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
+       xbit $r2 $flags $p1     // SAVE/LOAD
+       add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
+       nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)
 
        // mmio context
        xbit $r10 $flags $p1    // direction
@@ -675,48 +641,42 @@ ctx_xfer:
        ld b32 $r12 D[$r0 + #hub_mmio_list_head]
        ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
        mov $r14 0              // not multi
-       call #mmctx_xfer
+       call(mmctx_xfer)
 
        // wait for GPCs to all complete
        mov $r10 8              // DONE_BAR
-       call #wait_doneo
+       call(wait_doneo)
 
        // wait for strand xfer to complete
-       call #strand_wait
+       call(strand_wait)
 
        // post-op
        bra $p1 #ctx_xfer_post
                mov $r10 12             // DONE_UNK12
-               call #wait_donez
-               mov $r1 0xa10
-               shl b32 $r1 6
-               mov $r2 5
-               iowr I[$r1] $r2         // MEM_CMD
-               ctx_xfer_post_save_wait:
-                       iord $r2 I[$r1]
-                       or $r2 $r2
-                       bra ne #ctx_xfer_post_save_wait
+               call(wait_donez)
+               mov $r15 5 // MEM_CMD 5 ???
+               call(ctx_mem)
 
        bra $p2 #ctx_xfer_done
        ctx_xfer_post:
                mov $r15 2
-               call #ctx_4170s
+               call(ctx_4170s)
                clear b32 $r15
-               call #ctx_86c
-               call #strand_post
-               call #ctx_4170w
+               call(ctx_86c)
+               call(strand_post)
+               call(ctx_4170w)
                clear b32 $r15
-               call #ctx_4170s
+               call(ctx_4170s)
 
                bra not $p1 #ctx_xfer_no_post_mmio
                ld b32 $r1 D[$r0 + #chan_mmio_count]
                or $r1 $r1
                bra e #ctx_xfer_no_post_mmio
-                       call #ctx_mmio_exec
+                       call(ctx_mmio_exec)
 
                ctx_xfer_no_post_mmio:
 #if CHIPSET < GK100
-               call #ctx_4160c
+               call(ctx_4160c)
 #endif
 
        ctx_xfer_done:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5 b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5
new file mode 100644 (file)
index 0000000..7c5d256
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#define CHIPSET GK208
+#include "macros.fuc"
+
+.section #nv108_grhub_data
+#define INCLUDE_DATA
+#include "com.fuc"
+#include "hub.fuc"
+#undef INCLUDE_DATA
+
+.section #nv108_grhub_code
+#define INCLUDE_CODE
+bra #init
+#include "com.fuc"
+#include "hub.fuc"
+.align 256
+#undef INCLUDE_CODE
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
new file mode 100644 (file)
index 0000000..4750984
--- /dev/null
@@ -0,0 +1,916 @@
+uint32_t nv108_grhub_data[] = {
+/* 0x0000: hub_mmio_list_head */
+       0x00000300,
+/* 0x0004: hub_mmio_list_tail */
+       0x00000304,
+/* 0x0008: gpc_count */
+       0x00000000,
+/* 0x000c: rop_count */
+       0x00000000,
+/* 0x0010: cmd_queue */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0058: ctx_current */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0100: chan_data */
+/* 0x0100: chan_mmio_count */
+       0x00000000,
+/* 0x0104: chan_mmio_address */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0200: xfer_data */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+/* 0x0300: hub_mmio_list_base */
+       0x0417e91c,
+};
+
+uint32_t nv108_grhub_code[] = {
+       0x030e0ef5,
+/* 0x0004: queue_put */
+       0x9800d898,
+       0x86f001d9,
+       0xf489a408,
+       0x020f0b1b,
+       0x0002f87e,
+/* 0x001a: queue_put_next */
+       0x98c400f8,
+       0x0384b607,
+       0xb6008dbb,
+       0x8eb50880,
+       0x018fb500,
+       0xf00190b6,
+       0xd9b50f94,
+/* 0x0037: queue_get */
+       0xf400f801,
+       0xd8980131,
+       0x01d99800,
+       0x0bf489a4,
+       0x0789c421,
+       0xbb0394b6,
+       0x90b6009d,
+       0x009e9808,
+       0xb6019f98,
+       0x84f00180,
+       0x00d8b50f,
+/* 0x0063: queue_get_done */
+       0xf80132f4,
+/* 0x0065: nv_rd32 */
+       0xf0ecb200,
+       0x00801fc9,
+       0x0cf601ca,
+/* 0x0073: nv_rd32_wait */
+       0x8c04bd00,
+       0xcf01ca00,
+       0xccc800cc,
+       0xf61bf41f,
+       0xec7e060a,
+       0x008f0000,
+       0xffcf01cb,
+/* 0x008f: nv_wr32 */
+       0x8000f800,
+       0xf601cc00,
+       0x04bd000f,
+       0xc9f0ecb2,
+       0x1ec9f01f,
+       0x01ca0080,
+       0xbd000cf6,
+/* 0x00a9: nv_wr32_wait */
+       0xca008c04,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f61b,
+/* 0x00b8: wait_donez */
+       0x99f094bd,
+       0x37008000,
+       0x0009f602,
+       0x008004bd,
+       0x0af60206,
+/* 0x00cf: wait_donez_ne */
+       0x8804bd00,
+       0xcf010000,
+       0x8aff0088,
+       0xf61bf488,
+       0x99f094bd,
+       0x17008000,
+       0x0009f602,
+       0x00f804bd,
+/* 0x00ec: wait_doneo */
+       0x99f094bd,
+       0x37008000,
+       0x0009f602,
+       0x008004bd,
+       0x0af60206,
+/* 0x0103: wait_doneo_e */
+       0x8804bd00,
+       0xcf010000,
+       0x8aff0088,
+       0xf60bf488,
+       0x99f094bd,
+       0x17008000,
+       0x0009f602,
+       0x00f804bd,
+/* 0x0120: mmctx_size */
+/* 0x0122: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0x1bf4efa4,
+       0xf89fb2ec,
+/* 0x013d: mmctx_xfer */
+       0xf094bd00,
+       0x00800199,
+       0x09f60237,
+       0xbd04bd00,
+       0x05bbfd94,
+       0x800f0bf4,
+       0xf601c400,
+       0x04bd000b,
+/* 0x015f: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0xc6008018,
+       0x000ef601,
+       0x008004bd,
+       0x0ff601c7,
+       0xf004bd00,
+/* 0x017a: mmctx_multi_disabled */
+       0xabc80199,
+       0x10b4b600,
+       0xc80cb9f0,
+       0xe4b601ae,
+       0x05befd11,
+       0x01c50080,
+       0xbd000bf6,
+/* 0x0195: mmctx_exec_loop */
+/* 0x0195: mmctx_wait_free */
+       0xc5008e04,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f60b,
+       0x05e9fd00,
+       0x01c80080,
+       0xbd000ef6,
+       0x04c0b604,
+       0x1bf4cda4,
+       0x02abc8df,
+/* 0x01bf: mmctx_fini_wait */
+       0x8b1c1bf4,
+       0xcf01c500,
+       0xb4f000bb,
+       0x10b4b01f,
+       0x0af31bf4,
+       0x00b87e02,
+       0x250ef400,
+/* 0x01d8: mmctx_stop */
+       0xb600abc8,
+       0xb9f010b4,
+       0x12b9f00c,
+       0x01c50080,
+       0xbd000bf6,
+/* 0x01ed: mmctx_stop_wait */
+       0xc5008b04,
+       0x00bbcf01,
+       0xf412bbc8,
+/* 0x01fa: mmctx_done */
+       0x94bdf61b,
+       0x800199f0,
+       0xf6021700,
+       0x04bd0009,
+/* 0x020a: strand_wait */
+       0xa0f900f8,
+       0xb87e020a,
+       0xa0fc0000,
+/* 0x0216: strand_pre */
+       0x0c0900f8,
+       0x024afc80,
+       0xbd0009f6,
+       0x020a7e04,
+/* 0x0227: strand_post */
+       0x0900f800,
+       0x4afc800d,
+       0x0009f602,
+       0x0a7e04bd,
+       0x00f80002,
+/* 0x0238: strand_set */
+       0xfc800f0c,
+       0x0cf6024f,
+       0x0c04bd00,
+       0x4afc800b,
+       0x000cf602,
+       0xfc8004bd,
+       0x0ef6024f,
+       0x0c04bd00,
+       0x4afc800a,
+       0x000cf602,
+       0x0a7e04bd,
+       0x00f80002,
+/* 0x0268: strand_ctx_init */
+       0x99f094bd,
+       0x37008003,
+       0x0009f602,
+       0x167e04bd,
+       0x030e0002,
+       0x0002387e,
+       0xfc80c4bd,
+       0x0cf60247,
+       0x0c04bd00,
+       0x4afc8001,
+       0x000cf602,
+       0x0a7e04bd,
+       0x0c920002,
+       0x46fc8001,
+       0x000cf602,
+       0x020c04bd,
+       0x024afc80,
+       0xbd000cf6,
+       0x020a7e04,
+       0x02277e00,
+       0x42008800,
+       0x20008902,
+       0x0099cf02,
+/* 0x02c7: ctx_init_strand_loop */
+       0xf608fe95,
+       0x8ef6008e,
+       0x808acf40,
+       0xb606a5b6,
+       0xeabb01a0,
+       0x0480b600,
+       0xf40192b6,
+       0xe4b6e81b,
+       0xf2efbc08,
+       0x99f094bd,
+       0x17008003,
+       0x0009f602,
+       0x00f804bd,
+/* 0x02f8: error */
+       0x02050080,
+       0xbd000ff6,
+       0x80010f04,
+       0xf6030700,
+       0x04bd000f,
+/* 0x030e: init */
+       0x04bd00f8,
+       0x410007fe,
+       0x11cf4200,
+       0x0911e700,
+       0x0814b601,
+       0x020014fe,
+       0x12004002,
+       0xbd0002f6,
+       0x05c94104,
+       0xbd0010fe,
+       0x07004024,
+       0xbd0002f6,
+       0x20034204,
+       0x01010080,
+       0xbd0002f6,
+       0x20044204,
+       0x01010480,
+       0xbd0002f6,
+       0x200b4204,
+       0x01010880,
+       0xbd0002f6,
+       0x200c4204,
+       0x01011c80,
+       0xbd0002f6,
+       0x01039204,
+       0x03090080,
+       0xbd0003f6,
+       0x87044204,
+       0xf6040040,
+       0x04bd0002,
+       0x00400402,
+       0x0002f603,
+       0x31f404bd,
+       0x96048e10,
+       0x00657e40,
+       0xc7feb200,
+       0x01b590f1,
+       0x1ff4f003,
+       0x01020fb5,
+       0x041fbb01,
+       0x800112b6,
+       0xf6010300,
+       0x04bd0001,
+       0x01040080,
+       0xbd0001f6,
+       0x01004104,
+       0x627e020f,
+       0x717e0006,
+       0x100f0006,
+       0x0006b37e,
+       0x98000e98,
+       0x207e010f,
+       0x14950001,
+       0xc0008008,
+       0x0004f601,
+       0x008004bd,
+       0x04f601c1,
+       0xb704bd00,
+       0xbb130030,
+       0xf5b6001f,
+       0xd3008002,
+       0x000ff601,
+       0x15b604bd,
+       0x0110b608,
+       0xb20814b6,
+       0x02687e1f,
+       0x001fbb00,
+       0x84020398,
+/* 0x041f: init_gpc */
+       0xb8502000,
+       0x0008044e,
+       0x8f7e1fb2,
+       0x4eb80000,
+       0xbd00010c,
+       0x008f7ef4,
+       0x044eb800,
+       0x8f7e0001,
+       0x4eb80000,
+       0x0f000100,
+       0x008f7e02,
+       0x004eb800,
+/* 0x044e: init_gpc_wait */
+       0x657e0008,
+       0xffc80000,
+       0xf90bf41f,
+       0x08044eb8,
+       0x00657e00,
+       0x001fbb00,
+       0x800040b7,
+       0xf40132b6,
+       0x000fb41b,
+       0x0006b37e,
+       0x627e000f,
+       0x00800006,
+       0x01f60201,
+       0xbd04bd00,
+       0x1f19f014,
+       0x02300080,
+       0xbd0001f6,
+/* 0x0491: main */
+       0x0031f404,
+       0x0d0028f4,
+       0x00377e10,
+       0xf401f400,
+       0x4001e4b1,
+       0x00c71bf5,
+       0x99f094bd,
+       0x37008004,
+       0x0009f602,
+       0x008104bd,
+       0x11cf02c0,
+       0xc1008200,
+       0x0022cf02,
+       0xf41f13c8,
+       0x23c8770b,
+       0x550bf41f,
+       0x12b220f9,
+       0x99f094bd,
+       0x37008007,
+       0x0009f602,
+       0x32f404bd,
+       0x0231f401,
+       0x0008367e,
+       0x99f094bd,
+       0x17008007,
+       0x0009f602,
+       0x20fc04bd,
+       0x99f094bd,
+       0x37008006,
+       0x0009f602,
+       0x31f404bd,
+       0x08367e01,
+       0xf094bd00,
+       0x00800699,
+       0x09f60217,
+       0xf404bd00,
+/* 0x0522: chsw_prev_no_next */
+       0x20f92f0e,
+       0x32f412b2,
+       0x0232f401,
+       0x0008367e,
+       0x008020fc,
+       0x02f602c0,
+       0xf404bd00,
+/* 0x053e: chsw_no_prev */
+       0x23c8130e,
+       0x0d0bf41f,
+       0xf40131f4,
+       0x367e0232,
+/* 0x054e: chsw_done */
+       0x01020008,
+       0x02c30080,
+       0xbd0002f6,
+       0xf094bd04,
+       0x00800499,
+       0x09f60217,
+       0xf504bd00,
+/* 0x056b: main_not_ctx_switch */
+       0xb0ff2a0e,
+       0x1bf401e4,
+       0x7ef2b20c,
+       0xf40007d6,
+/* 0x057a: main_not_ctx_chan */
+       0xe4b0400e,
+       0x2c1bf402,
+       0x99f094bd,
+       0x37008007,
+       0x0009f602,
+       0x32f404bd,
+       0x0232f401,
+       0x0008367e,
+       0x99f094bd,
+       0x17008007,
+       0x0009f602,
+       0x0ef404bd,
+/* 0x05a9: main_not_ctx_save */
+       0x10ef9411,
+       0x7e01f5f0,
+       0xf50002f8,
+/* 0x05b7: main_done */
+       0xbdfede0e,
+       0x1f29f024,
+       0x02300080,
+       0xbd0002f6,
+       0xcc0ef504,
+/* 0x05c9: ih */
+       0xfe80f9fe,
+       0x80f90188,
+       0xa0f990f9,
+       0xd0f9b0f9,
+       0xf0f9e0f9,
+       0x004a04bd,
+       0x00aacf02,
+       0xf404abc4,
+       0x100d230b,
+       0xcf1a004e,
+       0x004f00ee,
+       0x00ffcf19,
+       0x0000047e,
+       0x0400b0b7,
+       0x0040010e,
+       0x000ef61d,
+/* 0x060a: ih_no_fifo */
+       0xabe404bd,
+       0x0bf40100,
+       0x4e100d0c,
+       0x047e4001,
+/* 0x061a: ih_no_ctxsw */
+       0xabe40000,
+       0x0bf40400,
+       0x01004b10,
+       0x448ebfb2,
+       0x8f7e4001,
+/* 0x062e: ih_no_fwmthd */
+       0x044b0000,
+       0xffb0bd01,
+       0x0bf4b4ab,
+       0x0700800c,
+       0x000bf603,
+/* 0x0642: ih_no_other */
+       0x004004bd,
+       0x000af601,
+       0xf0fc04bd,
+       0xd0fce0fc,
+       0xa0fcb0fc,
+       0x80fc90fc,
+       0xfc0088fe,
+       0x0032f480,
+/* 0x0662: ctx_4170s */
+       0xf5f001f8,
+       0x8effb210,
+       0x7e404170,
+       0xf800008f,
+/* 0x0671: ctx_4170w */
+       0x41708e00,
+       0x00657e40,
+       0xf0ffb200,
+       0x1bf410f4,
+/* 0x0683: ctx_redswitch */
+       0x4e00f8f3,
+       0xe5f00200,
+       0x20e5f040,
+       0x8010e5f0,
+       0xf6018500,
+       0x04bd000e,
+/* 0x069a: ctx_redswitch_delay */
+       0xf2b6080f,
+       0xfd1bf401,
+       0x0400e5f1,
+       0x0100e5f1,
+       0x01850080,
+       0xbd000ef6,
+/* 0x06b3: ctx_86c */
+       0x8000f804,
+       0xf6022300,
+       0x04bd000f,
+       0x148effb2,
+       0x8f7e408a,
+       0xffb20000,
+       0x41a88c8e,
+       0x00008f7e,
+/* 0x06d2: ctx_mem */
+       0x008000f8,
+       0x0ff60284,
+/* 0x06db: ctx_mem_wait */
+       0x8f04bd00,
+       0xcf028400,
+       0xfffd00ff,
+       0xf61bf405,
+/* 0x06ea: ctx_load */
+       0x94bd00f8,
+       0x800599f0,
+       0xf6023700,
+       0x04bd0009,
+       0xb87e0c0a,
+       0xf4bd0000,
+       0x02890080,
+       0xbd000ff6,
+       0xc1008004,
+       0x0002f602,
+       0x008004bd,
+       0x02f60283,
+       0x0f04bd00,
+       0x06d27e07,
+       0xc0008000,
+       0x0002f602,
+       0x0bfe04bd,
+       0x1f2af000,
+       0xb60424b6,
+       0x94bd0220,
+       0x800899f0,
+       0xf6023700,
+       0x04bd0009,
+       0x02810080,
+       0xbd0002f6,
+       0x0000d204,
+       0x25f08000,
+       0x88008002,
+       0x0002f602,
+       0x100104bd,
+       0xf0020042,
+       0x12fa0223,
+       0xbd03f805,
+       0x0899f094,
+       0x02170080,
+       0xbd0009f6,
+       0x81019804,
+       0x981814b6,
+       0x25b68002,
+       0x0512fd08,
+       0xbd1601b5,
+       0x0999f094,
+       0x02370080,
+       0xbd0009f6,
+       0x81008004,
+       0x0001f602,
+       0x010204bd,
+       0x02880080,
+       0xbd0002f6,
+       0x01004104,
+       0xfa0613f0,
+       0x03f80501,
+       0x99f094bd,
+       0x17008009,
+       0x0009f602,
+       0x94bd04bd,
+       0x800599f0,
+       0xf6021700,
+       0x04bd0009,
+/* 0x07d6: ctx_chan */
+       0xea7e00f8,
+       0x0c0a0006,
+       0x0000b87e,
+       0xd27e050f,
+       0x00f80006,
+/* 0x07e8: ctx_mmio_exec */
+       0x80410398,
+       0xf6028100,
+       0x04bd0003,
+/* 0x07f6: ctx_mmio_loop */
+       0x34c434bd,
+       0x0e1bf4ff,
+       0xf0020045,
+       0x35fa0653,
+/* 0x0807: ctx_mmio_pull */
+       0x9803f805,
+       0x4f98804e,
+       0x008f7e81,
+       0x0830b600,
+       0xf40112b6,
+/* 0x081a: ctx_mmio_done */
+       0x0398df1b,
+       0x81008016,
+       0x0003f602,
+       0x00b504bd,
+       0x01004140,
+       0xfa0613f0,
+       0x03f80601,
+/* 0x0836: ctx_xfer */
+       0x040e00f8,
+       0x03020080,
+       0xbd000ef6,
+/* 0x0841: ctx_xfer_idle */
+       0x00008e04,
+       0x00eecf03,
+       0x2000e4f1,
+       0xf4f51bf4,
+       0x02f40611,
+/* 0x0855: ctx_xfer_pre */
+       0x7e100f0c,
+       0xf40006b3,
+/* 0x085e: ctx_xfer_pre_load */
+       0x020f1b11,
+       0x0006627e,
+       0x0006717e,
+       0x0006837e,
+       0x627ef4bd,
+       0xea7e0006,
+/* 0x0876: ctx_xfer_exec */
+       0x01980006,
+       0x8024bd16,
+       0xf6010500,
+       0x04bd0002,
+       0x008e1fb2,
+       0x8f7e41a5,
+       0xfcf00000,
+       0x022cf001,
+       0xfd0124b6,
+       0xffb205f2,
+       0x41a5048e,
+       0x00008f7e,
+       0x0002167e,
+       0xfc8024bd,
+       0x02f60247,
+       0xf004bd00,
+       0x20b6012c,
+       0x4afc8003,
+       0x0002f602,
+       0xacf004bd,
+       0x06a5f001,
+       0x0c98000b,
+       0x010d9800,
+       0x3d7e000e,
+       0x080a0001,
+       0x0000ec7e,
+       0x00020a7e,
+       0x0a1201f4,
+       0x00b87e0c,
+       0x7e050f00,
+       0xf40006d2,
+/* 0x08f2: ctx_xfer_post */
+       0x020f2d02,
+       0x0006627e,
+       0xb37ef4bd,
+       0x277e0006,
+       0x717e0002,
+       0xf4bd0006,
+       0x0006627e,
+       0x981011f4,
+       0x11fd4001,
+       0x070bf405,
+       0x0007e87e,
+/* 0x091c: ctx_xfer_no_post_mmio */
+/* 0x091c: ctx_xfer_done */
+       0x000000f8,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+};
index b59f694c0423e35afddc26e2388ae8a7d6514472..132f684b1946a9357697eace024fa866af43b72f 100644 (file)
@@ -206,14 +206,14 @@ uint32_t nvc0_grhub_data[] = {
 };
 
 uint32_t nvc0_grhub_code[] = {
-       0x031b0ef5,
+       0x039b0ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvc0_grhub_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f00f00,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f00f,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf00f0007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f00f00,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvc0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0x07f100f8,
        0x03f00500,
        0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvc0_grhub_code[] = {
        0x0007f101,
        0x0303f007,
        0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
        0xbd00f804,
-       0x0004fe04,
-       0xf10007fe,
-       0xf0120017,
-       0x12d00227,
-       0xb117f100,
-       0x0010fe05,
-       0x040017f1,
-       0xf1c010d0,
-       0xb6040437,
-       0x27f10634,
-       0x32d02003,
-       0x0427f100,
-       0x0132d020,
+       0x0007fe04,
+       0x420017f1,
+       0xcf0013f0,
+       0x11e70011,
+       0x14b60109,
+       0x0014fe08,
+       0xf10227f0,
+       0xf0120007,
+       0x02d00003,
+       0xf104bd00,
+       0xfe06c817,
+       0x24bd0010,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0002,
+       0x200327f1,
+       0x010007f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200427f1,
+       0x010407f1,
+       0xd00103f0,
+       0x04bd0002,
        0x200b27f1,
-       0xf10232d0,
-       0xd0200c27,
-       0x27f10732,
-       0x24b60c24,
-       0x0003b906,
-       0xf10023d0,
+       0x010807f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200c27f1,
+       0x011c07f1,
+       0xd00103f0,
+       0x04bd0002,
+       0xf1010392,
+       0xf0090007,
+       0x03d00303,
+       0xf104bd00,
        0xf0870427,
-       0x12d00023,
-       0x0012b700,
-       0x0427f001,
-       0xf40012d0,
-       0xe7f11031,
-       0xe3f09604,
-       0x6821f440,
-       0x8090f1c7,
-       0xf4f00301,
-       0x020f801f,
-       0xbb0117f0,
-       0x12b6041f,
-       0x0c27f101,
-       0x0624b604,
-       0xd00021d0,
-       0x17f14021,
-       0x0e980100,
-       0x010f9800,
-       0x014721f5,
-       0x070037f1,
-       0x950634b6,
-       0x34d00814,
-       0x4034d000,
-       0x130030b7,
-       0xb6001fbb,
-       0x3fd002f5,
-       0x0815b600,
-       0xb60110b6,
-       0x1fb90814,
-       0x7121f502,
-       0x001fbb02,
-       0xf1020398,
-       0xf0200047,
-/* 0x03f6: init_gpc */
-       0x4ea05043,
-       0x1fb90804,
-       0x8d21f402,
-       0x010c4ea0,
-       0x21f4f4bd,
-       0x044ea08d,
-       0x8d21f401,
-       0x01004ea0,
-       0xf402f7f0,
-       0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-       0x21f40800,
-       0x1fffc868,
-       0xa0fa0bf4,
-       0xf408044e,
-       0x1fbb6821,
-       0x0040b700,
-       0x0132b680,
-       0xf1be1bf4,
+       0x07f10023,
+       0x03f00400,
+       0x0002d000,
+       0x27f004bd,
+       0x0007f104,
+       0x0003f003,
+       0xbd0002d0,
+       0x1031f404,
+       0x9604e7f1,
+       0xf440e3f0,
+       0xfeb96821,
+       0x90f1c702,
+       0xf0030180,
+       0x0f801ff4,
+       0x0117f002,
+       0xb6041fbb,
+       0x07f10112,
+       0x03f00300,
+       0x0001d001,
+       0x07f104bd,
+       0x03f00400,
+       0x0001d001,
+       0x17f104bd,
+       0xf7f00100,
+       0xb521f502,
+       0xc721f507,
+       0x10f7f007,
+       0x081421f5,
+       0x98000e98,
+       0x21f5010f,
+       0x14950150,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0004d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0004d0,
+       0x0030b704,
+       0x001fbb13,
+       0xf102f5b6,
+       0xf0d30007,
+       0x0fd00103,
+       0xb604bd00,
+       0x10b60815,
+       0x0814b601,
+       0xf5021fb9,
+       0xbb02d321,
+       0x0398001f,
+       0x0047f102,
+       0x5043f020,
+/* 0x04f4: init_gpc */
+       0x08044ea0,
+       0xf4021fb9,
+       0x4ea09d21,
+       0xf4bd010c,
+       0xa09d21f4,
+       0xf401044e,
+       0x4ea09d21,
+       0xf7f00100,
+       0x9d21f402,
+       0x08004ea0,
+/* 0x051c: init_gpc_wait */
+       0xc86821f4,
+       0x0bf41fff,
+       0x044ea0fa,
+       0x6821f408,
+       0xb7001fbb,
+       0xb6800040,
+       0x1bf40132,
+       0x00f7f0be,
+       0x081421f5,
+       0xf500f7f0,
+       0xf107b521,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -519,402 +584,399 @@ uint32_t nvc0_grhub_code[] = {
        0x080007f1,
        0xd00203f0,
        0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
        0xf40031f4,
        0xd7f00028,
        0x3921f410,
        0xb1f401f4,
        0xf54001e4,
-       0xbd00de1b,
+       0xbd00e91b,
        0x0499f094,
        0x0f0007f1,
        0xd00203f0,
        0x04bd0009,
-       0x0b0017f1,
-       0xcf0614b6,
-       0x11cf4012,
-       0x1f13c800,
-       0x00870bf5,
-       0xf41f23c8,
-       0x20f9620b,
-       0xbd0212b9,
-       0x0799f094,
-       0x0f0007f1,
-       0xd00203f0,
-       0x04bd0009,
-       0xf40132f4,
-       0x21f50231,
-       0x94bd082f,
+       0xc00017f1,
+       0xcf0213f0,
+       0x27f10011,
+       0x23f0c100,
+       0x0022cf02,
+       0xf51f13c8,
+       0xc800890b,
+       0x0bf41f23,
+       0xb920f962,
+       0x94bd0212,
        0xf10799f0,
-       0xf0170007,
+       0xf00f0007,
        0x09d00203,
-       0xfc04bd00,
-       0xf094bd20,
-       0x07f10699,
-       0x03f00f00,
-       0x0009d002,
-       0x31f404bd,
-       0x2f21f501,
-       0xf094bd08,
-       0x07f10699,
+       0xf404bd00,
+       0x31f40132,
+       0xe821f502,
+       0xf094bd09,
+       0x07f10799,
        0x03f01700,
        0x0009d002,
-       0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-       0xb920f931,
-       0x32f40212,
-       0x0232f401,
-       0x082f21f5,
-       0x17f120fc,
-       0x14b60b00,
-       0x0012d006,
-/* 0x0517: chsw_no_prev */
-       0xc8130ef4,
-       0x0bf41f23,
-       0x0131f40d,
-       0xf50232f4,
-/* 0x0527: chsw_done */
-       0xf1082f21,
-       0xb60b0c17,
-       0x27f00614,
-       0x0012d001,
+       0x20fc04bd,
        0x99f094bd,
-       0x0007f104,
+       0x0007f106,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0131f404,
+       0x09e821f5,
+       0x99f094bd,
+       0x0007f106,
        0x0203f017,
        0xbd0009d0,
-       0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-       0x01e4b0ff,
-       0xb90d1bf4,
-       0x21f502f2,
-       0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
-       0x02e4b046,
-       0xbd321bf4,
-       0x0799f094,
-       0x0f0007f1,
+       0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+       0x12b920f9,
+       0x0132f402,
+       0xf50232f4,
+       0xfc09e821,
+       0x0007f120,
+       0x0203f0c0,
+       0xbd0002d0,
+       0x130ef404,
+/* 0x062c: chsw_no_prev */
+       0xf41f23c8,
+       0x31f40d0b,
+       0x0232f401,
+       0x09e821f5,
+/* 0x063c: chsw_done */
+       0xf10127f0,
+       0xf0c30007,
+       0x02d00203,
+       0xbd04bd00,
+       0x0499f094,
+       0x170007f1,
        0xd00203f0,
        0x04bd0009,
-       0xf40132f4,
-       0x21f50232,
-       0x94bd082f,
+       0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+       0xf401e4b0,
+       0xf2b90d1b,
+       0x7821f502,
+       0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+       0xf402e4b0,
+       0x94bd321b,
        0xf10799f0,
-       0xf0170007,
+       0xf00f0007,
        0x09d00203,
        0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-       0xef94110e,
-       0x01f5f010,
-       0x02fe21f5,
-       0xfec00ef5,
-/* 0x059c: main_done */
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f008,
-       0xbd0002d0,
-       0xab0ef504,
-/* 0x05b1: ih */
-       0xfe80f9fe,
-       0x80f90188,
-       0xa0f990f9,
-       0xd0f9b0f9,
-       0xf0f9e0f9,
-       0x0acf04bd,
-       0x04abc480,
-       0xf11d0bf4,
-       0xf01900b7,
-       0xbecf10d7,
-       0x00bfcf40,
+       0x32f40132,
+       0xe821f502,
+       0xf094bd09,
+       0x07f10799,
+       0x03f01700,
+       0x0009d002,
+       0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+       0x10ef9411,
+       0xf501f5f0,
+       0xf5037e21,
+/* 0x06b3: main_done */
+       0xbdfeb50e,
+       0x1f29f024,
+       0x080007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xfea00ef5,
+/* 0x06c8: ih */
+       0x88fe80f9,
+       0xf980f901,
+       0xf9a0f990,
+       0xf9d0f9b0,
+       0xbdf0f9e0,
+       0x00a7f104,
+       0x00a3f002,
+       0xc400aacf,
+       0x0bf404ab,
+       0x10d7f030,
+       0x1a00e7f1,
+       0xcf00e3f0,
+       0xf7f100ee,
+       0xf3f01900,
+       0x00ffcf00,
        0xb70421f4,
        0xf00400b0,
-       0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-       0x00abe400,
-       0x0d0bf401,
-       0xf110d7f0,
-       0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-       0xb7f10421,
-       0xb0bd0104,
-       0xf4b4abff,
-       0xa7f10d0b,
-       0xa4b60c1c,
-       0x00abd006,
-/* 0x0610: ih_no_other */
-       0xfc400ad0,
+       0x07f101e7,
+       0x03f01d00,
+       0x000ed000,
+/* 0x071a: ih_no_fifo */
+       0xabe404bd,
+       0x0bf40100,
+       0x10d7f00d,
+       0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+       0xe40421f4,
+       0xf40400ab,
+       0xb7f1140b,
+       0xbfb90100,
+       0x44e7f102,
+       0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+       0xf19d21f4,
+       0xbd0104b7,
+       0xb4abffb0,
+       0xf10f0bf4,
+       0xf0070007,
+       0x0bd00303,
+/* 0x075b: ih_no_other */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
        0xfce0fcf0,
        0xfcb0fcd0,
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x062b: ctx_4160s */
-       0xf101f800,
-       0xf04160e7,
-       0xf7f040e3,
-       0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
-       0xc86821f4,
-       0x0bf404ff,
-/* 0x0643: ctx_4160c */
-       0xf100f8fa,
+/* 0x077f: ctx_4160s */
+       0xf001f800,
+       0xffb901f7,
+       0x60e7f102,
+       0x40e3f041,
+/* 0x078f: ctx_4160s_wait */
+       0xf19d21f4,
        0xf04160e7,
-       0xf4bd40e3,
-       0xf88d21f4,
-/* 0x0651: ctx_4170s */
-       0x70e7f100,
+       0x21f440e3,
+       0x02ffb968,
+       0xf404ffc8,
+       0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+       0xffb9f4bd,
+       0x60e7f102,
        0x40e3f041,
-       0xf410f5f0,
-       0x00f88d21,
-/* 0x0660: ctx_4170w */
-       0x4170e7f1,
-       0xf440e3f0,
-       0xf4f06821,
-       0xf31bf410,
-/* 0x0672: ctx_redswitch */
-       0xe7f100f8,
-       0xe4b60614,
-       0x70f7f106,
-       0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
-       0xb608f7f0,
-       0x1bf401f2,
-       0x70f7f1fd,
-       0x00efd007,
-/* 0x0692: ctx_86c */
-       0xe7f100f8,
-       0xe4b6086c,
-       0x00efd006,
-       0x8a14e7f1,
-       0xf440e3f0,
-       0xe7f18d21,
-       0xe3f0a86c,
-       0x8d21f441,
-/* 0x06b2: ctx_load */
+       0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+       0x10f5f000,
+       0xf102ffb9,
+       0xf04170e7,
+       0x21f440e3,
+/* 0x07c7: ctx_4170w */
+       0xf100f89d,
+       0xf04170e7,
+       0x21f440e3,
+       0x02ffb968,
+       0xf410f4f0,
+       0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+       0x0200e7f1,
+       0xf040e5f0,
+       0xe5f020e5,
+       0x0007f110,
+       0x0103f085,
+       0xbd000ed0,
+       0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+       0xf401f2b6,
+       0xe5f1fd1b,
+       0xe5f10400,
+       0x07f10100,
+       0x03f08500,
+       0x000ed001,
+       0x00f804bd,
+/* 0x0814: ctx_86c */
+       0x1b0007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0xf102ffb9,
+       0xf08a14e7,
+       0x21f440e3,
+       0x02ffb99d,
+       0xa86ce7f1,
+       0xf441e3f0,
+       0x00f89d21,
+/* 0x083c: ctx_mem */
+       0x840007f1,
+       0xd00203f0,
+       0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+       0x8400f7f1,
+       0xcf02f3f0,
+       0xfffd00ff,
+       0xf31bf405,
+/* 0x085a: ctx_load */
        0x94bd00f8,
        0xf10599f0,
        0xf00f0007,
        0x09d00203,
        0xf004bd00,
        0x21f40ca7,
-       0x2417f1c9,
-       0x0614b60a,
-       0xf10010d0,
-       0xb60b0037,
-       0x32d00634,
-       0x0c17f140,
-       0x0614b60a,
-       0xd00747f0,
-       0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
-       0x4014cf40,
-       0xf41f44f0,
-       0x32d0fa1b,
-       0x000bfe00,
-       0xb61f2af0,
-       0x20b60424,
-       0xf094bd02,
+       0xf1f4bdd0,
+       0xf0890007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf0c10007,
+       0x02d00203,
+       0xf104bd00,
+       0xf0830007,
+       0x02d00203,
+       0xf004bd00,
+       0x21f507f7,
+       0x07f1083c,
+       0x03f0c000,
+       0x0002d002,
+       0x0bfe04bd,
+       0x1f2af000,
+       0xb60424b6,
+       0x94bd0220,
+       0xf10899f0,
+       0xf00f0007,
+       0x09d00203,
+       0xf104bd00,
+       0xf0810007,
+       0x02d00203,
+       0xf104bd00,
+       0xf1000027,
+       0xf0800023,
+       0x07f10225,
+       0x03f08800,
+       0x0002d002,
+       0x17f004bd,
+       0x0027f110,
+       0x0223f002,
+       0xf80512fa,
+       0xf094bd03,
        0x07f10899,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x17f104bd,
-       0x14b60a04,
-       0x0012d006,
-       0x0a2017f1,
-       0xf00614b6,
-       0x23f10227,
-       0x12d08000,
-       0x1017f000,
-       0x020027f1,
-       0xfa0223f0,
-       0x03f80512,
+       0x019804bd,
+       0x1814b681,
+       0xb6800298,
+       0x12fd0825,
+       0x16018005,
        0x99f094bd,
-       0x0007f108,
-       0x0203f017,
+       0x0007f109,
+       0x0203f00f,
        0xbd0009d0,
-       0x81019804,
-       0x981814b6,
-       0x25b68002,
-       0x0512fd08,
-       0xbd160180,
-       0x0999f094,
-       0x0f0007f1,
-       0xd00203f0,
-       0x04bd0009,
-       0x0a0427f1,
-       0xd00624b6,
-       0x27f00021,
-       0x2017f101,
-       0x0614b60a,
-       0xf10012d0,
-       0xf0010017,
-       0x01fa0613,
-       0xbd03f805,
-       0x0999f094,
-       0x170007f1,
+       0x0007f104,
+       0x0203f081,
+       0xbd0001d0,
+       0x0127f004,
+       0x880007f1,
        0xd00203f0,
-       0x04bd0009,
+       0x04bd0002,
+       0x010017f1,
+       0xfa0613f0,
+       0x03f80501,
        0x99f094bd,
-       0x0007f105,
+       0x0007f109,
        0x0203f017,
        0xbd0009d0,
-/* 0x07bb: ctx_chan */
-       0xf500f804,
-       0xf5062b21,
-       0xf006b221,
-       0x21f40ca7,
-       0x1017f1c9,
-       0x0614b60a,
-       0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
-       0x12cf0012,
-       0x0522fd00,
-       0xf5fa1bf4,
-       0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
-       0x41039800,
-       0x0a0427f1,
-       0xd00624b6,
-       0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+       0xf094bd04,
+       0x07f10599,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0978: ctx_chan */
+       0x077f21f5,
+       0x085a21f5,
+       0xf40ca7f0,
+       0xf7f0d021,
+       0x3c21f505,
+       0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+       0x9800f807,
+       0x07f14103,
+       0x03f08100,
+       0x0003d002,
+       0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
        0xf4ff34c4,
        0x57f10f1b,
        0x53f00200,
        0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
        0x4e9803f8,
        0x814f9880,
-       0xb68d21f4,
+       0xb69d21f4,
        0x12b60830,
        0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
-       0xd0160398,
-       0x00800023,
-       0x0017f140,
-       0x0613f001,
-       0xf80601fa,
-/* 0x082f: ctx_xfer */
-       0xf100f803,
-       0xb60c00f7,
-       0xe7f006f4,
-       0x80fed004,
-/* 0x083c: ctx_xfer_idle */
-       0xf100fecf,
-       0xf42000e4,
-       0x11f4f91b,
-       0x1102f406,
-/* 0x084c: ctx_xfer_pre */
-       0xf510f7f0,
-       0xf5069221,
-       0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
-       0xf7f01c11,
-       0x5121f502,
-       0x6021f506,
-       0x7221f506,
-       0xf5f4bd06,
-       0xf5065121,
-/* 0x0873: ctx_xfer_exec */
-       0x9806b221,
-       0x27f11601,
-       0x24b60414,
-       0x0020d006,
-       0xa500e7f1,
-       0xb941e3f0,
-       0x21f4021f,
-       0x04e0b68d,
-       0xf001fcf0,
-       0x24b6022c,
-       0x05f2fd01,
-       0xf18d21f4,
-       0xf04afc17,
-       0x27f00213,
-       0x0012d00c,
-       0x021521f5,
-       0x47fc27f1,
-       0xd00223f0,
-       0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+       0xf1160398,
+       0xf0810007,
+       0x03d00203,
+       0x8004bd00,
+       0x17f14000,
+       0x13f00100,
+       0x0601fa06,
+       0x00f803f8,
+/* 0x09e8: ctx_xfer */
+       0xf104e7f0,
+       0xf0020007,
+       0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+       0xf104bd00,
+       0xf00000e7,
+       0xeecf03e3,
+       0x00e4f100,
+       0xf21bf420,
+       0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+       0xf7f01102,
+       0x1421f510,
+       0x7f21f508,
+       0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+       0xf502f7f0,
+       0xf507b521,
+       0xf507c721,
+       0xbd07dc21,
+       0xb521f5f4,
+       0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+       0x16019808,
+       0x07f124bd,
+       0x03f00500,
+       0x0002d001,
+       0x1fb904bd,
+       0x00e7f102,
+       0x41e3f0a5,
+       0xf09d21f4,
+       0x2cf001fc,
+       0x0124b602,
+       0xb905f2fd,
+       0xe7f102ff,
+       0xe3f0a504,
+       0x9d21f441,
+       0x026a21f5,
+       0x07f124bd,
+       0x03f047fc,
+       0x0002d002,
+       0x2cf004bd,
        0x0320b601,
-       0xf00012d0,
-       0xa5f001ac,
-       0x00b7f006,
-       0x98000c98,
-       0xe7f0010d,
-       0x6621f500,
-       0x08a7f001,
-       0x010921f5,
-       0x021521f5,
-       0xf02201f4,
-       0x21f40ca7,
-       0x1017f1c9,
-       0x0614b60a,
-       0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
-       0x12cf0012,
-       0x0522fd00,
-       0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
-       0xf7f03202,
-       0x5121f502,
-       0xf5f4bd06,
-       0xf5069221,
-       0xf5023421,
-       0xbd066021,
-       0x5121f5f4,
-       0x1011f406,
-       0xfd400198,
-       0x0bf40511,
-       0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
-       0x4321f507,
-/* 0x0935: ctx_xfer_done */
-       0x0000f806,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xf001acf0,
+       0xb7f006a5,
+       0x000c9800,
+       0xf0010d98,
+       0x21f500e7,
+       0xa7f0016f,
+       0x1021f508,
+       0x5e21f501,
+       0x1301f402,
+       0xf40ca7f0,
+       0xf7f0d021,
+       0x3c21f505,
+       0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+       0xf502f7f0,
+       0xbd07b521,
+       0x1421f5f4,
+       0x7f21f508,
+       0xc721f502,
+       0xf5f4bd07,
+       0xf407b521,
+       0x01981011,
+       0x0511fd40,
+       0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+       0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+       0xf807a421,
        0x00000000,
        0x00000000,
        0x00000000,
index a1b9f763996a349d0def2c0770568accbfea9b8d..84af8241898764bfef6116d881c0425346684ee2 100644 (file)
@@ -206,14 +206,14 @@ uint32_t nvd7_grhub_data[] = {
 };
 
 uint32_t nvd7_grhub_code[] = {
-       0x031b0ef5,
+       0x039b0ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvd7_grhub_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f00f00,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f00f,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf00f0007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f00f00,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvd7_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0x07f100f8,
        0x03f00500,
        0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvd7_grhub_code[] = {
        0x0007f101,
        0x0303f007,
        0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
        0xbd00f804,
-       0x0004fe04,
-       0xf10007fe,
-       0xf0120017,
-       0x12d00227,
-       0xb117f100,
-       0x0010fe05,
-       0x040017f1,
-       0xf1c010d0,
-       0xb6040437,
-       0x27f10634,
-       0x32d02003,
-       0x0427f100,
-       0x0132d020,
+       0x0007fe04,
+       0x420017f1,
+       0xcf0013f0,
+       0x11e70011,
+       0x14b60109,
+       0x0014fe08,
+       0xf10227f0,
+       0xf0120007,
+       0x02d00003,
+       0xf104bd00,
+       0xfe06c817,
+       0x24bd0010,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0002,
+       0x200327f1,
+       0x010007f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200427f1,
+       0x010407f1,
+       0xd00103f0,
+       0x04bd0002,
        0x200b27f1,
-       0xf10232d0,
-       0xd0200c27,
-       0x27f10732,
-       0x24b60c24,
-       0x0003b906,
-       0xf10023d0,
+       0x010807f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200c27f1,
+       0x011c07f1,
+       0xd00103f0,
+       0x04bd0002,
+       0xf1010392,
+       0xf0090007,
+       0x03d00303,
+       0xf104bd00,
        0xf0870427,
-       0x12d00023,
-       0x0012b700,
-       0x0427f001,
-       0xf40012d0,
-       0xe7f11031,
-       0xe3f09604,
-       0x6821f440,
-       0x8090f1c7,
-       0xf4f00301,
-       0x020f801f,
-       0xbb0117f0,
-       0x12b6041f,
-       0x0c27f101,
-       0x0624b604,
-       0xd00021d0,
-       0x17f14021,
-       0x0e980100,
-       0x010f9800,
-       0x014721f5,
-       0x070037f1,
-       0x950634b6,
-       0x34d00814,
-       0x4034d000,
-       0x130030b7,
-       0xb6001fbb,
-       0x3fd002f5,
-       0x0815b600,
-       0xb60110b6,
-       0x1fb90814,
-       0x7121f502,
-       0x001fbb02,
-       0xf1020398,
-       0xf0200047,
-/* 0x03f6: init_gpc */
-       0x4ea05043,
-       0x1fb90804,
-       0x8d21f402,
-       0x010c4ea0,
-       0x21f4f4bd,
-       0x044ea08d,
-       0x8d21f401,
-       0x01004ea0,
-       0xf402f7f0,
-       0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-       0x21f40800,
-       0x1fffc868,
-       0xa0fa0bf4,
-       0xf408044e,
-       0x1fbb6821,
-       0x0040b700,
-       0x0132b680,
-       0xf1be1bf4,
+       0x07f10023,
+       0x03f00400,
+       0x0002d000,
+       0x27f004bd,
+       0x0007f104,
+       0x0003f003,
+       0xbd0002d0,
+       0x1031f404,
+       0x9604e7f1,
+       0xf440e3f0,
+       0xfeb96821,
+       0x90f1c702,
+       0xf0030180,
+       0x0f801ff4,
+       0x0117f002,
+       0xb6041fbb,
+       0x07f10112,
+       0x03f00300,
+       0x0001d001,
+       0x07f104bd,
+       0x03f00400,
+       0x0001d001,
+       0x17f104bd,
+       0xf7f00100,
+       0xb521f502,
+       0xc721f507,
+       0x10f7f007,
+       0x081421f5,
+       0x98000e98,
+       0x21f5010f,
+       0x14950150,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0004d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0004d0,
+       0x0030b704,
+       0x001fbb13,
+       0xf102f5b6,
+       0xf0d30007,
+       0x0fd00103,
+       0xb604bd00,
+       0x10b60815,
+       0x0814b601,
+       0xf5021fb9,
+       0xbb02d321,
+       0x0398001f,
+       0x0047f102,
+       0x5043f020,
+/* 0x04f4: init_gpc */
+       0x08044ea0,
+       0xf4021fb9,
+       0x4ea09d21,
+       0xf4bd010c,
+       0xa09d21f4,
+       0xf401044e,
+       0x4ea09d21,
+       0xf7f00100,
+       0x9d21f402,
+       0x08004ea0,
+/* 0x051c: init_gpc_wait */
+       0xc86821f4,
+       0x0bf41fff,
+       0x044ea0fa,
+       0x6821f408,
+       0xb7001fbb,
+       0xb6800040,
+       0x1bf40132,
+       0x00f7f0be,
+       0x081421f5,
+       0xf500f7f0,
+       0xf107b521,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -519,402 +584,399 @@ uint32_t nvd7_grhub_code[] = {
        0x080007f1,
        0xd00203f0,
        0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
        0xf40031f4,
        0xd7f00028,
        0x3921f410,
        0xb1f401f4,
        0xf54001e4,
-       0xbd00de1b,
+       0xbd00e91b,
        0x0499f094,
        0x0f0007f1,
        0xd00203f0,
        0x04bd0009,
-       0x0b0017f1,
-       0xcf0614b6,
-       0x11cf4012,
-       0x1f13c800,
-       0x00870bf5,
-       0xf41f23c8,
-       0x20f9620b,
-       0xbd0212b9,
-       0x0799f094,
-       0x0f0007f1,
-       0xd00203f0,
-       0x04bd0009,
-       0xf40132f4,
-       0x21f50231,
-       0x94bd082f,
+       0xc00017f1,
+       0xcf0213f0,
+       0x27f10011,
+       0x23f0c100,
+       0x0022cf02,
+       0xf51f13c8,
+       0xc800890b,
+       0x0bf41f23,
+       0xb920f962,
+       0x94bd0212,
        0xf10799f0,
-       0xf0170007,
+       0xf00f0007,
        0x09d00203,
-       0xfc04bd00,
-       0xf094bd20,
-       0x07f10699,
-       0x03f00f00,
-       0x0009d002,
-       0x31f404bd,
-       0x2f21f501,
-       0xf094bd08,
-       0x07f10699,
+       0xf404bd00,
+       0x31f40132,
+       0xe821f502,
+       0xf094bd09,
+       0x07f10799,
        0x03f01700,
        0x0009d002,
-       0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-       0xb920f931,
-       0x32f40212,
-       0x0232f401,
-       0x082f21f5,
-       0x17f120fc,
-       0x14b60b00,
-       0x0012d006,
-/* 0x0517: chsw_no_prev */
-       0xc8130ef4,
-       0x0bf41f23,
-       0x0131f40d,
-       0xf50232f4,
-/* 0x0527: chsw_done */
-       0xf1082f21,
-       0xb60b0c17,
-       0x27f00614,
-       0x0012d001,
+       0x20fc04bd,
        0x99f094bd,
-       0x0007f104,
+       0x0007f106,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0131f404,
+       0x09e821f5,
+       0x99f094bd,
+       0x0007f106,
        0x0203f017,
        0xbd0009d0,
-       0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-       0x01e4b0ff,
-       0xb90d1bf4,
-       0x21f502f2,
-       0x0ef407bb,
-/* 0x0559: main_not_ctx_chan */
-       0x02e4b046,
-       0xbd321bf4,
-       0x0799f094,
-       0x0f0007f1,
+       0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+       0x12b920f9,
+       0x0132f402,
+       0xf50232f4,
+       0xfc09e821,
+       0x0007f120,
+       0x0203f0c0,
+       0xbd0002d0,
+       0x130ef404,
+/* 0x062c: chsw_no_prev */
+       0xf41f23c8,
+       0x31f40d0b,
+       0x0232f401,
+       0x09e821f5,
+/* 0x063c: chsw_done */
+       0xf10127f0,
+       0xf0c30007,
+       0x02d00203,
+       0xbd04bd00,
+       0x0499f094,
+       0x170007f1,
        0xd00203f0,
        0x04bd0009,
-       0xf40132f4,
-       0x21f50232,
-       0x94bd082f,
+       0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+       0xf401e4b0,
+       0xf2b90d1b,
+       0x7821f502,
+       0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+       0xf402e4b0,
+       0x94bd321b,
        0xf10799f0,
-       0xf0170007,
+       0xf00f0007,
        0x09d00203,
        0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-       0xef94110e,
-       0x01f5f010,
-       0x02fe21f5,
-       0xfec00ef5,
-/* 0x059c: main_done */
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f008,
-       0xbd0002d0,
-       0xab0ef504,
-/* 0x05b1: ih */
-       0xfe80f9fe,
-       0x80f90188,
-       0xa0f990f9,
-       0xd0f9b0f9,
-       0xf0f9e0f9,
-       0x0acf04bd,
-       0x04abc480,
-       0xf11d0bf4,
-       0xf01900b7,
-       0xbecf10d7,
-       0x00bfcf40,
+       0x32f40132,
+       0xe821f502,
+       0xf094bd09,
+       0x07f10799,
+       0x03f01700,
+       0x0009d002,
+       0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+       0x10ef9411,
+       0xf501f5f0,
+       0xf5037e21,
+/* 0x06b3: main_done */
+       0xbdfeb50e,
+       0x1f29f024,
+       0x080007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xfea00ef5,
+/* 0x06c8: ih */
+       0x88fe80f9,
+       0xf980f901,
+       0xf9a0f990,
+       0xf9d0f9b0,
+       0xbdf0f9e0,
+       0x00a7f104,
+       0x00a3f002,
+       0xc400aacf,
+       0x0bf404ab,
+       0x10d7f030,
+       0x1a00e7f1,
+       0xcf00e3f0,
+       0xf7f100ee,
+       0xf3f01900,
+       0x00ffcf00,
        0xb70421f4,
        0xf00400b0,
-       0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-       0x00abe400,
-       0x0d0bf401,
-       0xf110d7f0,
-       0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-       0xb7f10421,
-       0xb0bd0104,
-       0xf4b4abff,
-       0xa7f10d0b,
-       0xa4b60c1c,
-       0x00abd006,
-/* 0x0610: ih_no_other */
-       0xfc400ad0,
+       0x07f101e7,
+       0x03f01d00,
+       0x000ed000,
+/* 0x071a: ih_no_fifo */
+       0xabe404bd,
+       0x0bf40100,
+       0x10d7f00d,
+       0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+       0xe40421f4,
+       0xf40400ab,
+       0xb7f1140b,
+       0xbfb90100,
+       0x44e7f102,
+       0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+       0xf19d21f4,
+       0xbd0104b7,
+       0xb4abffb0,
+       0xf10f0bf4,
+       0xf0070007,
+       0x0bd00303,
+/* 0x075b: ih_no_other */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
        0xfce0fcf0,
        0xfcb0fcd0,
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x062b: ctx_4160s */
-       0xf101f800,
-       0xf04160e7,
-       0xf7f040e3,
-       0x8d21f401,
-/* 0x0638: ctx_4160s_wait */
-       0xc86821f4,
-       0x0bf404ff,
-/* 0x0643: ctx_4160c */
-       0xf100f8fa,
+/* 0x077f: ctx_4160s */
+       0xf001f800,
+       0xffb901f7,
+       0x60e7f102,
+       0x40e3f041,
+/* 0x078f: ctx_4160s_wait */
+       0xf19d21f4,
        0xf04160e7,
-       0xf4bd40e3,
-       0xf88d21f4,
-/* 0x0651: ctx_4170s */
-       0x70e7f100,
+       0x21f440e3,
+       0x02ffb968,
+       0xf404ffc8,
+       0x00f8f00b,
+/* 0x07a4: ctx_4160c */
+       0xffb9f4bd,
+       0x60e7f102,
        0x40e3f041,
-       0xf410f5f0,
-       0x00f88d21,
-/* 0x0660: ctx_4170w */
-       0x4170e7f1,
-       0xf440e3f0,
-       0xf4f06821,
-       0xf31bf410,
-/* 0x0672: ctx_redswitch */
-       0xe7f100f8,
-       0xe4b60614,
-       0x70f7f106,
-       0x00efd002,
-/* 0x0683: ctx_redswitch_delay */
-       0xb608f7f0,
-       0x1bf401f2,
-       0x70f7f1fd,
-       0x00efd007,
-/* 0x0692: ctx_86c */
-       0xe7f100f8,
-       0xe4b6086c,
-       0x00efd006,
-       0x8a14e7f1,
-       0xf440e3f0,
-       0xe7f18d21,
-       0xe3f0a86c,
-       0x8d21f441,
-/* 0x06b2: ctx_load */
+       0xf89d21f4,
+/* 0x07b5: ctx_4170s */
+       0x10f5f000,
+       0xf102ffb9,
+       0xf04170e7,
+       0x21f440e3,
+/* 0x07c7: ctx_4170w */
+       0xf100f89d,
+       0xf04170e7,
+       0x21f440e3,
+       0x02ffb968,
+       0xf410f4f0,
+       0x00f8f01b,
+/* 0x07dc: ctx_redswitch */
+       0x0200e7f1,
+       0xf040e5f0,
+       0xe5f020e5,
+       0x0007f110,
+       0x0103f085,
+       0xbd000ed0,
+       0x08f7f004,
+/* 0x07f8: ctx_redswitch_delay */
+       0xf401f2b6,
+       0xe5f1fd1b,
+       0xe5f10400,
+       0x07f10100,
+       0x03f08500,
+       0x000ed001,
+       0x00f804bd,
+/* 0x0814: ctx_86c */
+       0x1b0007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0xf102ffb9,
+       0xf08a14e7,
+       0x21f440e3,
+       0x02ffb99d,
+       0xa86ce7f1,
+       0xf441e3f0,
+       0x00f89d21,
+/* 0x083c: ctx_mem */
+       0x840007f1,
+       0xd00203f0,
+       0x04bd000f,
+/* 0x0848: ctx_mem_wait */
+       0x8400f7f1,
+       0xcf02f3f0,
+       0xfffd00ff,
+       0xf31bf405,
+/* 0x085a: ctx_load */
        0x94bd00f8,
        0xf10599f0,
        0xf00f0007,
        0x09d00203,
        0xf004bd00,
        0x21f40ca7,
-       0x2417f1c9,
-       0x0614b60a,
-       0xf10010d0,
-       0xb60b0037,
-       0x32d00634,
-       0x0c17f140,
-       0x0614b60a,
-       0xd00747f0,
-       0x14d00012,
-/* 0x06ed: ctx_chan_wait_0 */
-       0x4014cf40,
-       0xf41f44f0,
-       0x32d0fa1b,
-       0x000bfe00,
-       0xb61f2af0,
-       0x20b60424,
-       0xf094bd02,
+       0xf1f4bdd0,
+       0xf0890007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf0c10007,
+       0x02d00203,
+       0xf104bd00,
+       0xf0830007,
+       0x02d00203,
+       0xf004bd00,
+       0x21f507f7,
+       0x07f1083c,
+       0x03f0c000,
+       0x0002d002,
+       0x0bfe04bd,
+       0x1f2af000,
+       0xb60424b6,
+       0x94bd0220,
+       0xf10899f0,
+       0xf00f0007,
+       0x09d00203,
+       0xf104bd00,
+       0xf0810007,
+       0x02d00203,
+       0xf104bd00,
+       0xf1000027,
+       0xf0800023,
+       0x07f10225,
+       0x03f08800,
+       0x0002d002,
+       0x17f004bd,
+       0x0027f110,
+       0x0223f002,
+       0xf80512fa,
+       0xf094bd03,
        0x07f10899,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x17f104bd,
-       0x14b60a04,
-       0x0012d006,
-       0x0a2017f1,
-       0xf00614b6,
-       0x23f10227,
-       0x12d08000,
-       0x1017f000,
-       0x020027f1,
-       0xfa0223f0,
-       0x03f80512,
+       0x019804bd,
+       0x1814b681,
+       0xb6800298,
+       0x12fd0825,
+       0x16018005,
        0x99f094bd,
-       0x0007f108,
-       0x0203f017,
+       0x0007f109,
+       0x0203f00f,
        0xbd0009d0,
-       0x81019804,
-       0x981814b6,
-       0x25b68002,
-       0x0512fd08,
-       0xbd160180,
-       0x0999f094,
-       0x0f0007f1,
-       0xd00203f0,
-       0x04bd0009,
-       0x0a0427f1,
-       0xd00624b6,
-       0x27f00021,
-       0x2017f101,
-       0x0614b60a,
-       0xf10012d0,
-       0xf0010017,
-       0x01fa0613,
-       0xbd03f805,
-       0x0999f094,
-       0x170007f1,
+       0x0007f104,
+       0x0203f081,
+       0xbd0001d0,
+       0x0127f004,
+       0x880007f1,
        0xd00203f0,
-       0x04bd0009,
+       0x04bd0002,
+       0x010017f1,
+       0xfa0613f0,
+       0x03f80501,
        0x99f094bd,
-       0x0007f105,
+       0x0007f109,
        0x0203f017,
        0xbd0009d0,
-/* 0x07bb: ctx_chan */
-       0xf500f804,
-       0xf5062b21,
-       0xf006b221,
-       0x21f40ca7,
-       0x1017f1c9,
-       0x0614b60a,
-       0xd00527f0,
-/* 0x07d6: ctx_chan_wait */
-       0x12cf0012,
-       0x0522fd00,
-       0xf5fa1bf4,
-       0xf8064321,
-/* 0x07e5: ctx_mmio_exec */
-       0x41039800,
-       0x0a0427f1,
-       0xd00624b6,
-       0x34bd0023,
-/* 0x07f4: ctx_mmio_loop */
+       0xf094bd04,
+       0x07f10599,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0978: ctx_chan */
+       0x077f21f5,
+       0x085a21f5,
+       0xf40ca7f0,
+       0xf7f0d021,
+       0x3c21f505,
+       0xa421f508,
+/* 0x0993: ctx_mmio_exec */
+       0x9800f807,
+       0x07f14103,
+       0x03f08100,
+       0x0003d002,
+       0x34bd04bd,
+/* 0x09a4: ctx_mmio_loop */
        0xf4ff34c4,
        0x57f10f1b,
        0x53f00200,
        0x0535fa06,
-/* 0x0806: ctx_mmio_pull */
+/* 0x09b6: ctx_mmio_pull */
        0x4e9803f8,
        0x814f9880,
-       0xb68d21f4,
+       0xb69d21f4,
        0x12b60830,
        0xdf1bf401,
-/* 0x0818: ctx_mmio_done */
-       0xd0160398,
-       0x00800023,
-       0x0017f140,
-       0x0613f001,
-       0xf80601fa,
-/* 0x082f: ctx_xfer */
-       0xf100f803,
-       0xb60c00f7,
-       0xe7f006f4,
-       0x80fed004,
-/* 0x083c: ctx_xfer_idle */
-       0xf100fecf,
-       0xf42000e4,
-       0x11f4f91b,
-       0x1102f406,
-/* 0x084c: ctx_xfer_pre */
-       0xf510f7f0,
-       0xf5069221,
-       0xf4062b21,
-/* 0x085a: ctx_xfer_pre_load */
-       0xf7f01c11,
-       0x5121f502,
-       0x6021f506,
-       0x7221f506,
-       0xf5f4bd06,
-       0xf5065121,
-/* 0x0873: ctx_xfer_exec */
-       0x9806b221,
-       0x27f11601,
-       0x24b60414,
-       0x0020d006,
-       0xa500e7f1,
-       0xb941e3f0,
-       0x21f4021f,
-       0x04e0b68d,
-       0xf001fcf0,
-       0x24b6022c,
-       0x05f2fd01,
-       0xf18d21f4,
-       0xf04afc17,
-       0x27f00213,
-       0x0012d00c,
-       0x021521f5,
-       0x47fc27f1,
-       0xd00223f0,
-       0x2cf00020,
+/* 0x09c8: ctx_mmio_done */
+       0xf1160398,
+       0xf0810007,
+       0x03d00203,
+       0x8004bd00,
+       0x17f14000,
+       0x13f00100,
+       0x0601fa06,
+       0x00f803f8,
+/* 0x09e8: ctx_xfer */
+       0xf104e7f0,
+       0xf0020007,
+       0x0ed00303,
+/* 0x09f7: ctx_xfer_idle */
+       0xf104bd00,
+       0xf00000e7,
+       0xeecf03e3,
+       0x00e4f100,
+       0xf21bf420,
+       0xf40611f4,
+/* 0x0a0e: ctx_xfer_pre */
+       0xf7f01102,
+       0x1421f510,
+       0x7f21f508,
+       0x1c11f407,
+/* 0x0a1c: ctx_xfer_pre_load */
+       0xf502f7f0,
+       0xf507b521,
+       0xf507c721,
+       0xbd07dc21,
+       0xb521f5f4,
+       0x5a21f507,
+/* 0x0a35: ctx_xfer_exec */
+       0x16019808,
+       0x07f124bd,
+       0x03f00500,
+       0x0002d001,
+       0x1fb904bd,
+       0x00e7f102,
+       0x41e3f0a5,
+       0xf09d21f4,
+       0x2cf001fc,
+       0x0124b602,
+       0xb905f2fd,
+       0xe7f102ff,
+       0xe3f0a504,
+       0x9d21f441,
+       0x026a21f5,
+       0x07f124bd,
+       0x03f047fc,
+       0x0002d002,
+       0x2cf004bd,
        0x0320b601,
-       0xf00012d0,
-       0xa5f001ac,
-       0x00b7f006,
-       0x98000c98,
-       0xe7f0010d,
-       0x6621f500,
-       0x08a7f001,
-       0x010921f5,
-       0x021521f5,
-       0xf02201f4,
-       0x21f40ca7,
-       0x1017f1c9,
-       0x0614b60a,
-       0xd00527f0,
-/* 0x08fa: ctx_xfer_post_save_wait */
-       0x12cf0012,
-       0x0522fd00,
-       0xf4fa1bf4,
-/* 0x0906: ctx_xfer_post */
-       0xf7f03202,
-       0x5121f502,
-       0xf5f4bd06,
-       0xf5069221,
-       0xf5023421,
-       0xbd066021,
-       0x5121f5f4,
-       0x1011f406,
-       0xfd400198,
-       0x0bf40511,
-       0xe521f507,
-/* 0x0931: ctx_xfer_no_post_mmio */
-       0x4321f507,
-/* 0x0935: ctx_xfer_done */
-       0x0000f806,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xf001acf0,
+       0xb7f006a5,
+       0x000c9800,
+       0xf0010d98,
+       0x21f500e7,
+       0xa7f0016f,
+       0x1021f508,
+       0x5e21f501,
+       0x1301f402,
+       0xf40ca7f0,
+       0xf7f0d021,
+       0x3c21f505,
+       0x3202f408,
+/* 0x0ac4: ctx_xfer_post */
+       0xf502f7f0,
+       0xbd07b521,
+       0x1421f5f4,
+       0x7f21f508,
+       0xc721f502,
+       0xf5f4bd07,
+       0xf407b521,
+       0x01981011,
+       0x0511fd40,
+       0xf5070bf4,
+/* 0x0aef: ctx_xfer_no_post_mmio */
+       0xf5099321,
+/* 0x0af3: ctx_xfer_done */
+       0xf807a421,
        0x00000000,
        0x00000000,
        0x00000000,
index eb7bc0e9576eab1a841bb93fcb3af4e3f924b6d0..1c179bdd48cc2a0ddd3228a3e8bfc0005f1bf06a 100644 (file)
@@ -206,14 +206,14 @@ uint32_t nve0_grhub_data[] = {
 };
 
 uint32_t nve0_grhub_code[] = {
-       0x031b0ef5,
+       0x039b0ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nve0_grhub_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f00f00,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f00f00,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f00f,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf00f0007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f00f00,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x0f0007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nve0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0x07f100f8,
        0x03f00500,
        0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nve0_grhub_code[] = {
        0x0007f101,
        0x0303f007,
        0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
        0xbd00f804,
-       0x0004fe04,
-       0xf10007fe,
-       0xf0120017,
-       0x12d00227,
-       0xb117f100,
-       0x0010fe05,
-       0x040017f1,
-       0xf1c010d0,
-       0xb6040437,
-       0x27f10634,
-       0x32d02003,
-       0x0427f100,
-       0x0132d020,
+       0x0007fe04,
+       0x420017f1,
+       0xcf0013f0,
+       0x11e70011,
+       0x14b60109,
+       0x0014fe08,
+       0xf10227f0,
+       0xf0120007,
+       0x02d00003,
+       0xf104bd00,
+       0xfe06c817,
+       0x24bd0010,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0002,
+       0x200327f1,
+       0x010007f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200427f1,
+       0x010407f1,
+       0xd00103f0,
+       0x04bd0002,
        0x200b27f1,
-       0xf10232d0,
-       0xd0200c27,
-       0x27f10732,
-       0x24b60c24,
-       0x0003b906,
-       0xf10023d0,
+       0x010807f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200c27f1,
+       0x011c07f1,
+       0xd00103f0,
+       0x04bd0002,
+       0xf1010392,
+       0xf0090007,
+       0x03d00303,
+       0xf104bd00,
        0xf0870427,
-       0x12d00023,
-       0x0012b700,
-       0x0427f001,
-       0xf40012d0,
-       0xe7f11031,
-       0xe3f09604,
-       0x6821f440,
-       0x8090f1c7,
-       0xf4f00301,
-       0x020f801f,
-       0xbb0117f0,
-       0x12b6041f,
-       0x0c27f101,
-       0x0624b604,
-       0xd00021d0,
-       0x17f14021,
-       0x0e980100,
-       0x010f9800,
-       0x014721f5,
-       0x070037f1,
-       0x950634b6,
-       0x34d00814,
-       0x4034d000,
-       0x130030b7,
-       0xb6001fbb,
-       0x3fd002f5,
-       0x0815b600,
-       0xb60110b6,
-       0x1fb90814,
-       0x7121f502,
-       0x001fbb02,
-       0xf1020398,
-       0xf0200047,
-/* 0x03f6: init_gpc */
-       0x4ea05043,
-       0x1fb90804,
-       0x8d21f402,
-       0x010c4ea0,
-       0x21f4f4bd,
-       0x044ea08d,
-       0x8d21f401,
-       0x01004ea0,
-       0xf402f7f0,
-       0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-       0x21f40800,
-       0x1fffc868,
-       0xa0fa0bf4,
-       0xf408044e,
-       0x1fbb6821,
-       0x0040b700,
-       0x0132b680,
-       0xf1be1bf4,
+       0x07f10023,
+       0x03f00400,
+       0x0002d000,
+       0x27f004bd,
+       0x0007f104,
+       0x0003f003,
+       0xbd0002d0,
+       0x1031f404,
+       0x9604e7f1,
+       0xf440e3f0,
+       0xfeb96821,
+       0x90f1c702,
+       0xf0030180,
+       0x0f801ff4,
+       0x0117f002,
+       0xb6041fbb,
+       0x07f10112,
+       0x03f00300,
+       0x0001d001,
+       0x07f104bd,
+       0x03f00400,
+       0x0001d001,
+       0x17f104bd,
+       0xf7f00100,
+       0x7f21f502,
+       0x9121f507,
+       0x10f7f007,
+       0x07de21f5,
+       0x98000e98,
+       0x21f5010f,
+       0x14950150,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0004d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0004d0,
+       0x0030b704,
+       0x001fbb13,
+       0xf102f5b6,
+       0xf0d30007,
+       0x0fd00103,
+       0xb604bd00,
+       0x10b60815,
+       0x0814b601,
+       0xf5021fb9,
+       0xbb02d321,
+       0x0398001f,
+       0x0047f102,
+       0x5043f020,
+/* 0x04f4: init_gpc */
+       0x08044ea0,
+       0xf4021fb9,
+       0x4ea09d21,
+       0xf4bd010c,
+       0xa09d21f4,
+       0xf401044e,
+       0x4ea09d21,
+       0xf7f00100,
+       0x9d21f402,
+       0x08004ea0,
+/* 0x051c: init_gpc_wait */
+       0xc86821f4,
+       0x0bf41fff,
+       0x044ea0fa,
+       0x6821f408,
+       0xb7001fbb,
+       0xb6800040,
+       0x1bf40132,
+       0x00f7f0be,
+       0x07de21f5,
+       0xf500f7f0,
+       0xf1077f21,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -519,382 +584,379 @@ uint32_t nve0_grhub_code[] = {
        0x080007f1,
        0xd00203f0,
        0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
        0xf40031f4,
        0xd7f00028,
        0x3921f410,
        0xb1f401f4,
        0xf54001e4,
-       0xbd00de1b,
+       0xbd00e91b,
        0x0499f094,
        0x0f0007f1,
        0xd00203f0,
        0x04bd0009,
-       0x0b0017f1,
-       0xcf0614b6,
-       0x11cf4012,
-       0x1f13c800,
-       0x00870bf5,
-       0xf41f23c8,
-       0x20f9620b,
-       0xbd0212b9,
-       0x0799f094,
-       0x0f0007f1,
-       0xd00203f0,
-       0x04bd0009,
-       0xf40132f4,
-       0x21f50231,
-       0x94bd0801,
+       0xc00017f1,
+       0xcf0213f0,
+       0x27f10011,
+       0x23f0c100,
+       0x0022cf02,
+       0xf51f13c8,
+       0xc800890b,
+       0x0bf41f23,
+       0xb920f962,
+       0x94bd0212,
        0xf10799f0,
-       0xf0170007,
+       0xf00f0007,
        0x09d00203,
-       0xfc04bd00,
-       0xf094bd20,
-       0x07f10699,
-       0x03f00f00,
-       0x0009d002,
-       0x31f404bd,
-       0x0121f501,
-       0xf094bd08,
-       0x07f10699,
+       0xf404bd00,
+       0x31f40132,
+       0xaa21f502,
+       0xf094bd09,
+       0x07f10799,
        0x03f01700,
        0x0009d002,
-       0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-       0xb920f931,
-       0x32f40212,
-       0x0232f401,
-       0x080121f5,
-       0x17f120fc,
-       0x14b60b00,
-       0x0012d006,
-/* 0x0517: chsw_no_prev */
-       0xc8130ef4,
-       0x0bf41f23,
-       0x0131f40d,
-       0xf50232f4,
-/* 0x0527: chsw_done */
-       0xf1080121,
-       0xb60b0c17,
-       0x27f00614,
-       0x0012d001,
+       0x20fc04bd,
        0x99f094bd,
-       0x0007f104,
+       0x0007f106,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0131f404,
+       0x09aa21f5,
+       0x99f094bd,
+       0x0007f106,
        0x0203f017,
        0xbd0009d0,
-       0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-       0x01e4b0ff,
-       0xb90d1bf4,
-       0x21f502f2,
-       0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
-       0x02e4b046,
-       0xbd321bf4,
-       0x0799f094,
-       0x0f0007f1,
+       0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+       0x12b920f9,
+       0x0132f402,
+       0xf50232f4,
+       0xfc09aa21,
+       0x0007f120,
+       0x0203f0c0,
+       0xbd0002d0,
+       0x130ef404,
+/* 0x062c: chsw_no_prev */
+       0xf41f23c8,
+       0x31f40d0b,
+       0x0232f401,
+       0x09aa21f5,
+/* 0x063c: chsw_done */
+       0xf10127f0,
+       0xf0c30007,
+       0x02d00203,
+       0xbd04bd00,
+       0x0499f094,
+       0x170007f1,
        0xd00203f0,
        0x04bd0009,
-       0xf40132f4,
-       0x21f50232,
-       0x94bd0801,
+       0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+       0xf401e4b0,
+       0xf2b90d1b,
+       0x4221f502,
+       0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+       0xf402e4b0,
+       0x94bd321b,
        0xf10799f0,
-       0xf0170007,
+       0xf00f0007,
        0x09d00203,
        0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-       0xef94110e,
-       0x01f5f010,
-       0x02fe21f5,
-       0xfec00ef5,
-/* 0x059c: main_done */
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f008,
-       0xbd0002d0,
-       0xab0ef504,
-/* 0x05b1: ih */
-       0xfe80f9fe,
-       0x80f90188,
-       0xa0f990f9,
-       0xd0f9b0f9,
-       0xf0f9e0f9,
-       0x0acf04bd,
-       0x04abc480,
-       0xf11d0bf4,
-       0xf01900b7,
-       0xbecf10d7,
-       0x00bfcf40,
+       0x32f40132,
+       0xaa21f502,
+       0xf094bd09,
+       0x07f10799,
+       0x03f01700,
+       0x0009d002,
+       0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+       0x10ef9411,
+       0xf501f5f0,
+       0xf5037e21,
+/* 0x06b3: main_done */
+       0xbdfeb50e,
+       0x1f29f024,
+       0x080007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xfea00ef5,
+/* 0x06c8: ih */
+       0x88fe80f9,
+       0xf980f901,
+       0xf9a0f990,
+       0xf9d0f9b0,
+       0xbdf0f9e0,
+       0x00a7f104,
+       0x00a3f002,
+       0xc400aacf,
+       0x0bf404ab,
+       0x10d7f030,
+       0x1a00e7f1,
+       0xcf00e3f0,
+       0xf7f100ee,
+       0xf3f01900,
+       0x00ffcf00,
        0xb70421f4,
        0xf00400b0,
-       0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-       0x00abe400,
-       0x0d0bf401,
-       0xf110d7f0,
-       0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-       0xb7f10421,
-       0xb0bd0104,
-       0xf4b4abff,
-       0xa7f10d0b,
-       0xa4b60c1c,
-       0x00abd006,
-/* 0x0610: ih_no_other */
-       0xfc400ad0,
+       0x07f101e7,
+       0x03f01d00,
+       0x000ed000,
+/* 0x071a: ih_no_fifo */
+       0xabe404bd,
+       0x0bf40100,
+       0x10d7f00d,
+       0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+       0xe40421f4,
+       0xf40400ab,
+       0xb7f1140b,
+       0xbfb90100,
+       0x44e7f102,
+       0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+       0xf19d21f4,
+       0xbd0104b7,
+       0xb4abffb0,
+       0xf10f0bf4,
+       0xf0070007,
+       0x0bd00303,
+/* 0x075b: ih_no_other */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
        0xfce0fcf0,
        0xfcb0fcd0,
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x062b: ctx_4170s */
-       0xf101f800,
-       0xf04170e7,
-       0xf5f040e3,
-       0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+       0xf001f800,
+       0xffb910f5,
+       0x70e7f102,
+       0x40e3f041,
+       0xf89d21f4,
+/* 0x0791: ctx_4170w */
+       0x70e7f100,
+       0x40e3f041,
+       0xb96821f4,
+       0xf4f002ff,
+       0xf01bf410,
+/* 0x07a6: ctx_redswitch */
        0xe7f100f8,
-       0xe3f04170,
-       0x6821f440,
-       0xf410f4f0,
+       0xe5f00200,
+       0x20e5f040,
+       0xf110e5f0,
+       0xf0850007,
+       0x0ed00103,
+       0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+       0xf2b608f7,
+       0xfd1bf401,
+       0x0400e5f1,
+       0x0100e5f1,
+       0x850007f1,
+       0xd00103f0,
+       0x04bd000e,
+/* 0x07de: ctx_86c */
+       0x07f100f8,
+       0x03f01b00,
+       0x000fd002,
+       0xffb904bd,
+       0x14e7f102,
+       0x40e3f08a,
+       0xb99d21f4,
+       0xe7f102ff,
+       0xe3f0a86c,
+       0x9d21f441,
+/* 0x0806: ctx_mem */
+       0x07f100f8,
+       0x03f08400,
+       0x000fd002,
+/* 0x0812: ctx_mem_wait */
+       0xf7f104bd,
+       0xf3f08400,
+       0x00ffcf02,
+       0xf405fffd,
        0x00f8f31b,
-/* 0x064c: ctx_redswitch */
-       0x0614e7f1,
-       0xf106e4b6,
-       0xd00270f7,
-       0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
-       0x01f2b608,
-       0xf1fd1bf4,
-       0xd00770f7,
-       0x00f800ef,
-/* 0x066c: ctx_86c */
-       0x086ce7f1,
-       0xd006e4b6,
-       0xe7f100ef,
-       0xe3f08a14,
-       0x8d21f440,
-       0xa86ce7f1,
-       0xf441e3f0,
-       0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
        0x99f094bd,
        0x0007f105,
        0x0203f00f,
        0xbd0009d0,
        0x0ca7f004,
-       0xf1c921f4,
-       0xb60a2417,
-       0x10d00614,
-       0x0037f100,
-       0x0634b60b,
-       0xf14032d0,
-       0xb60a0c17,
-       0x47f00614,
-       0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
-       0xcf4014d0,
-       0x44f04014,
-       0xfa1bf41f,
-       0xfe0032d0,
-       0x2af0000b,
-       0x0424b61f,
-       0xbd0220b6,
+       0xbdd021f4,
+       0x0007f1f4,
+       0x0203f089,
+       0xbd000fd0,
+       0x0007f104,
+       0x0203f0c1,
+       0xbd0002d0,
+       0x0007f104,
+       0x0203f083,
+       0xbd0002d0,
+       0x07f7f004,
+       0x080621f5,
+       0xc00007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xf0000bfe,
+       0x24b61f2a,
+       0x0220b604,
+       0x99f094bd,
+       0x0007f108,
+       0x0203f00f,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f081,
+       0xbd0002d0,
+       0x0027f104,
+       0x0023f100,
+       0x0225f080,
+       0x880007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xf11017f0,
+       0xf0020027,
+       0x12fa0223,
+       0xbd03f805,
        0x0899f094,
-       0x0f0007f1,
+       0x170007f1,
        0xd00203f0,
        0x04bd0009,
-       0x0a0417f1,
-       0xd00614b6,
-       0x17f10012,
-       0x14b60a20,
-       0x0227f006,
-       0x800023f1,
-       0xf00012d0,
-       0x27f11017,
-       0x23f00200,
-       0x0512fa02,
-       0x94bd03f8,
-       0xf10899f0,
-       0xf0170007,
+       0xb6810198,
+       0x02981814,
+       0x0825b680,
+       0x800512fd,
+       0x94bd1601,
+       0xf10999f0,
+       0xf00f0007,
        0x09d00203,
-       0x9804bd00,
-       0x14b68101,
-       0x80029818,
-       0xfd0825b6,
-       0x01800512,
-       0xf094bd16,
-       0x07f10999,
-       0x03f00f00,
-       0x0009d002,
-       0x27f104bd,
-       0x24b60a04,
-       0x0021d006,
-       0xf10127f0,
-       0xb60a2017,
-       0x12d00614,
-       0x0017f100,
-       0x0613f001,
-       0xf80501fa,
-       0xf094bd03,
-       0x07f10999,
-       0x03f01700,
-       0x0009d002,
-       0x94bd04bd,
-       0xf10599f0,
+       0xf104bd00,
+       0xf0810007,
+       0x01d00203,
+       0xf004bd00,
+       0x07f10127,
+       0x03f08800,
+       0x0002d002,
+       0x17f104bd,
+       0x13f00100,
+       0x0501fa06,
+       0x94bd03f8,
+       0xf10999f0,
        0xf0170007,
        0x09d00203,
-       0xf804bd00,
-/* 0x0795: ctx_chan */
-       0x8c21f500,
-       0x0ca7f006,
-       0xf1c921f4,
-       0xb60a1017,
-       0x27f00614,
-       0x0012d005,
-/* 0x07ac: ctx_chan_wait */
-       0xfd0012cf,
-       0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
-       0x9800f8fa,
-       0x27f14103,
-       0x24b60a04,
-       0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+       0xbd04bd00,
+       0x0599f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x0942: ctx_chan */
+       0x21f500f8,
+       0xa7f00824,
+       0xd021f40c,
+       0xf505f7f0,
+       0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+       0x41039800,
+       0x810007f1,
+       0xd00203f0,
+       0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
        0x34c434bd,
        0x0f1bf4ff,
        0x020057f1,
        0xfa0653f0,
        0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
        0x98804e98,
        0x21f4814f,
-       0x0830b68d,
+       0x0830b69d,
        0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
        0x0398df1b,
-       0x0023d016,
-       0xf1400080,
-       0xf0010017,
-       0x01fa0613,
-       0xf803f806,
-/* 0x0801: ctx_xfer */
-       0x00f7f100,
-       0x06f4b60c,
-       0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
-       0xfecf80fe,
-       0x00e4f100,
-       0xf91bf420,
-       0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
-       0xf7f00d02,
-       0x6c21f510,
-       0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
-       0xf502f7f0,
-       0xf5062b21,
-       0xf5063a21,
-       0xbd064c21,
-       0x2b21f5f4,
-       0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
-       0x16019806,
-       0x041427f1,
-       0xd00624b6,
-       0xe7f10020,
-       0xe3f0a500,
-       0x021fb941,
-       0xb68d21f4,
-       0xfcf004e0,
-       0x022cf001,
-       0xfd0124b6,
-       0x21f405f2,
-       0xfc17f18d,
-       0x0213f04a,
-       0xd00c27f0,
-       0x21f50012,
-       0x27f10215,
-       0x23f047fc,
-       0x0020d002,
+       0x0007f116,
+       0x0203f081,
+       0xbd0003d0,
+       0x40008004,
+       0x010017f1,
+       0xfa0613f0,
+       0x03f80601,
+/* 0x09aa: ctx_xfer */
+       0xe7f000f8,
+       0x0007f104,
+       0x0303f002,
+       0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+       0x00e7f104,
+       0x03e3f000,
+       0xf100eecf,
+       0xf42000e4,
+       0x11f4f21b,
+       0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+       0xf510f7f0,
+       0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+       0xf7f01c11,
+       0x7f21f502,
+       0x9121f507,
+       0xa621f507,
+       0xf5f4bd07,
+       0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+       0x98082421,
+       0x24bd1601,
+       0x050007f1,
+       0xd00103f0,
+       0x04bd0002,
+       0xf1021fb9,
+       0xf0a500e7,
+       0x21f441e3,
+       0x01fcf09d,
+       0xb6022cf0,
+       0xf2fd0124,
+       0x02ffb905,
+       0xa504e7f1,
+       0xf441e3f0,
+       0x21f59d21,
+       0x24bd026a,
+       0x47fc07f1,
+       0xd00203f0,
+       0x04bd0002,
        0xb6012cf0,
-       0x12d00320,
-       0x01acf000,
-       0xf006a5f0,
-       0x0c9800b7,
-       0x010d9800,
-       0xf500e7f0,
-       0xf0016621,
-       0x21f508a7,
-       0x21f50109,
-       0x01f40215,
-       0x0ca7f022,
-       0xf1c921f4,
-       0xb60a1017,
-       0x27f00614,
-       0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
-       0xfd0012cf,
-       0x1bf40522,
-       0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
-       0xf502f7f0,
-       0xbd062b21,
-       0x6c21f5f4,
-       0x3421f506,
-       0x3a21f502,
-       0xf5f4bd06,
-       0xf4062b21,
-       0x01981011,
-       0x0511fd40,
-       0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
-       0xf807b721,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x07f10320,
+       0x03f04afc,
+       0x0002d002,
+       0xacf004bd,
+       0x06a5f001,
+       0x9800b7f0,
+       0x0d98000c,
+       0x00e7f001,
+       0x016f21f5,
+       0xf508a7f0,
+       0xf5011021,
+       0xf4025e21,
+       0xa7f01301,
+       0xd021f40c,
+       0xf505f7f0,
+       0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+       0xf7f02e02,
+       0x7f21f502,
+       0xf5f4bd07,
+       0xf507de21,
+       0xf5027f21,
+       0xbd079121,
+       0x7f21f5f4,
+       0x1011f407,
+       0xfd400198,
+       0x0bf40511,
+       0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+       0x0000f809,
        0x00000000,
        0x00000000,
        0x00000000,
index 438506d1474992af9d35ee538282594984a4d81f..229c0ae3722844c16d39e33fb669b677bcefa13c 100644 (file)
@@ -206,14 +206,14 @@ uint32_t nvf0_grhub_data[] = {
 };
 
 uint32_t nvf0_grhub_code[] = {
-       0x031b0ef5,
+       0x039b0ef5,
 /* 0x0004: queue_put */
        0x9800d898,
        0x86f001d9,
        0x0489b808,
        0xf00c1bf4,
        0x21f502f7,
-       0x00f802fe,
+       0x00f8037e,
 /* 0x001c: queue_put_next */
        0xb60798c4,
        0x8dbb0384,
@@ -237,184 +237,214 @@ uint32_t nvf0_grhub_code[] = {
 /* 0x0066: queue_get_done */
        0x00f80132,
 /* 0x0068: nv_rd32 */
-       0x0728b7f1,
-       0xb906b4b6,
-       0xc9f002ec,
-       0x00bcd01f,
-/* 0x0078: nv_rd32_wait */
-       0xc800bccf,
-       0x1bf41fcc,
-       0x06a7f0fa,
-       0x010921f5,
-       0xf840bfcf,
-/* 0x008d: nv_wr32 */
-       0x28b7f100,
-       0x06b4b607,
-       0xb980bfd0,
-       0xc9f002ec,
-       0x1ec9f01f,
-/* 0x00a3: nv_wr32_wait */
-       0xcf00bcd0,
-       0xccc800bc,
-       0xfa1bf41f,
-/* 0x00ae: watchdog_reset */
-       0x87f100f8,
-       0x84b60430,
-       0x1ff9f006,
-       0xf8008fd0,
-/* 0x00bd: watchdog_clear */
-       0x3087f100,
-       0x0684b604,
-       0xf80080d0,
-/* 0x00c9: wait_donez */
-       0xf094bd00,
-       0x07f10099,
-       0x03f03700,
-       0x0009d002,
-       0x07f104bd,
-       0x03f00600,
-       0x000ad002,
-/* 0x00e6: wait_donez_ne */
-       0x87f104bd,
-       0x83f00000,
-       0x0088cf01,
-       0xf4888aff,
-       0x94bdf31b,
-       0xf10099f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0109: wait_doneo */
-       0xf094bd00,
+       0xf002ecb9,
+       0x07f11fc9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x007a: nv_rd32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0xa7f0f31b,
+       0x1021f506,
+       0x00f7f101,
+       0x01f3f0cb,
+       0xf800ffcf,
+/* 0x009d: nv_wr32 */
+       0x0007f100,
+       0x0103f0cc,
+       0xbd000fd0,
+       0x02ecb904,
+       0xf01fc9f0,
+       0x07f11ec9,
+       0x03f0ca00,
+       0x000cd001,
+/* 0x00be: nv_wr32_wait */
+       0xc7f104bd,
+       0xc3f0ca00,
+       0x00cccf01,
+       0xf41fccc8,
+       0x00f8f31b,
+/* 0x00d0: wait_donez */
+       0x99f094bd,
+       0x0007f100,
+       0x0203f037,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x00ed: wait_donez_ne */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x1bf4888a,
+       0xf094bdf3,
        0x07f10099,
-       0x03f03700,
+       0x03f01700,
        0x0009d002,
-       0x87f104bd,
-       0x84b60818,
-       0x008ad006,
-/* 0x0124: wait_doneo_e */
-       0x040087f1,
-       0xcf0684b6,
-       0x8aff0088,
-       0xf30bf488,
+       0x00f804bd,
+/* 0x0110: wait_doneo */
        0x99f094bd,
        0x0007f100,
-       0x0203f017,
+       0x0203f037,
        0xbd0009d0,
-/* 0x0147: mmctx_size */
-       0xbd00f804,
-/* 0x0149: nv_mmctx_size_loop */
-       0x00e89894,
-       0xb61a85b6,
-       0x84b60180,
-       0x0098bb02,
-       0xb804e0b6,
-       0x1bf404ef,
-       0x029fb9eb,
-/* 0x0166: mmctx_xfer */
-       0x94bd00f8,
-       0xf10199f0,
-       0xf0370007,
-       0x09d00203,
-       0xf104bd00,
-       0xb6071087,
-       0x94bd0684,
-       0xf405bbfd,
-       0x8bd0090b,
-       0x0099f000,
-/* 0x018c: mmctx_base_disabled */
-       0xf405eefd,
-       0x8ed00c0b,
-       0xc08fd080,
-/* 0x019b: mmctx_multi_disabled */
-       0xb70199f0,
-       0xc8010080,
+       0x0007f104,
+       0x0203f006,
+       0xbd000ad0,
+/* 0x012d: wait_doneo_e */
+       0x0087f104,
+       0x0183f000,
+       0xff0088cf,
+       0x0bf4888a,
+       0xf094bdf3,
+       0x07f10099,
+       0x03f01700,
+       0x0009d002,
+       0x00f804bd,
+/* 0x0150: mmctx_size */
+/* 0x0152: nv_mmctx_size_loop */
+       0xe89894bd,
+       0x1a85b600,
+       0xb60180b6,
+       0x98bb0284,
+       0x04e0b600,
+       0xf404efb8,
+       0x9fb9eb1b,
+/* 0x016f: mmctx_xfer */
+       0xbd00f802,
+       0x0199f094,
+       0x370007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0xbbfd94bd,
+       0x120bf405,
+       0xc40007f1,
+       0xd00103f0,
+       0x04bd000b,
+/* 0x0197: mmctx_base_disabled */
+       0xfd0099f0,
+       0x0bf405ee,
+       0x0007f11e,
+       0x0103f0c6,
+       0xbd000ed0,
+       0x0007f104,
+       0x0103f0c7,
+       0xbd000fd0,
+       0x0199f004,
+/* 0x01b8: mmctx_multi_disabled */
+       0xb600abc8,
+       0xb9f010b4,
+       0x01aec80c,
+       0xfd11e4b6,
+       0x07f105be,
+       0x03f0c500,
+       0x000bd001,
+/* 0x01d6: mmctx_exec_loop */
+/* 0x01d6: mmctx_wait_free */
+       0xe7f104bd,
+       0xe3f0c500,
+       0x00eecf01,
+       0xf41fe4f0,
+       0xce98f30b,
+       0x05e9fd00,
+       0xc80007f1,
+       0xd00103f0,
+       0x04bd000e,
+       0xb804c0b6,
+       0x1bf404cd,
+       0x02abc8d8,
+/* 0x0207: mmctx_fini_wait */
+       0xf11f1bf4,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x1fb4f000,
+       0xf410b4b0,
+       0xa7f0f01b,
+       0xd021f402,
+/* 0x0223: mmctx_stop */
+       0xc82b0ef4,
        0xb4b600ab,
        0x0cb9f010,
-       0xb601aec8,
-       0xbefd11e4,
-       0x008bd005,
-/* 0x01b4: mmctx_exec_loop */
-/* 0x01b4: mmctx_wait_free */
-       0xf0008ecf,
-       0x0bf41fe4,
-       0x00ce98fa,
-       0xd005e9fd,
-       0xc0b6c08e,
-       0x04cdb804,
-       0xc8e81bf4,
-       0x1bf402ab,
-/* 0x01d5: mmctx_fini_wait */
-       0x008bcf18,
-       0xb01fb4f0,
-       0x1bf410b4,
-       0x02a7f0f7,
-       0xf4c921f4,
-/* 0x01ea: mmctx_stop */
-       0xabc81b0e,
-       0x10b4b600,
-       0xf00cb9f0,
-       0x8bd012b9,
-/* 0x01f9: mmctx_stop_wait */
-       0x008bcf00,
-       0xf412bbc8,
-/* 0x0202: mmctx_done */
-       0x94bdfa1b,
-       0xf10199f0,
-       0xf0170007,
-       0x09d00203,
-       0xf804bd00,
-/* 0x0215: strand_wait */
-       0xf0a0f900,
-       0x21f402a7,
-       0xf8a0fcc9,
-/* 0x0221: strand_pre */
-       0xfc87f100,
-       0x0283f04a,
-       0xd00c97f0,
-       0x21f50089,
-       0x00f80215,
-/* 0x0234: strand_post */
-       0x4afc87f1,
-       0xf00283f0,
-       0x89d00d97,
-       0x1521f500,
-/* 0x0247: strand_set */
-       0xf100f802,
-       0xf04ffca7,
-       0xaba202a3,
-       0xc7f00500,
-       0x00acd00f,
-       0xd00bc7f0,
-       0x21f500bc,
-       0xaed00215,
-       0x0ac7f000,
-       0xf500bcd0,
-       0xf8021521,
-/* 0x0271: strand_ctx_init */
-       0xf094bd00,
-       0x07f10399,
-       0x03f03700,
+       0xf112b9f0,
+       0xf0c50007,
+       0x0bd00103,
+/* 0x023b: mmctx_stop_wait */
+       0xf104bd00,
+       0xf0c500b7,
+       0xbbcf01b3,
+       0x12bbc800,
+/* 0x024b: mmctx_done */
+       0xbdf31bf4,
+       0x0199f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x025e: strand_wait */
+       0xa0f900f8,
+       0xf402a7f0,
+       0xa0fcd021,
+/* 0x026a: strand_pre */
+       0x97f000f8,
+       0xfc07f10c,
+       0x0203f04a,
+       0xbd0009d0,
+       0x5e21f504,
+/* 0x027f: strand_post */
+       0xf000f802,
+       0x07f10d97,
+       0x03f04afc,
        0x0009d002,
        0x21f504bd,
-       0xe7f00221,
-       0x4721f503,
-       0xfca7f102,
-       0x02a3f046,
-       0x0400aba0,
-       0xf040a0d0,
-       0xbcd001c7,
-       0x1521f500,
-       0x010c9202,
-       0xf000acd0,
-       0xbcd002c7,
-       0x1521f500,
-       0x3421f502,
-       0x8087f102,
-       0x0684b608,
-       0xb70089cf,
-       0x95220080,
-/* 0x02ca: ctx_init_strand_loop */
+       0x00f8025e,
+/* 0x0294: strand_set */
+       0xf10fc7f0,
+       0xf04ffc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f10bc7,
+       0x03f04afc,
+       0x000cd002,
+       0x07f104bd,
+       0x03f04ffc,
+       0x000ed002,
+       0xc7f004bd,
+       0xfc07f10a,
+       0x0203f04a,
+       0xbd000cd0,
+       0x5e21f504,
+/* 0x02d3: strand_ctx_init */
+       0xbd00f802,
+       0x0399f094,
+       0x370007f1,
+       0xd00203f0,
+       0x04bd0009,
+       0x026a21f5,
+       0xf503e7f0,
+       0xbd029421,
+       0xfc07f1c4,
+       0x0203f047,
+       0xbd000cd0,
+       0x01c7f004,
+       0x4afc07f1,
+       0xd00203f0,
+       0x04bd000c,
+       0x025e21f5,
+       0xf1010c92,
+       0xf046fc07,
+       0x0cd00203,
+       0xf004bd00,
+       0x07f102c7,
+       0x03f04afc,
+       0x000cd002,
+       0x21f504bd,
+       0x21f5025e,
+       0x87f1027f,
+       0x83f04200,
+       0x0097f102,
+       0x0293f020,
+       0x950099cf,
+/* 0x034a: ctx_init_strand_loop */
        0x8ed008fe,
        0x408ed000,
        0xb6808acf,
@@ -428,7 +458,7 @@ uint32_t nvf0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x02fe: error */
+/* 0x037e: error */
        0x07f100f8,
        0x03f00500,
        0x000fd002,
@@ -436,82 +466,117 @@ uint32_t nvf0_grhub_code[] = {
        0x0007f101,
        0x0303f007,
        0xbd000fd0,
-/* 0x031b: init */
+/* 0x039b: init */
        0xbd00f804,
-       0x0004fe04,
-       0xf10007fe,
-       0xf0120017,
-       0x12d00227,
-       0xb117f100,
-       0x0010fe05,
-       0x040017f1,
-       0xf1c010d0,
-       0xb6040437,
-       0x27f10634,
-       0x32d02003,
-       0x0427f100,
-       0x0132d020,
+       0x0007fe04,
+       0x420017f1,
+       0xcf0013f0,
+       0x11e70011,
+       0x14b60109,
+       0x0014fe08,
+       0xf10227f0,
+       0xf0120007,
+       0x02d00003,
+       0xf104bd00,
+       0xfe06c817,
+       0x24bd0010,
+       0x070007f1,
+       0xd00003f0,
+       0x04bd0002,
+       0x200327f1,
+       0x010007f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200427f1,
+       0x010407f1,
+       0xd00103f0,
+       0x04bd0002,
        0x200b27f1,
-       0xf10232d0,
-       0xd0200c27,
-       0x27f10732,
-       0x24b60c24,
-       0x0003b906,
-       0xf10023d0,
+       0x010807f1,
+       0xd00103f0,
+       0x04bd0002,
+       0x200c27f1,
+       0x011c07f1,
+       0xd00103f0,
+       0x04bd0002,
+       0xf1010392,
+       0xf0090007,
+       0x03d00303,
+       0xf104bd00,
        0xf0870427,
-       0x12d00023,
-       0x0012b700,
-       0x0427f001,
-       0xf40012d0,
-       0xe7f11031,
-       0xe3f09604,
-       0x6821f440,
-       0x8090f1c7,
-       0xf4f00301,
-       0x020f801f,
-       0xbb0117f0,
-       0x12b6041f,
-       0x0c27f101,
-       0x0624b604,
-       0xd00021d0,
-       0x17f14021,
-       0x0e980100,
-       0x010f9800,
-       0x014721f5,
-       0x070037f1,
-       0x950634b6,
-       0x34d00814,
-       0x4034d000,
-       0x130030b7,
-       0xb6001fbb,
-       0x3fd002f5,
-       0x0815b600,
-       0xb60110b6,
-       0x1fb90814,
-       0x7121f502,
-       0x001fbb02,
-       0xf1020398,
-       0xf0200047,
-/* 0x03f6: init_gpc */
-       0x4ea05043,
-       0x1fb90804,
-       0x8d21f402,
-       0x010c4ea0,
-       0x21f4f4bd,
-       0x044ea08d,
-       0x8d21f401,
-       0x01004ea0,
-       0xf402f7f0,
-       0x4ea08d21,
-/* 0x041e: init_gpc_wait */
-       0x21f40800,
-       0x1fffc868,
-       0xa0fa0bf4,
-       0xf408044e,
-       0x1fbb6821,
-       0x0040b700,
-       0x0132b680,
-       0xf1be1bf4,
+       0x07f10023,
+       0x03f00400,
+       0x0002d000,
+       0x27f004bd,
+       0x0007f104,
+       0x0003f003,
+       0xbd0002d0,
+       0x1031f404,
+       0x9604e7f1,
+       0xf440e3f0,
+       0xfeb96821,
+       0x90f1c702,
+       0xf0030180,
+       0x0f801ff4,
+       0x0117f002,
+       0xb6041fbb,
+       0x07f10112,
+       0x03f00300,
+       0x0001d001,
+       0x07f104bd,
+       0x03f00400,
+       0x0001d001,
+       0x17f104bd,
+       0xf7f00100,
+       0x7f21f502,
+       0x9121f507,
+       0x10f7f007,
+       0x07de21f5,
+       0x98000e98,
+       0x21f5010f,
+       0x14950150,
+       0x0007f108,
+       0x0103f0c0,
+       0xbd0004d0,
+       0x0007f104,
+       0x0103f0c1,
+       0xbd0004d0,
+       0x0030b704,
+       0x001fbb13,
+       0xf102f5b6,
+       0xf0d30007,
+       0x0fd00103,
+       0xb604bd00,
+       0x10b60815,
+       0x0814b601,
+       0xf5021fb9,
+       0xbb02d321,
+       0x0398001f,
+       0x0047f102,
+       0x5043f020,
+/* 0x04f4: init_gpc */
+       0x08044ea0,
+       0xf4021fb9,
+       0x4ea09d21,
+       0xf4bd010c,
+       0xa09d21f4,
+       0xf401044e,
+       0x4ea09d21,
+       0xf7f00100,
+       0x9d21f402,
+       0x08004ea0,
+/* 0x051c: init_gpc_wait */
+       0xc86821f4,
+       0x0bf41fff,
+       0x044ea0fa,
+       0x6821f408,
+       0xb7001fbb,
+       0xb6800040,
+       0x1bf40132,
+       0x00f7f0be,
+       0x07de21f5,
+       0xf500f7f0,
+       0xf1077f21,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -519,382 +584,379 @@ uint32_t nvf0_grhub_code[] = {
        0x300007f1,
        0xd00203f0,
        0x04bd0001,
-/* 0x0458: main */
+/* 0x0564: main */
        0xf40031f4,
        0xd7f00028,
        0x3921f410,
        0xb1f401f4,
        0xf54001e4,
-       0xbd00de1b,
+       0xbd00e91b,
        0x0499f094,
        0x370007f1,
        0xd00203f0,
        0x04bd0009,
-       0x0b0017f1,
-       0xcf0614b6,
-       0x11cf4012,
-       0x1f13c800,
-       0x00870bf5,
-       0xf41f23c8,
-       0x20f9620b,
-       0xbd0212b9,
-       0x0799f094,
-       0x370007f1,
-       0xd00203f0,
-       0x04bd0009,
-       0xf40132f4,
-       0x21f50231,
-       0x94bd0801,
+       0xc00017f1,
+       0xcf0213f0,
+       0x27f10011,
+       0x23f0c100,
+       0x0022cf02,
+       0xf51f13c8,
+       0xc800890b,
+       0x0bf41f23,
+       0xb920f962,
+       0x94bd0212,
        0xf10799f0,
-       0xf0170007,
+       0xf0370007,
        0x09d00203,
-       0xfc04bd00,
-       0xf094bd20,
-       0x07f10699,
-       0x03f03700,
-       0x0009d002,
-       0x31f404bd,
-       0x0121f501,
-       0xf094bd08,
-       0x07f10699,
+       0xf404bd00,
+       0x31f40132,
+       0xaa21f502,
+       0xf094bd09,
+       0x07f10799,
        0x03f01700,
        0x0009d002,
-       0x0ef404bd,
-/* 0x04f9: chsw_prev_no_next */
-       0xb920f931,
-       0x32f40212,
-       0x0232f401,
-       0x080121f5,
-       0x17f120fc,
-       0x14b60b00,
-       0x0012d006,
-/* 0x0517: chsw_no_prev */
-       0xc8130ef4,
-       0x0bf41f23,
-       0x0131f40d,
-       0xf50232f4,
-/* 0x0527: chsw_done */
-       0xf1080121,
-       0xb60b0c17,
-       0x27f00614,
-       0x0012d001,
+       0x20fc04bd,
        0x99f094bd,
-       0x0007f104,
+       0x0007f106,
+       0x0203f037,
+       0xbd0009d0,
+       0x0131f404,
+       0x09aa21f5,
+       0x99f094bd,
+       0x0007f106,
        0x0203f017,
        0xbd0009d0,
-       0x130ef504,
-/* 0x0549: main_not_ctx_switch */
-       0x01e4b0ff,
-       0xb90d1bf4,
-       0x21f502f2,
-       0x0ef40795,
-/* 0x0559: main_not_ctx_chan */
-       0x02e4b046,
-       0xbd321bf4,
-       0x0799f094,
-       0x370007f1,
+       0x330ef404,
+/* 0x060c: chsw_prev_no_next */
+       0x12b920f9,
+       0x0132f402,
+       0xf50232f4,
+       0xfc09aa21,
+       0x0007f120,
+       0x0203f0c0,
+       0xbd0002d0,
+       0x130ef404,
+/* 0x062c: chsw_no_prev */
+       0xf41f23c8,
+       0x31f40d0b,
+       0x0232f401,
+       0x09aa21f5,
+/* 0x063c: chsw_done */
+       0xf10127f0,
+       0xf0c30007,
+       0x02d00203,
+       0xbd04bd00,
+       0x0499f094,
+       0x170007f1,
        0xd00203f0,
        0x04bd0009,
-       0xf40132f4,
-       0x21f50232,
-       0x94bd0801,
+       0xff080ef5,
+/* 0x0660: main_not_ctx_switch */
+       0xf401e4b0,
+       0xf2b90d1b,
+       0x4221f502,
+       0x460ef409,
+/* 0x0670: main_not_ctx_chan */
+       0xf402e4b0,
+       0x94bd321b,
        0xf10799f0,
-       0xf0170007,
+       0xf0370007,
        0x09d00203,
        0xf404bd00,
-/* 0x058e: main_not_ctx_save */
-       0xef94110e,
-       0x01f5f010,
-       0x02fe21f5,
-       0xfec00ef5,
-/* 0x059c: main_done */
-       0x29f024bd,
-       0x0007f11f,
-       0x0203f030,
-       0xbd0002d0,
-       0xab0ef504,
-/* 0x05b1: ih */
-       0xfe80f9fe,
-       0x80f90188,
-       0xa0f990f9,
-       0xd0f9b0f9,
-       0xf0f9e0f9,
-       0x0acf04bd,
-       0x04abc480,
-       0xf11d0bf4,
-       0xf01900b7,
-       0xbecf10d7,
-       0x00bfcf40,
+       0x32f40132,
+       0xaa21f502,
+       0xf094bd09,
+       0x07f10799,
+       0x03f01700,
+       0x0009d002,
+       0x0ef404bd,
+/* 0x06a5: main_not_ctx_save */
+       0x10ef9411,
+       0xf501f5f0,
+       0xf5037e21,
+/* 0x06b3: main_done */
+       0xbdfeb50e,
+       0x1f29f024,
+       0x300007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xfea00ef5,
+/* 0x06c8: ih */
+       0x88fe80f9,
+       0xf980f901,
+       0xf9a0f990,
+       0xf9d0f9b0,
+       0xbdf0f9e0,
+       0x00a7f104,
+       0x00a3f002,
+       0xc400aacf,
+       0x0bf404ab,
+       0x10d7f030,
+       0x1a00e7f1,
+       0xcf00e3f0,
+       0xf7f100ee,
+       0xf3f01900,
+       0x00ffcf00,
        0xb70421f4,
        0xf00400b0,
-       0xbed001e7,
-/* 0x05e9: ih_no_fifo */
-       0x00abe400,
-       0x0d0bf401,
-       0xf110d7f0,
-       0xf44001e7,
-/* 0x05fa: ih_no_ctxsw */
-       0xb7f10421,
-       0xb0bd0104,
-       0xf4b4abff,
-       0xa7f10d0b,
-       0xa4b60c1c,
-       0x00abd006,
-/* 0x0610: ih_no_other */
-       0xfc400ad0,
+       0x07f101e7,
+       0x03f01d00,
+       0x000ed000,
+/* 0x071a: ih_no_fifo */
+       0xabe404bd,
+       0x0bf40100,
+       0x10d7f00d,
+       0x4001e7f1,
+/* 0x072b: ih_no_ctxsw */
+       0xe40421f4,
+       0xf40400ab,
+       0xb7f1140b,
+       0xbfb90100,
+       0x44e7f102,
+       0x40e3f001,
+/* 0x0743: ih_no_fwmthd */
+       0xf19d21f4,
+       0xbd0104b7,
+       0xb4abffb0,
+       0xf10f0bf4,
+       0xf0070007,
+       0x0bd00303,
+/* 0x075b: ih_no_other */
+       0xf104bd00,
+       0xf0010007,
+       0x0ad00003,
+       0xfc04bd00,
        0xfce0fcf0,
        0xfcb0fcd0,
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x062b: ctx_4170s */
-       0xf101f800,
-       0xf04170e7,
-       0xf5f040e3,
-       0x8d21f410,
-/* 0x063a: ctx_4170w */
+/* 0x077f: ctx_4170s */
+       0xf001f800,
+       0xffb910f5,
+       0x70e7f102,
+       0x40e3f041,
+       0xf89d21f4,
+/* 0x0791: ctx_4170w */
+       0x70e7f100,
+       0x40e3f041,
+       0xb96821f4,
+       0xf4f002ff,
+       0xf01bf410,
+/* 0x07a6: ctx_redswitch */
        0xe7f100f8,
-       0xe3f04170,
-       0x6821f440,
-       0xf410f4f0,
+       0xe5f00200,
+       0x20e5f040,
+       0xf110e5f0,
+       0xf0850007,
+       0x0ed00103,
+       0xf004bd00,
+/* 0x07c2: ctx_redswitch_delay */
+       0xf2b608f7,
+       0xfd1bf401,
+       0x0400e5f1,
+       0x0100e5f1,
+       0x850007f1,
+       0xd00103f0,
+       0x04bd000e,
+/* 0x07de: ctx_86c */
+       0x07f100f8,
+       0x03f02300,
+       0x000fd002,
+       0xffb904bd,
+       0x14e7f102,
+       0x40e3f08a,
+       0xb99d21f4,
+       0xe7f102ff,
+       0xe3f0a88c,
+       0x9d21f441,
+/* 0x0806: ctx_mem */
+       0x07f100f8,
+       0x03f08400,
+       0x000fd002,
+/* 0x0812: ctx_mem_wait */
+       0xf7f104bd,
+       0xf3f08400,
+       0x00ffcf02,
+       0xf405fffd,
        0x00f8f31b,
-/* 0x064c: ctx_redswitch */
-       0x0614e7f1,
-       0xf106e4b6,
-       0xd00270f7,
-       0xf7f000ef,
-/* 0x065d: ctx_redswitch_delay */
-       0x01f2b608,
-       0xf1fd1bf4,
-       0xd00770f7,
-       0x00f800ef,
-/* 0x066c: ctx_86c */
-       0x086ce7f1,
-       0xd006e4b6,
-       0xe7f100ef,
-       0xe3f08a14,
-       0x8d21f440,
-       0xa86ce7f1,
-       0xf441e3f0,
-       0x00f88d21,
-/* 0x068c: ctx_load */
+/* 0x0824: ctx_load */
        0x99f094bd,
        0x0007f105,
        0x0203f037,
        0xbd0009d0,
        0x0ca7f004,
-       0xf1c921f4,
-       0xb60a2417,
-       0x10d00614,
-       0x0037f100,
-       0x0634b60b,
-       0xf14032d0,
-       0xb60a0c17,
-       0x47f00614,
-       0x0012d007,
-/* 0x06c7: ctx_chan_wait_0 */
-       0xcf4014d0,
-       0x44f04014,
-       0xfa1bf41f,
-       0xfe0032d0,
-       0x2af0000b,
-       0x0424b61f,
-       0xbd0220b6,
+       0xbdd021f4,
+       0x0007f1f4,
+       0x0203f089,
+       0xbd000fd0,
+       0x0007f104,
+       0x0203f0c1,
+       0xbd0002d0,
+       0x0007f104,
+       0x0203f083,
+       0xbd0002d0,
+       0x07f7f004,
+       0x080621f5,
+       0xc00007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xf0000bfe,
+       0x24b61f2a,
+       0x0220b604,
+       0x99f094bd,
+       0x0007f108,
+       0x0203f037,
+       0xbd0009d0,
+       0x0007f104,
+       0x0203f081,
+       0xbd0002d0,
+       0x0027f104,
+       0x0023f100,
+       0x0225f080,
+       0x880007f1,
+       0xd00203f0,
+       0x04bd0002,
+       0xf11017f0,
+       0xf0020027,
+       0x12fa0223,
+       0xbd03f805,
        0x0899f094,
-       0x370007f1,
+       0x170007f1,
        0xd00203f0,
        0x04bd0009,
-       0x0a0417f1,
-       0xd00614b6,
-       0x17f10012,
-       0x14b60a20,
-       0x0227f006,
-       0x800023f1,
-       0xf00012d0,
-       0x27f11017,
-       0x23f00200,
-       0x0512fa02,
-       0x94bd03f8,
-       0xf10899f0,
-       0xf0170007,
+       0xb6810198,
+       0x02981814,
+       0x0825b680,
+       0x800512fd,
+       0x94bd1601,
+       0xf10999f0,
+       0xf0370007,
        0x09d00203,
-       0x9804bd00,
-       0x14b68101,
-       0x80029818,
-       0xfd0825b6,
-       0x01800512,
-       0xf094bd16,
-       0x07f10999,
-       0x03f03700,
-       0x0009d002,
-       0x27f104bd,
-       0x24b60a04,
-       0x0021d006,
-       0xf10127f0,
-       0xb60a2017,
-       0x12d00614,
-       0x0017f100,
-       0x0613f001,
-       0xf80501fa,
-       0xf094bd03,
-       0x07f10999,
-       0x03f01700,
-       0x0009d002,
-       0x94bd04bd,
-       0xf10599f0,
+       0xf104bd00,
+       0xf0810007,
+       0x01d00203,
+       0xf004bd00,
+       0x07f10127,
+       0x03f08800,
+       0x0002d002,
+       0x17f104bd,
+       0x13f00100,
+       0x0501fa06,
+       0x94bd03f8,
+       0xf10999f0,
        0xf0170007,
        0x09d00203,
-       0xf804bd00,
-/* 0x0795: ctx_chan */
-       0x8c21f500,
-       0x0ca7f006,
-       0xf1c921f4,
-       0xb60a1017,
-       0x27f00614,
-       0x0012d005,
-/* 0x07ac: ctx_chan_wait */
-       0xfd0012cf,
-       0x1bf40522,
-/* 0x07b7: ctx_mmio_exec */
-       0x9800f8fa,
-       0x27f14103,
-       0x24b60a04,
-       0x0023d006,
-/* 0x07c6: ctx_mmio_loop */
+       0xbd04bd00,
+       0x0599f094,
+       0x170007f1,
+       0xd00203f0,
+       0x04bd0009,
+/* 0x0942: ctx_chan */
+       0x21f500f8,
+       0xa7f00824,
+       0xd021f40c,
+       0xf505f7f0,
+       0xf8080621,
+/* 0x0955: ctx_mmio_exec */
+       0x41039800,
+       0x810007f1,
+       0xd00203f0,
+       0x04bd0003,
+/* 0x0966: ctx_mmio_loop */
        0x34c434bd,
        0x0f1bf4ff,
        0x020057f1,
        0xfa0653f0,
        0x03f80535,
-/* 0x07d8: ctx_mmio_pull */
+/* 0x0978: ctx_mmio_pull */
        0x98804e98,
        0x21f4814f,
-       0x0830b68d,
+       0x0830b69d,
        0xf40112b6,
-/* 0x07ea: ctx_mmio_done */
+/* 0x098a: ctx_mmio_done */
        0x0398df1b,
-       0x0023d016,
-       0xf1400080,
-       0xf0010017,
-       0x01fa0613,
-       0xf803f806,
-/* 0x0801: ctx_xfer */
-       0x00f7f100,
-       0x06f4b60c,
-       0xd004e7f0,
-/* 0x080e: ctx_xfer_idle */
-       0xfecf80fe,
-       0x00e4f100,
-       0xf91bf420,
-       0xf40611f4,
-/* 0x081e: ctx_xfer_pre */
-       0xf7f00d02,
-       0x6c21f510,
-       0x1c11f406,
-/* 0x0828: ctx_xfer_pre_load */
-       0xf502f7f0,
-       0xf5062b21,
-       0xf5063a21,
-       0xbd064c21,
-       0x2b21f5f4,
-       0x8c21f506,
-/* 0x0841: ctx_xfer_exec */
-       0x16019806,
-       0x041427f1,
-       0xd00624b6,
-       0xe7f10020,
-       0xe3f0a500,
-       0x021fb941,
-       0xb68d21f4,
-       0xfcf004e0,
-       0x022cf001,
-       0xfd0124b6,
-       0x21f405f2,
-       0xfc17f18d,
-       0x0213f04a,
-       0xd00c27f0,
-       0x21f50012,
-       0x27f10215,
-       0x23f047fc,
-       0x0020d002,
+       0x0007f116,
+       0x0203f081,
+       0xbd0003d0,
+       0x40008004,
+       0x010017f1,
+       0xfa0613f0,
+       0x03f80601,
+/* 0x09aa: ctx_xfer */
+       0xe7f000f8,
+       0x0007f104,
+       0x0303f002,
+       0xbd000ed0,
+/* 0x09b9: ctx_xfer_idle */
+       0x00e7f104,
+       0x03e3f000,
+       0xf100eecf,
+       0xf42000e4,
+       0x11f4f21b,
+       0x0d02f406,
+/* 0x09d0: ctx_xfer_pre */
+       0xf510f7f0,
+       0xf407de21,
+/* 0x09da: ctx_xfer_pre_load */
+       0xf7f01c11,
+       0x7f21f502,
+       0x9121f507,
+       0xa621f507,
+       0xf5f4bd07,
+       0xf5077f21,
+/* 0x09f3: ctx_xfer_exec */
+       0x98082421,
+       0x24bd1601,
+       0x050007f1,
+       0xd00103f0,
+       0x04bd0002,
+       0xf1021fb9,
+       0xf0a500e7,
+       0x21f441e3,
+       0x01fcf09d,
+       0xb6022cf0,
+       0xf2fd0124,
+       0x02ffb905,
+       0xa504e7f1,
+       0xf441e3f0,
+       0x21f59d21,
+       0x24bd026a,
+       0x47fc07f1,
+       0xd00203f0,
+       0x04bd0002,
        0xb6012cf0,
-       0x12d00320,
-       0x01acf000,
-       0xf006a5f0,
-       0x0c9800b7,
-       0x010d9800,
-       0xf500e7f0,
-       0xf0016621,
-       0x21f508a7,
-       0x21f50109,
-       0x01f40215,
-       0x0ca7f022,
-       0xf1c921f4,
-       0xb60a1017,
-       0x27f00614,
-       0x0012d005,
-/* 0x08c8: ctx_xfer_post_save_wait */
-       0xfd0012cf,
-       0x1bf40522,
-       0x2e02f4fa,
-/* 0x08d4: ctx_xfer_post */
-       0xf502f7f0,
-       0xbd062b21,
-       0x6c21f5f4,
-       0x3421f506,
-       0x3a21f502,
-       0xf5f4bd06,
-       0xf4062b21,
-       0x01981011,
-       0x0511fd40,
-       0xf5070bf4,
-/* 0x08ff: ctx_xfer_no_post_mmio */
-/* 0x08ff: ctx_xfer_done */
-       0xf807b721,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x07f10320,
+       0x03f04afc,
+       0x0002d002,
+       0xacf004bd,
+       0x06a5f001,
+       0x9800b7f0,
+       0x0d98000c,
+       0x00e7f001,
+       0x016f21f5,
+       0xf508a7f0,
+       0xf5011021,
+       0xf4025e21,
+       0xa7f01301,
+       0xd021f40c,
+       0xf505f7f0,
+       0xf4080621,
+/* 0x0a82: ctx_xfer_post */
+       0xf7f02e02,
+       0x7f21f502,
+       0xf5f4bd07,
+       0xf507de21,
+       0xf5027f21,
+       0xbd079121,
+       0x7f21f5f4,
+       0x1011f407,
+       0xfd400198,
+       0x0bf40511,
+       0x5521f507,
+/* 0x0aad: ctx_xfer_no_post_mmio */
+/* 0x0aad: ctx_xfer_done */
+       0x0000f809,
        0x00000000,
        0x00000000,
        0x00000000,
index 33a5a82eccbd6e8114f55d8d8766d22929474d80..6ffe28307dbd6e420a019d0e7dd4ac891f531fd7 100644 (file)
 #define GF117 0xd7
 #define GK100 0xe0
 #define GK110 0xf0
+#define GK208 0x108
 
+#define NV_PGRAPH_FECS_INTR_ACK                                        0x409004
+#define NV_PGRAPH_FECS_INTR                                            0x409008
+#define NV_PGRAPH_FECS_INTR_FWMTHD                                   0x00000400
+#define NV_PGRAPH_FECS_INTR_CHSW                                     0x00000100
+#define NV_PGRAPH_FECS_INTR_FIFO                                     0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE                                       0x40900c
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO                                0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL                          0x00000004
+#define NV_PGRAPH_FECS_INTR_MODE_FIFO_EDGE                           0x00000000
+#define NV_PGRAPH_FECS_INTR_EN_SET                                     0x409010
+#define NV_PGRAPH_FECS_INTR_EN_SET_FIFO                              0x00000004
+#define NV_PGRAPH_FECS_INTR_ROUTE                                      0x40901c
+#define NV_PGRAPH_FECS_ACCESS                                          0x409048
+#define NV_PGRAPH_FECS_ACCESS_FIFO                                   0x00000002
+#define NV_PGRAPH_FECS_FIFO_DATA                                       0x409064
+#define NV_PGRAPH_FECS_FIFO_CMD                                        0x409068
+#define NV_PGRAPH_FECS_FIFO_ACK                                        0x409074
+#define NV_PGRAPH_FECS_CAPS                                            0x409108
 #define NV_PGRAPH_FECS_SIGNAL                                          0x409400
+#define NV_PGRAPH_FECS_IROUTE                                          0x409404
+#define NV_PGRAPH_FECS_BAR_MASK0                                       0x40940c
+#define NV_PGRAPH_FECS_BAR_MASK1                                       0x409410
+#define NV_PGRAPH_FECS_BAR                                             0x409414
+#define NV_PGRAPH_FECS_BAR_SET                                         0x409418
+#define NV_PGRAPH_FECS_RED_SWITCH                                      0x409614
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP                         0x00000400
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC                         0x00000200
+#define NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN                        0x00000100
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP                          0x00000040
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC                          0x00000020
+#define NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN                         0x00000010
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_GPC                          0x00000002
+#define NV_PGRAPH_FECS_RED_SWITCH_PAUSE_MAIN                         0x00000001
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE                               0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE                               0x409704
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT                                0x40974c
+#define NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE                               0x409700
+#define NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE                               0x409704
+#define NV_PGRAPH_FECS_MMCTX_BASE                                      0x409710
+#define NV_PGRAPH_FECS_MMCTX_CTRL                                      0x409714
+#define NV_PGRAPH_FECS_MMCTX_MULTI_STRIDE                              0x409718
+#define NV_PGRAPH_FECS_MMCTX_MULTI_MASK                                0x40971c
+#define NV_PGRAPH_FECS_MMCTX_QUEUE                                     0x409720
+#define NV_PGRAPH_FECS_MMIO_CTRL                                       0x409728
+#define NV_PGRAPH_FECS_MMIO_RDVAL                                      0x40972c
+#define NV_PGRAPH_FECS_MMIO_WRVAL                                      0x409730
+#define NV_PGRAPH_FECS_MMCTX_LOAD_COUNT                                0x40974c
 #if CHIPSET < GK110
 #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n)                    ((n) * 4 + 0x409800)
 #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n)                    ((n) * 4 + 0x409820)
 #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n)                    ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C                                          0x40986c
 #else
 #define NV_PGRAPH_FECS_CC_SCRATCH_VAL(n)                    ((n) * 4 + 0x409800)
 #define NV_PGRAPH_FECS_CC_SCRATCH_CLR(n)                    ((n) * 4 + 0x409840)
+#define NV_PGRAPH_FECS_UNK86C                                          0x40988c
 #define NV_PGRAPH_FECS_CC_SCRATCH_SET(n)                    ((n) * 4 + 0x4098c0)
 #endif
+#define NV_PGRAPH_FECS_STRANDS_CNT                                     0x409880
+#define NV_PGRAPH_FECS_STRAND_SAVE_SWBASE                              0x409908
+#define NV_PGRAPH_FECS_STRAND_LOAD_SWBASE                              0x40990c
+#define NV_PGRAPH_FECS_STRAND_WORDS                                    0x409910
+#define NV_PGRAPH_FECS_STRAND_DATA                                     0x409918
+#define NV_PGRAPH_FECS_STRAND_SELECT                                   0x40991c
+#define NV_PGRAPH_FECS_STRAND_CMD                                      0x409928
+#define NV_PGRAPH_FECS_STRAND_CMD_SEEK                               0x00000001
+#define NV_PGRAPH_FECS_STRAND_CMD_GET_INFO                           0x00000002
+#define NV_PGRAPH_FECS_STRAND_CMD_SAVE                               0x00000003
+#define NV_PGRAPH_FECS_STRAND_CMD_LOAD                               0x00000004
+#define NV_PGRAPH_FECS_STRAND_CMD_ACTIVATE_FILTER                    0x0000000a
+#define NV_PGRAPH_FECS_STRAND_CMD_DEACTIVATE_FILTER                  0x0000000b
+#define NV_PGRAPH_FECS_STRAND_CMD_ENABLE                             0x0000000c
+#define NV_PGRAPH_FECS_STRAND_CMD_DISABLE                            0x0000000d
+#define NV_PGRAPH_FECS_STRAND_FILTER                                   0x40993c
+#define NV_PGRAPH_FECS_MEM_BASE                                        0x409a04
+#define NV_PGRAPH_FECS_MEM_CHAN                                        0x409a0c
+#define NV_PGRAPH_FECS_MEM_CMD                                         0x409a10
+#define NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN                             0x00000007
+#define NV_PGRAPH_FECS_MEM_TARGET                                      0x409a20
+#define NV_PGRAPH_FECS_MEM_TARGET_UNK31                              0x80000000
+#define NV_PGRAPH_FECS_MEM_TARGET_AS                                 0x0000001f
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VM                              0x00000001
+#define NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM                            0x00000002
+#define NV_PGRAPH_FECS_CHAN_ADDR                                       0x409b00
+#define NV_PGRAPH_FECS_CHAN_NEXT                                       0x409b04
+#define NV_PGRAPH_FECS_CHSW                                            0x409b0c
+#define NV_PGRAPH_FECS_CHSW_ACK                                      0x00000001
 #define NV_PGRAPH_FECS_INTR_UP_SET                                     0x409c1c
+#define NV_PGRAPH_FECS_INTR_UP_EN                                      0x409c24
 
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ACK                                  0x41a004
+#define NV_PGRAPH_GPCX_GPCCS_INTR                                      0x41a008
+#define NV_PGRAPH_GPCX_GPCCS_INTR_FIFO                               0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET                               0x41a010
+#define NV_PGRAPH_GPCX_GPCCS_INTR_EN_SET_FIFO                        0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_INTR_ROUTE                                0x41a01c
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS                                    0x41a048
+#define NV_PGRAPH_GPCX_GPCCS_ACCESS_FIFO                             0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_DATA                                 0x41a064
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_CMD                                  0x41a068
+#define NV_PGRAPH_GPCX_GPCCS_FIFO_ACK                                  0x41a074
+#define NV_PGRAPH_GPCX_GPCCS_UNITS                                     0x41a608
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH                                0x41a614
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_UNK11                        0x00000800
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_ENABLE                       0x00000200
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_POWER                        0x00000020
+#define NV_PGRAPH_GPCX_GPCCS_RED_SWITCH_PAUSE                        0x00000002
+#define NV_PGRAPH_GPCX_GPCCS_MYINDEX                                   0x41a618
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_SAVE_SWBASE                         0x41a700
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_SWBASE                         0x41a704
+#define NV_PGRAPH_GPCX_GPCCS_MMCTX_LOAD_COUNT                          0x41a74c
 #if CHIPSET < GK110
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n)              ((n) * 4 + 0x41a800)
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n)              ((n) * 4 + 0x41a820)
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n)              ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C                                    0x41a86c
 #else
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_VAL(n)              ((n) * 4 + 0x41a800)
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_CLR(n)              ((n) * 4 + 0x41a840)
+#define NV_PGRAPH_GPCX_GPCCS_UNK86C                                    0x41a88c
 #define NV_PGRAPH_GPCX_GPCCS_CC_SCRATCH_SET(n)              ((n) * 4 + 0x41a8c0)
 #endif
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_SELECT                             0x41a91c
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD                                0x41a928
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_SAVE                         0x00000003
+#define NV_PGRAPH_GPCX_GPCCS_STRAND_CMD_LOAD                         0x00000004
+#define NV_PGRAPH_GPCX_GPCCS_MEM_BASE                                  0x41aa04
 
 #define mmctx_data(r,c) .b32 (((c - 1) << 26) | r)
 #define queue_init      .skip 72 // (2 * 4) + ((8 * 4) * 2)
 #define T_LCHAN   8
 #define T_LCTXH   9
 
-#define nv_mkmm(rv,r) /*
-*/     movw rv  ((r) & 0x0000fffc) /*
-*/     sethi rv ((r) & 0x00ff0000)
+#if CHIPSET < GK208
+#define imm32(reg,val) /*
+*/     movw reg  ((val) & 0x0000ffff) /*
+*/     sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/     mov reg (val)
+#endif
+
 #define nv_mkio(rv,r,i) /*
-*/     nv_mkmm(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+*/     imm32(rv, (((r) & 0xffc) << 6) | ((i) << 2))
+
+#define hash #
+#define fn(a) a
+#if CHIPSET < GK208
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
 
 #define nv_iord(rv,r,i) /*
 */     nv_mkio(rv,r,i) /*
 */     iord rv I[rv]
+
 #define nv_iowr(r,i,rv) /*
 */     nv_mkio($r0,r,i) /*
 */     iowr I[$r0] rv /*
 */     clear b32 $r0
 
+#define nv_rd32(reg,addr) /*
+*/     imm32($r14, addr) /*
+*/     call(nv_rd32) /*
+*/     mov b32 reg $r15
+
+#define nv_wr32(addr,reg) /*
+*/     mov b32 $r15 reg /*
+*/     imm32($r14, addr) /*
+*/     call(nv_wr32)
+
 #define trace_set(bit) /*
 */     clear b32 $r9 /*
 */     bset $r9 bit /*
 */     nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(7), 0, $r9)
+
 #define trace_clr(bit) /*
 */     clear b32 $r9 /*
 */     bset $r9 bit /*
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
new file mode 100644 (file)
index 0000000..e1af65e
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv108_graph_sclass[] = {
+       { 0x902d, &nouveau_object_ofuncs },
+       { 0xa140, &nouveau_object_ofuncs },
+       { 0xa197, &nouveau_object_ofuncs },
+       { 0xa1c0, &nouveau_object_ofuncs },
+       {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static struct nvc0_graph_init
+nv108_graph_init_regs[] = {
+       { 0x400080,   1, 0x04, 0x003083c2 },
+       { 0x400088,   1, 0x04, 0x0001bfe7 },
+       { 0x40008c,   1, 0x04, 0x00000000 },
+       { 0x400090,   1, 0x04, 0x00000030 },
+       { 0x40013c,   1, 0x04, 0x003901f7 },
+       { 0x400140,   1, 0x04, 0x00000100 },
+       { 0x400144,   1, 0x04, 0x00000000 },
+       { 0x400148,   1, 0x04, 0x00000110 },
+       { 0x400138,   1, 0x04, 0x00000000 },
+       { 0x400130,   2, 0x04, 0x00000000 },
+       { 0x400124,   1, 0x04, 0x00000002 },
+       {}
+};
+
+struct nvc0_graph_init
+nv108_graph_init_unk58xx[] = {
+       { 0x405844,   1, 0x04, 0x00ffffff },
+       { 0x405850,   1, 0x04, 0x00000000 },
+       { 0x405900,   1, 0x04, 0x00000000 },
+       { 0x405908,   1, 0x04, 0x00000000 },
+       { 0x405928,   1, 0x04, 0x00000000 },
+       { 0x40592c,   1, 0x04, 0x00000000 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_gpc[] = {
+       { 0x418408,   1, 0x04, 0x00000000 },
+       { 0x4184a0,   3, 0x04, 0x00000000 },
+       { 0x418604,   1, 0x04, 0x00000000 },
+       { 0x418680,   1, 0x04, 0x00000000 },
+       { 0x418714,   1, 0x04, 0x00000000 },
+       { 0x418384,   2, 0x04, 0x00000000 },
+       { 0x418814,   3, 0x04, 0x00000000 },
+       { 0x418b04,   1, 0x04, 0x00000000 },
+       { 0x4188c8,   2, 0x04, 0x00000000 },
+       { 0x4188d0,   1, 0x04, 0x00010000 },
+       { 0x4188d4,   1, 0x04, 0x00000201 },
+       { 0x418910,   1, 0x04, 0x00010001 },
+       { 0x418914,   1, 0x04, 0x00000301 },
+       { 0x418918,   1, 0x04, 0x00800000 },
+       { 0x418980,   1, 0x04, 0x77777770 },
+       { 0x418984,   3, 0x04, 0x77777777 },
+       { 0x418c04,   1, 0x04, 0x00000000 },
+       { 0x418c64,   2, 0x04, 0x00000000 },
+       { 0x418c88,   1, 0x04, 0x00000000 },
+       { 0x418cb4,   2, 0x04, 0x00000000 },
+       { 0x418d00,   1, 0x04, 0x00000000 },
+       { 0x418d28,   2, 0x04, 0x00000000 },
+       { 0x418f00,   1, 0x04, 0x00000400 },
+       { 0x418f08,   1, 0x04, 0x00000000 },
+       { 0x418f20,   2, 0x04, 0x00000000 },
+       { 0x418e00,   1, 0x04, 0x00000000 },
+       { 0x418e08,   1, 0x04, 0x00000000 },
+       { 0x418e1c,   2, 0x04, 0x00000000 },
+       { 0x41900c,   1, 0x04, 0x00000000 },
+       { 0x419018,   1, 0x04, 0x00000000 },
+       {}
+};
+
+static struct nvc0_graph_init
+nv108_graph_init_tpc[] = {
+       { 0x419d0c,   1, 0x04, 0x00000000 },
+       { 0x419d10,   1, 0x04, 0x00000014 },
+       { 0x419ab0,   1, 0x04, 0x00000000 },
+       { 0x419ac8,   1, 0x04, 0x00000000 },
+       { 0x419ab8,   1, 0x04, 0x000000e7 },
+       { 0x419abc,   2, 0x04, 0x00000000 },
+       { 0x419ab4,   1, 0x04, 0x00000000 },
+       { 0x419aa8,   2, 0x04, 0x00000000 },
+       { 0x41980c,   1, 0x04, 0x00000010 },
+       { 0x419844,   1, 0x04, 0x00000000 },
+       { 0x419850,   1, 0x04, 0x00000004 },
+       { 0x419854,   2, 0x04, 0x00000000 },
+       { 0x419c98,   1, 0x04, 0x00000000 },
+       { 0x419ca8,   1, 0x04, 0x00000000 },
+       { 0x419cb0,   1, 0x04, 0x01000000 },
+       { 0x419cb4,   1, 0x04, 0x00000000 },
+       { 0x419cb8,   1, 0x04, 0x00b08bea },
+       { 0x419c84,   1, 0x04, 0x00010384 },
+       { 0x419cbc,   1, 0x04, 0x281b3646 },
+       { 0x419cc0,   2, 0x04, 0x00000000 },
+       { 0x419c80,   1, 0x04, 0x00000230 },
+       { 0x419ccc,   2, 0x04, 0x00000000 },
+       { 0x419c0c,   1, 0x04, 0x00000000 },
+       { 0x419e00,   1, 0x04, 0x00000080 },
+       { 0x419ea0,   1, 0x04, 0x00000000 },
+       { 0x419ee4,   1, 0x04, 0x00000000 },
+       { 0x419ea4,   1, 0x04, 0x00000100 },
+       { 0x419ea8,   1, 0x04, 0x00000000 },
+       { 0x419eb4,   1, 0x04, 0x00000000 },
+       { 0x419ebc,   2, 0x04, 0x00000000 },
+       { 0x419edc,   1, 0x04, 0x00000000 },
+       { 0x419f00,   1, 0x04, 0x00000000 },
+       { 0x419ed0,   1, 0x04, 0x00003234 },
+       { 0x419f74,   1, 0x04, 0x00015555 },
+       { 0x419f80,   4, 0x04, 0x00000000 },
+       {}
+};
+
+static int
+nv108_graph_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nvc0_graph_priv *priv = (void *)object;
+       static const struct {
+               u32 addr;
+               u32 data;
+       } magic[] = {
+               { 0x020520, 0xfffffffc },
+               { 0x020524, 0xfffffffe },
+               { 0x020524, 0xfffffffc },
+               { 0x020524, 0xfffffff8 },
+               { 0x020524, 0xffffffe0 },
+               { 0x020530, 0xfffffffe },
+               { 0x02052c, 0xfffffffa },
+               { 0x02052c, 0xfffffff0 },
+               { 0x02052c, 0xffffffc0 },
+               { 0x02052c, 0xffffff00 },
+               { 0x02052c, 0xfffffc00 },
+               { 0x02052c, 0xfffcfc00 },
+               { 0x02052c, 0xfff0fc00 },
+               { 0x02052c, 0xff80fc00 },
+               { 0x020528, 0xfffffffe },
+               { 0x020528, 0xfffffffc },
+       };
+       int i;
+
+       nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
+       nv_mask(priv, 0x0206b4, 0x00000000, 0x00000000);
+       for (i = 0; i < ARRAY_SIZE(magic); i++) {
+               nv_wr32(priv, magic[i].addr, magic[i].data);
+               nv_wait(priv, magic[i].addr, 0x80000000, 0x00000000);
+       }
+
+       return nouveau_graph_fini(&priv->base, suspend);
+}
+
+static struct nvc0_graph_init *
+nv108_graph_init_mmio[] = {
+       nv108_graph_init_regs,
+       nvf0_graph_init_unk40xx,
+       nvc0_graph_init_unk44xx,
+       nvc0_graph_init_unk78xx,
+       nvc0_graph_init_unk60xx,
+       nvd9_graph_init_unk64xx,
+       nv108_graph_init_unk58xx,
+       nvc0_graph_init_unk80xx,
+       nvf0_graph_init_unk70xx,
+       nvf0_graph_init_unk5bxx,
+       nv108_graph_init_gpc,
+       nv108_graph_init_tpc,
+       nve4_graph_init_unk,
+       nve4_graph_init_unk88xx,
+       NULL
+};
+
+#include "fuc/hubnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_fecs_ucode = {
+       .code.data = nv108_grhub_code,
+       .code.size = sizeof(nv108_grhub_code),
+       .data.data = nv108_grhub_data,
+       .data.size = sizeof(nv108_grhub_data),
+};
+
+#include "fuc/gpcnv108.fuc5.h"
+
+static struct nvc0_graph_ucode
+nv108_graph_gpccs_ucode = {
+       .code.data = nv108_grgpc_code,
+       .code.size = sizeof(nv108_grgpc_code),
+       .data.data = nv108_grgpc_data,
+       .data.size = sizeof(nv108_grgpc_data),
+};
+
+struct nouveau_oclass *
+nv108_graph_oclass = &(struct nvc0_graph_oclass) {
+       .base.handle = NV_ENGINE(GR, 0x08),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_graph_ctor,
+               .dtor = nvc0_graph_dtor,
+               .init = nve4_graph_init,
+               .fini = nv108_graph_fini,
+       },
+       .cclass = &nv108_grctx_oclass,
+       .sclass =  nv108_graph_sclass,
+       .mmio = nv108_graph_init_mmio,
+       .fecs.ucode = &nv108_graph_fecs_ucode,
+       .gpccs.ucode = &nv108_graph_gpccs_ucode,
+}.base;
index 03de5175dd9f8bcdb1a2ad48c18de965cf31a3c1..30ed19c52e05ca0cd6a1e759a8fc426f61f246a7 100644 (file)
@@ -304,12 +304,28 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
        return timeout ? -EBUSY : 0;
 }
 
-static const struct nouveau_enum nv50_mp_exec_error_names[] = {
-       { 3, "STACK_UNDERFLOW", NULL },
-       { 4, "QUADON_ACTIVE", NULL },
-       { 8, "TIMEOUT", NULL },
-       { 0x10, "INVALID_OPCODE", NULL },
-       { 0x40, "BREAKPOINT", NULL },
+static const struct nouveau_bitfield nv50_mp_exec_errors[] = {
+       { 0x01, "STACK_UNDERFLOW" },
+       { 0x02, "STACK_MISMATCH" },
+       { 0x04, "QUADON_ACTIVE" },
+       { 0x08, "TIMEOUT" },
+       { 0x10, "INVALID_OPCODE" },
+       { 0x20, "PM_OVERFLOW" },
+       { 0x40, "BREAKPOINT" },
+       {}
+};
+
+static const struct nouveau_bitfield nv50_mpc_traps[] = {
+       { 0x0000001, "LOCAL_LIMIT_READ" },
+       { 0x0000010, "LOCAL_LIMIT_WRITE" },
+       { 0x0000040, "STACK_LIMIT" },
+       { 0x0000100, "GLOBAL_LIMIT_READ" },
+       { 0x0001000, "GLOBAL_LIMIT_WRITE" },
+       { 0x0010000, "MP0" },
+       { 0x0020000, "MP1" },
+       { 0x0040000, "GLOBAL_LIMIT_RED" },
+       { 0x0400000, "GLOBAL_LIMIT_ATOM" },
+       { 0x4000000, "MP2" },
        {}
 };
 
@@ -396,6 +412,60 @@ static const struct nouveau_bitfield nv50_graph_intr_name[] = {
        {}
 };
 
+static const struct nouveau_bitfield nv50_graph_trap_prop[] = {
+       { 0x00000004, "SURF_WIDTH_OVERRUN" },
+       { 0x00000008, "SURF_HEIGHT_OVERRUN" },
+       { 0x00000010, "DST2D_FAULT" },
+       { 0x00000020, "ZETA_FAULT" },
+       { 0x00000040, "RT_FAULT" },
+       { 0x00000080, "CUDA_FAULT" },
+       { 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
+       { 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
+       { 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
+       { 0x00000800, "DST2D_LINEAR_MISMATCH" },
+       { 0x00001000, "RT_LINEAR_MISMATCH" },
+       {}
+};
+
+static void
+nv50_priv_prop_trap(struct nv50_graph_priv *priv,
+                   u32 ustatus_addr, u32 ustatus, u32 tp)
+{
+       u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
+       u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
+       u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
+       u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+       u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+       u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+       u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+
+       /* CUDA memory: l[], g[] or stack. */
+       if (ustatus & 0x00000080) {
+               if (e18 & 0x80000000) {
+                       /* g[] read fault? */
+                       nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+                                        tp, e14, e10 | ((e18 >> 24) & 0x1f));
+                       e18 &= ~0x1f000000;
+               } else if (e18 & 0xc) {
+                       /* g[] write fault? */
+                       nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+                                tp, e14, e10 | ((e18 >> 7) & 0x1f));
+                       e18 &= ~0x00000f80;
+               } else {
+                       nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+                                tp, e14, e10);
+               }
+               ustatus &= ~0x00000080;
+       }
+       if (ustatus) {
+               nv_error(priv, "TRAP_PROP - TP %d -", tp);
+               nouveau_bitfield_print(nv50_graph_trap_prop, ustatus);
+               pr_cont(" - Address %02x%08x\n", e14, e10);
+       }
+       nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+                tp, e0c, e18, e1c, e20, e24);
+}
+
 static void
 nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
 {
@@ -420,8 +490,8 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
                        oplow = nv_rd32(priv, addr + 0x70);
                        ophigh = nv_rd32(priv, addr + 0x74);
                        nv_error(priv, "TRAP_MP_EXEC - "
-                                       "TP %d MP %d: ", tpid, i);
-                       nouveau_enum_print(nv50_mp_exec_error_names, status);
+                                       "TP %d MP %d:", tpid, i);
+                       nouveau_bitfield_print(nv50_mp_exec_errors, status);
                        pr_cont(" at %06x warp %d, opcode %08x %08x\n",
                                        pc&0xffffff, pc >> 24,
                                        oplow, ophigh);
@@ -468,60 +538,19 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
                                nv50_priv_mp_trap(priv, i, display);
                                ustatus &= ~0x04030000;
                        }
-                       break;
-               case 8: /* TPDMA error */
-                       {
-                       u32 e0c = nv_rd32(priv, ustatus_addr + 4);
-                       u32 e10 = nv_rd32(priv, ustatus_addr + 8);
-                       u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
-                       u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
-                       u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
-                       u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
-                       u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
-                       /* 2d engine destination */
-                       if (ustatus & 0x00000010) {
-                               if (display) {
-                                       nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
-                                                       i, e14, e10);
-                                       nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-                                                       i, e0c, e18, e1c, e20, e24);
-                               }
-                               ustatus &= ~0x00000010;
-                       }
-                       /* Render target */
-                       if (ustatus & 0x00000040) {
-                               if (display) {
-                                       nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
-                                                       i, e14, e10);
-                                       nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-                                                       i, e0c, e18, e1c, e20, e24);
-                               }
-                               ustatus &= ~0x00000040;
-                       }
-                       /* CUDA memory: l[], g[] or stack. */
-                       if (ustatus & 0x00000080) {
-                               if (display) {
-                                       if (e18 & 0x80000000) {
-                                               /* g[] read fault? */
-                                               nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
-                                                               i, e14, e10 | ((e18 >> 24) & 0x1f));
-                                               e18 &= ~0x1f000000;
-                                       } else if (e18 & 0xc) {
-                                               /* g[] write fault? */
-                                               nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
-                                                               i, e14, e10 | ((e18 >> 7) & 0x1f));
-                                               e18 &= ~0x00000f80;
-                                       } else {
-                                               nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
-                                                               i, e14, e10);
-                                       }
-                                       nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-                                                       i, e0c, e18, e1c, e20, e24);
-                               }
-                               ustatus &= ~0x00000080;
-                       }
+                       if (ustatus && display) {
+                               nv_error("%s - TP%d:", name, i);
+                               nouveau_bitfield_print(nv50_mpc_traps, ustatus);
+                               pr_cont("\n");
+                               ustatus = 0;
                        }
                        break;
+               case 8: /* PROP error */
+                       if (display)
+                               nv50_priv_prop_trap(
+                                               priv, ustatus_addr, ustatus, i);
+                       ustatus = 0;
+                       break;
                }
                if (ustatus) {
                        if (display)
@@ -727,11 +756,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
                status &= ~0x080;
        }
 
-       /* TPDMA:  Handles TP-initiated uncached memory accesses:
+       /* PROP:  Handles TP-initiated uncached memory accesses:
         * l[], g[], stack, 2d surfaces, render targets. */
        if (status & 0x100) {
                nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
-                                   "TRAP_TPDMA");
+                                   "TRAP_PROP");
                nv_wr32(priv, 0x400108, 0x100);
                status &= ~0x100;
        }
@@ -760,7 +789,7 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
        u32 mthd = (addr & 0x00001ffc);
        u32 data = nv_rd32(priv, 0x400708);
        u32 class = nv_rd32(priv, 0x400814);
-       u32 show = stat;
+       u32 show = stat, show_bitfield = stat;
        int chid;
 
        engctx = nouveau_engctx_get(engine, inst);
@@ -778,21 +807,26 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
                nv_error(priv, "DATA_ERROR ");
                nouveau_enum_print(nv50_data_error_names, ecode);
                pr_cont("\n");
+               show_bitfield &= ~0x00100000;
        }
 
        if (stat & 0x00200000) {
                if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
                                engctx))
                        show &= ~0x00200000;
+               show_bitfield &= ~0x00200000;
        }
 
        nv_wr32(priv, 0x400100, stat);
        nv_wr32(priv, 0x400500, 0x00010001);
 
        if (show) {
-               nv_error(priv, "%s", "");
-               nouveau_bitfield_print(nv50_graph_intr_name, show);
-               pr_cont("\n");
+               show &= show_bitfield;
+               if (show) {
+                       nv_error(priv, "%s", "");
+                       nouveau_bitfield_print(nv50_graph_intr_name, show);
+                       pr_cont("\n");
+               }
                nv_error(priv,
                         "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
                         chid, (u64)inst << 12, nouveau_client_name(engctx),
index 5c8a63dc506aafcfbbe5088ede75ed635d0ba8a4..a73ab209ea88c85121fe3580e9daa189b29de46e 100644 (file)
@@ -901,6 +901,9 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
                }
 
                return 0;
+       } else
+       if (!oclass->fecs.ucode) {
+               return -ENOSYS;
        }
 
        /* load HUB microcode */
index ea17a80ad7fce3d7bcee48013ac6ba8c65acc4c7..b0ab6de270b2ea9d148b483b8249bc11efa27496 100644 (file)
@@ -205,6 +205,11 @@ extern struct nvc0_graph_init nve4_graph_init_regs[];
 extern struct nvc0_graph_init nve4_graph_init_unk[];
 extern struct nvc0_graph_init nve4_graph_init_unk88xx[];
 
+extern struct nvc0_graph_init nvf0_graph_init_unk40xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk70xx[];
+extern struct nvc0_graph_init nvf0_graph_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_graph_init_tpc[];
+
 int  nvc0_grctx_generate(struct nvc0_graph_priv *);
 void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
 void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
@@ -266,6 +271,11 @@ extern struct nvc0_graph_init nve4_grctx_init_unk80xx[];
 extern struct nvc0_graph_init nve4_grctx_init_unk90xx[];
 
 extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nvc0_graph_init nvf0_grctx_init_unk44xx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk5bxx[];
+extern struct nvc0_graph_init nvf0_grctx_init_unk60xx[];
+
+extern struct nouveau_oclass *nv108_grctx_oclass;
 
 #define mmio_data(s,a,p) do {                                                  \
        info->buffer[info->buffer_nr] = round_up(info->addr, (a));             \
index 2f0ac78322345a0d5dd997f5dc3d83327ba4b809..b1acb9939d95cfe460cea8e9aa2475d85256c20f 100644 (file)
@@ -41,7 +41,7 @@ nvf0_graph_sclass[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_unk40xx[] = {
        { 0x40415c,   1, 0x04, 0x00000000 },
        { 0x404170,   1, 0x04, 0x00000000 },
@@ -60,7 +60,7 @@ nvf0_graph_init_unk58xx[] = {
        {}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_unk70xx[] = {
        { 0x407010,   1, 0x04, 0x00000000 },
        { 0x407040,   1, 0x04, 0x80440424 },
@@ -68,7 +68,7 @@ nvf0_graph_init_unk70xx[] = {
        {}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_unk5bxx[] = {
        { 0x405b44,   1, 0x04, 0x00000000 },
        { 0x405b50,   1, 0x04, 0x00000000 },
@@ -114,7 +114,7 @@ nvf0_graph_init_gpc[] = {
        {}
 };
 
-static struct nvc0_graph_init
+struct nvc0_graph_init
 nvf0_graph_init_tpc[] = {
        { 0x419d0c,   1, 0x04, 0x00000000 },
        { 0x419d10,   1, 0x04, 0x00000014 },
@@ -243,6 +243,6 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
        .cclass = &nvf0_grctx_oclass,
        .sclass =  nvf0_graph_sclass,
        .mmio = nvf0_graph_init_mmio,
-       .fecs.ucode = 0 ? &nvf0_graph_fecs_ucode : NULL,
+       .fecs.ucode = &nvf0_graph_fecs_ucode,
        .gpccs.ucode = &nvf0_graph_gpccs_ucode,
 }.base;
index 560c3593dae75e365a647d3e4858d65be85c39e6..e71a4325e670f69c4fce0893b973760a9522185a 100644 (file)
@@ -230,9 +230,26 @@ struct nve0_channel_ind_class {
 
 #define NV04_DISP_CLASS                                              0x00000046
 
+#define NV04_DISP_MTHD                                               0x00000000
+#define NV04_DISP_MTHD_HEAD                                          0x00000001
+
+#define NV04_DISP_SCANOUTPOS                                         0x00000000
+
 struct nv04_display_class {
 };
 
+struct nv04_display_scanoutpos {
+       s64 time[2];
+       u32 vblanks;
+       u32 vblanke;
+       u32 vtotal;
+       u32 vline;
+       u32 hblanks;
+       u32 hblanke;
+       u32 htotal;
+       u32 hline;
+};
+
 /* 5070: NV50_DISP
  * 8270: NV84_DISP
  * 8370: NVA0_DISP
@@ -252,6 +269,11 @@ struct nv04_display_class {
 #define NVE0_DISP_CLASS                                              0x00009170
 #define NVF0_DISP_CLASS                                              0x00009270
 
+#define NV50_DISP_MTHD                                               0x00000000
+#define NV50_DISP_MTHD_HEAD                                          0x00000003
+
+#define NV50_DISP_SCANOUTPOS                                         0x00000000
+
 #define NV50_DISP_SOR_MTHD                                           0x00010000
 #define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
 #define NV50_DISP_SOR_MTHD_HEAD                                      0x00000018
index ac2881d1776ac1a071371a84f379ea57ea8ee856..7b8ea221b00dc7bf0d88021fc5443ae059401cbb 100644 (file)
@@ -38,7 +38,8 @@ enum nv_subdev_type {
        NVDEV_SUBDEV_THERM,
        NVDEV_SUBDEV_CLOCK,
 
-       NVDEV_ENGINE_DMAOBJ,
+       NVDEV_ENGINE_FIRST,
+       NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
        NVDEV_ENGINE_FIFO,
        NVDEV_ENGINE_SW,
        NVDEV_ENGINE_GR,
@@ -70,6 +71,7 @@ struct nouveau_device {
        const char *dbgopt;
        const char *name;
        const char *cname;
+       u64 disable_mask;
 
        enum {
                NV_04    = 0x04,
index 8c32cf4d83c78bb39773cd11233e59941da52699..26b6b2bb11121e28d4de88f08d4f308ab02fed7a 100644 (file)
@@ -109,6 +109,7 @@ extern struct nouveau_oclass *nv50_fifo_oclass;
 extern struct nouveau_oclass *nv84_fifo_oclass;
 extern struct nouveau_oclass *nvc0_fifo_oclass;
 extern struct nouveau_oclass *nve0_fifo_oclass;
+extern struct nouveau_oclass *nv108_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
 int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
index 8e1b52312ddc5cb2675696223ea8344548452ef1..97705618de979e9fe1f810ca3ee5f557d5dd19e2 100644 (file)
@@ -69,6 +69,7 @@ extern struct nouveau_oclass *nvd7_graph_oclass;
 extern struct nouveau_oclass *nvd9_graph_oclass;
 extern struct nouveau_oclass *nve4_graph_oclass;
 extern struct nouveau_oclass *nvf0_graph_oclass;
+extern struct nouveau_oclass *nv108_graph_oclass;
 
 extern const struct nouveau_bitfield nv04_graph_nsource[];
 extern struct nouveau_ofuncs nv04_graph_ofuncs;
index 4f4ff4502c3d2f284e2e6424abe05a923fbc6ff7..9faa98e67ad8c9057273eff0a7fc0f145799abf8 100644 (file)
@@ -4,8 +4,7 @@
 #include <core/subdev.h>
 #include <core/device.h>
 
-#include <subdev/fb.h>
-
+struct nouveau_mem;
 struct nouveau_vma;
 
 struct nouveau_bar {
@@ -29,27 +28,7 @@ nouveau_bar(void *obj)
        return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
 }
 
-#define nouveau_bar_create(p,e,o,d)                                            \
-       nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_bar_init(p)                                                    \
-       nouveau_subdev_init(&(p)->base)
-#define nouveau_bar_fini(p,s)                                                  \
-       nouveau_subdev_fini(&(p)->base, (s))
-
-int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
-                       struct nouveau_oclass *, int, void **);
-void nouveau_bar_destroy(struct nouveau_bar *);
-
-void _nouveau_bar_dtor(struct nouveau_object *);
-#define _nouveau_bar_init _nouveau_subdev_init
-#define _nouveau_bar_fini _nouveau_subdev_fini
-
 extern struct nouveau_oclass nv50_bar_oclass;
 extern struct nouveau_oclass nvc0_bar_oclass;
 
-int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
-                     struct nouveau_mem *, struct nouveau_object **);
-
-void nv84_bar_flush(struct nouveau_bar *);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
new file mode 100644 (file)
index 0000000..c5e6d1e
--- /dev/null
@@ -0,0 +1,66 @@
+#ifndef __NVBIOS_RAMCFG_H__
+#define __NVBIOS_RAMCFG_H__
+
+struct nouveau_bios;
+
+struct nvbios_ramcfg {
+       unsigned rammap_11_08_01:1;
+       unsigned rammap_11_08_0c:2;
+       unsigned rammap_11_08_10:1;
+       unsigned rammap_11_11_0c:2;
+
+       unsigned ramcfg_11_01_01:1;
+       unsigned ramcfg_11_01_02:1;
+       unsigned ramcfg_11_01_04:1;
+       unsigned ramcfg_11_01_08:1;
+       unsigned ramcfg_11_01_10:1;
+       unsigned ramcfg_11_01_20:1;
+       unsigned ramcfg_11_01_40:1;
+       unsigned ramcfg_11_01_80:1;
+       unsigned ramcfg_11_02_03:2;
+       unsigned ramcfg_11_02_04:1;
+       unsigned ramcfg_11_02_08:1;
+       unsigned ramcfg_11_02_10:1;
+       unsigned ramcfg_11_02_40:1;
+       unsigned ramcfg_11_02_80:1;
+       unsigned ramcfg_11_03_0f:4;
+       unsigned ramcfg_11_03_30:2;
+       unsigned ramcfg_11_03_c0:2;
+       unsigned ramcfg_11_03_f0:4;
+       unsigned ramcfg_11_04:8;
+       unsigned ramcfg_11_06:8;
+       unsigned ramcfg_11_07_02:1;
+       unsigned ramcfg_11_07_04:1;
+       unsigned ramcfg_11_07_08:1;
+       unsigned ramcfg_11_07_10:1;
+       unsigned ramcfg_11_07_40:1;
+       unsigned ramcfg_11_07_80:1;
+       unsigned ramcfg_11_08_01:1;
+       unsigned ramcfg_11_08_02:1;
+       unsigned ramcfg_11_08_04:1;
+       unsigned ramcfg_11_08_08:1;
+       unsigned ramcfg_11_08_10:1;
+       unsigned ramcfg_11_08_20:1;
+       unsigned ramcfg_11_09:8;
+
+       unsigned timing[11];
+       unsigned timing_20_2e_03:2;
+       unsigned timing_20_2e_30:2;
+       unsigned timing_20_2e_c0:2;
+       unsigned timing_20_2f_03:2;
+       unsigned timing_20_2c_003f:6;
+       unsigned timing_20_2c_1fc0:7;
+       unsigned timing_20_30_f8:5;
+       unsigned timing_20_30_07:3;
+       unsigned timing_20_31_0007:3;
+       unsigned timing_20_31_0078:4;
+       unsigned timing_20_31_0780:4;
+       unsigned timing_20_31_0800:1;
+       unsigned timing_20_31_7000:3;
+       unsigned timing_20_31_8000:1;
+};
+
+u8 nvbios_ramcfg_count(struct nouveau_bios *);
+u8 nvbios_ramcfg_index(struct nouveau_bios *);
+
+#endif
index bc15e03208773cea8a16ce3a62a84e4cea702ec3..5bdf8e4db40a137df8b00f4a0183424929ad828c 100644 (file)
@@ -1,11 +1,25 @@
 #ifndef __NVBIOS_RAMMAP_H__
 #define __NVBIOS_RAMMAP_H__
 
-u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
-                       u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
-u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
-                       u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
-                       u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+struct nvbios_ramcfg;
+
+u32 nvbios_rammapTe(struct nouveau_bios *, u8 *ver, u8 *hdr,
+                   u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+u32 nvbios_rammapEe(struct nouveau_bios *, int idx,
+                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEm(struct nouveau_bios *, u16 mhz,
+                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp(struct nouveau_bios *, u16 mhz,
+                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                   struct nvbios_ramcfg *);
+
+u32 nvbios_rammapSe(struct nouveau_bios *, u32 data,
+                   u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+                   u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp(struct nouveau_bios *, u32 data,
+                   u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+                   u8 *ver, u8 *hdr,
+                   struct nvbios_ramcfg *);
 
 #endif
index 963694b5422492ea17526f88ef14ce5e30b70554..76d914b67ab5cc9c0a435d5fc77654cbbfb884e3 100644 (file)
@@ -1,8 +1,14 @@
 #ifndef __NVBIOS_TIMING_H__
 #define __NVBIOS_TIMING_H__
 
-u16 nvbios_timing_table(struct nouveau_bios *,
-                       u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+struct nvbios_ramcfg;
+
+u16 nvbios_timingTe(struct nouveau_bios *,
+                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_timingEe(struct nouveau_bios *, int idx,
+                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timingEp(struct nouveau_bios *, int idx,
+                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                   struct nvbios_ramcfg *);
 
 #endif
index 685c9b12ee4cff48a682f373b7dbd94d5ea0bb24..ed1ac68c38b354a58211c386e1c9bc450b8501ab 100644 (file)
@@ -9,7 +9,6 @@ struct nouveau_devinit {
        bool post;
        void (*meminit)(struct nouveau_devinit *);
        int  (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
-
 };
 
 static inline struct nouveau_devinit *
@@ -18,32 +17,16 @@ nouveau_devinit(void *obj)
        return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
 }
 
-#define nouveau_devinit_create(p,e,o,d)                                        \
-       nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_devinit_destroy(p)                                             \
-       nouveau_subdev_destroy(&(p)->base)
-#define nouveau_devinit_init(p) ({                                             \
-       struct nouveau_devinit *d = (p);                                       \
-       _nouveau_devinit_init(nv_object(d));                                   \
-})
-#define nouveau_devinit_fini(p,s) ({                                           \
-       struct nouveau_devinit *d = (p);                                       \
-       _nouveau_devinit_fini(nv_object(d), (s));                              \
-})
-
-int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
-                           struct nouveau_oclass *, int, void **);
-#define _nouveau_devinit_dtor _nouveau_subdev_dtor
-int _nouveau_devinit_init(struct nouveau_object *);
-int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
-
-extern struct nouveau_oclass nv04_devinit_oclass;
-extern struct nouveau_oclass nv05_devinit_oclass;
-extern struct nouveau_oclass nv10_devinit_oclass;
-extern struct nouveau_oclass nv1a_devinit_oclass;
-extern struct nouveau_oclass nv20_devinit_oclass;
-extern struct nouveau_oclass nv50_devinit_oclass;
-extern struct nouveau_oclass nva3_devinit_oclass;
-extern struct nouveau_oclass nvc0_devinit_oclass;
+extern struct nouveau_oclass *nv04_devinit_oclass;
+extern struct nouveau_oclass *nv05_devinit_oclass;
+extern struct nouveau_oclass *nv10_devinit_oclass;
+extern struct nouveau_oclass *nv1a_devinit_oclass;
+extern struct nouveau_oclass *nv20_devinit_oclass;
+extern struct nouveau_oclass *nv50_devinit_oclass;
+extern struct nouveau_oclass *nv84_devinit_oclass;
+extern struct nouveau_oclass *nv98_devinit_oclass;
+extern struct nouveau_oclass *nva3_devinit_oclass;
+extern struct nouveau_oclass *nvaf_devinit_oclass;
+extern struct nouveau_oclass *nvc0_devinit_oclass;
 
 #endif
index d89dbdf39b0db501159a873eeb5ada0612a3ba35..d7ecafbae1ca1c1b0a43192673fbf5ae582ffa60 100644 (file)
@@ -106,6 +106,13 @@ extern struct nouveau_oclass *nvaf_fb_oclass;
 extern struct nouveau_oclass *nvc0_fb_oclass;
 extern struct nouveau_oclass *nve0_fb_oclass;
 
+#include <subdev/bios/ramcfg.h>
+
+struct nouveau_ram_data {
+       struct nvbios_ramcfg bios;
+       u32 freq;
+};
+
 struct nouveau_ram {
        struct nouveau_object base;
        enum {
@@ -142,6 +149,12 @@ struct nouveau_ram {
        } rammap, ramcfg, timing;
        u32 freq;
        u32 mr[16];
+       u32 mr1_nuts;
+
+       struct nouveau_ram_data *next;
+       struct nouveau_ram_data former;
+       struct nouveau_ram_data xition;
+       struct nouveau_ram_data target;
 };
 
 #endif
index 4aca33887aaa639e4ad738065bfa775319d75afc..c1df26f3230c17f8fe6ff2a270b95cbafb441c6f 100644 (file)
@@ -23,21 +23,6 @@ nv_memobj(void *obj)
        return obj;
 }
 
-#define nouveau_instobj_create(p,e,o,d)                                        \
-       nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instobj_init(p)                                                \
-       nouveau_object_init(&(p)->base)
-#define nouveau_instobj_fini(p,s)                                              \
-       nouveau_object_fini(&(p)->base, (s))
-
-int  nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
-                            struct nouveau_oclass *, int, void **);
-void nouveau_instobj_destroy(struct nouveau_instobj *);
-
-void _nouveau_instobj_dtor(struct nouveau_object *);
-#define _nouveau_instobj_init nouveau_object_init
-#define _nouveau_instobj_fini nouveau_object_fini
-
 struct nouveau_instmem {
        struct nouveau_subdev base;
        struct list_head list;
@@ -60,21 +45,8 @@ nouveau_instmem(void *obj)
        return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
 }
 
-#define nouveau_instmem_create(p,e,o,d)                                        \
-       nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_instmem_destroy(p)                                             \
-       nouveau_subdev_destroy(&(p)->base)
-int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
-                           struct nouveau_oclass *, int, void **);
-int nouveau_instmem_init(struct nouveau_instmem *);
-int nouveau_instmem_fini(struct nouveau_instmem *, bool);
-
-#define _nouveau_instmem_dtor _nouveau_subdev_dtor
-int _nouveau_instmem_init(struct nouveau_object *);
-int _nouveau_instmem_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nv04_instmem_oclass;
-extern struct nouveau_oclass nv40_instmem_oclass;
-extern struct nouveau_oclass nv50_instmem_oclass;
+extern struct nouveau_oclass *nv04_instmem_oclass;
+extern struct nouveau_oclass *nv40_instmem_oclass;
+extern struct nouveau_oclass *nv50_instmem_oclass;
 
 #endif
index fcf57fa309bfd54bbcbba86bdfc53ef5fcc481f6..c9509039f94b4b57bff45b718651bbdfd663012c 100644 (file)
@@ -131,9 +131,5 @@ void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
 void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-                      struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
-                    struct nouveau_mem *mem);
 
 #endif
index d70ba342aa2e8f46c5de87c406b223d39ed98194..7098ddd546788c3adca970af4afc1a5a8d122d1a 100644 (file)
  */
 
 #include <core/object.h>
-#include <subdev/bar.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include "priv.h"
 
 struct nouveau_barobj {
        struct nouveau_object base;
index 160d27f3c7b4732fc67a069a415681295a0d8dd4..090d594a21b36ee9633462db5f0ee838547b1557 100644 (file)
 #include <core/gpuobj.h>
 
 #include <subdev/timer.h>
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
 
+#include "priv.h"
+
 struct nv50_bar_priv {
        struct nouveau_bar base;
        spinlock_t lock;
index b2ec7411eb2eb7cb43e5434016cad756159c69d6..bac5e754de35acdf838b571bf1d6e25eb8b5faec 100644 (file)
 #include <core/gpuobj.h>
 
 #include <subdev/timer.h>
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
 
+#include "priv.h"
+
 struct nvc0_bar_priv {
        struct nouveau_bar base;
        spinlock_t lock;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
new file mode 100644 (file)
index 0000000..ffad8f3
--- /dev/null
@@ -0,0 +1,26 @@
+#ifndef __NVKM_BAR_PRIV_H__
+#define __NVKM_BAR_PRIV_H__
+
+#include <subdev/bar.h>
+
+#define nouveau_bar_create(p,e,o,d)                                            \
+       nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p)                                                    \
+       nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s)                                                  \
+       nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+                       struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+int  nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+                      struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
index df1b1b42309337cb14c79b503497e49bf80ee49b..de201baeb053aea79a6632289a865de94fc5dc07 100644 (file)
@@ -9,6 +9,7 @@
 #include <subdev/bios/dp.h>
 #include <subdev/bios/gpio.h>
 #include <subdev/bios/init.h>
+#include <subdev/bios/ramcfg.h>
 #include <subdev/devinit.h>
 #include <subdev/i2c.h>
 #include <subdev/vga.h>
@@ -391,43 +392,14 @@ init_unknown_script(struct nouveau_bios *bios)
        return 0x0000;
 }
 
-static u16
-init_ram_restrict_table(struct nvbios_init *init)
-{
-       struct nouveau_bios *bios = init->bios;
-       struct bit_entry bit_M;
-       u16 data = 0x0000;
-
-       if (!bit_entry(bios, 'M', &bit_M)) {
-               if (bit_M.version == 1 && bit_M.length >= 5)
-                       data = nv_ro16(bios, bit_M.offset + 3);
-               if (bit_M.version == 2 && bit_M.length >= 3)
-                       data = nv_ro16(bios, bit_M.offset + 1);
-       }
-
-       if (data == 0x0000)
-               warn("ram restrict table not found\n");
-       return data;
-}
-
 static u8
 init_ram_restrict_group_count(struct nvbios_init *init)
 {
-       struct nouveau_bios *bios = init->bios;
-       struct bit_entry bit_M;
-
-       if (!bit_entry(bios, 'M', &bit_M)) {
-               if (bit_M.version == 1 && bit_M.length >= 5)
-                       return nv_ro08(bios, bit_M.offset + 2);
-               if (bit_M.version == 2 && bit_M.length >= 3)
-                       return nv_ro08(bios, bit_M.offset + 0);
-       }
-
-       return 0x00;
+       return nvbios_ramcfg_count(init->bios);
 }
 
 static u8
-init_ram_restrict_strap(struct nvbios_init *init)
+init_ram_restrict(struct nvbios_init *init)
 {
        /* This appears to be the behaviour of the VBIOS parser, and *is*
         * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
@@ -438,18 +410,8 @@ init_ram_restrict_strap(struct nvbios_init *init)
         * in case *not* re-reading the strap causes similar breakage.
         */
        if (!init->ramcfg || init->bios->version.major < 0x70)
-               init->ramcfg = init_rd32(init, 0x101000);
-       return (init->ramcfg & 0x00000003c) >> 2;
-}
-
-static u8
-init_ram_restrict(struct nvbios_init *init)
-{
-       u8  strap = init_ram_restrict_strap(init);
-       u16 table = init_ram_restrict_table(init);
-       if (table)
-               return nv_ro08(init->bios, table + strap);
-       return 0x00;
+               init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->bios);
+       return (init->ramcfg & 0x7fffffff);
 }
 
 static u8
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
new file mode 100644 (file)
index 0000000..991aedd
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
+
+static u8
+nvbios_ramcfg_strap(struct nouveau_bios *bios)
+{
+       return (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+}
+
+u8
+nvbios_ramcfg_count(struct nouveau_bios *bios)
+{
+       struct bit_entry bit_M;
+
+       if (!bit_entry(bios, 'M', &bit_M)) {
+               if (bit_M.version == 1 && bit_M.length >= 5)
+                       return nv_ro08(bios, bit_M.offset + 2);
+               if (bit_M.version == 2 && bit_M.length >= 3)
+                       return nv_ro08(bios, bit_M.offset + 0);
+       }
+
+       return 0x00;
+}
+
+u8
+nvbios_ramcfg_index(struct nouveau_bios *bios)
+{
+       u8 strap = nvbios_ramcfg_strap(bios);
+       u32 xlat = 0x00000000;
+       struct bit_entry bit_M;
+
+       if (!bit_entry(bios, 'M', &bit_M)) {
+               if (bit_M.version == 1 && bit_M.length >= 5)
+                       xlat = nv_ro16(bios, bit_M.offset + 3);
+               if (bit_M.version == 2 && bit_M.length >= 3)
+                       xlat = nv_ro16(bios, bit_M.offset + 1);
+       }
+
+       if (xlat)
+               strap = nv_ro08(bios, xlat + strap);
+       return strap;
+}
index 916fa9d302b7f2fd108617844db25ddb565e4b5d..1811b2cb047276ead557d6d1025568667a1fbc24 100644 (file)
 
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
 #include <subdev/bios/rammap.h>
 
-u16
-nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
-                   u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+u32
+nvbios_rammapTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+               u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
        struct bit_entry bit_P;
        u16 rammap = 0x0000;
@@ -57,12 +58,12 @@ nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
        return 0x0000;
 }
 
-u16
-nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
-                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEe(struct nouveau_bios *bios, int idx,
+               u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
        u8  snr, ssz;
-       u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+       u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
        if (rammap && idx < *cnt) {
                rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
                *hdr = *len;
@@ -73,16 +74,100 @@ nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
        return 0x0000;
 }
 
-u16
-nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
-                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u32
+nvbios_rammapEm(struct nouveau_bios *bios, u16 khz,
+               u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
        int idx = 0;
        u32 data;
-       while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+       while ((data = nvbios_rammapEe(bios, idx++, ver, hdr, cnt, len))) {
                if (khz >= nv_ro16(bios, data + 0x00) &&
                    khz <= nv_ro16(bios, data + 0x02))
                        break;
        }
        return data;
 }
+
+u32
+nvbios_rammapEp(struct nouveau_bios *bios, u16 khz,
+               u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+               struct nvbios_ramcfg *p)
+{
+       u32 data = nvbios_rammapEm(bios, khz, ver, hdr, cnt, len);
+       memset(p, 0x00, sizeof(*p));
+       switch (!!data * *ver) {
+       case 0x11:
+               p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+               p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
+               p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+               p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+               break;
+       default:
+               data = 0;
+               break;
+       }
+       return data;
+}
+
+u32
+nvbios_rammapSe(struct nouveau_bios *bios, u32 data,
+               u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+               u8 *ver, u8 *hdr)
+{
+       if (idx < ecnt) {
+               data = data + ehdr + (idx * elen);
+               *ver = ever;
+               *hdr = elen;
+               return data;
+       }
+       return 0;
+}
+
+u32
+nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
+               u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
+               u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
+{
+       data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr);
+       switch (!!data * *ver) {
+       case 0x11:
+               p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
+               p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
+               p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
+               p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
+               p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
+               p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
+               p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
+               p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
+               p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
+               p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
+               p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
+               p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
+               p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
+               p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
+               p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
+               p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
+               p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
+               p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
+               p->ramcfg_11_04    = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
+               p->ramcfg_11_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
+               p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
+               p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
+               p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
+               p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
+               p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
+               p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
+               p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
+               p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
+               p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
+               p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
+               p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
+               p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
+               p->ramcfg_11_09    = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+               break;
+       default:
+               data = 0;
+               break;
+       }
+       return data;
+}
index 151c2d6aaee872f1398fcca6aab7ae7b48a928d7..350d44ab2ba24b9e56a9dbf178a86497159cc326 100644 (file)
 
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/ramcfg.h>
 #include <subdev/bios/timing.h>
 
 u16
-nvbios_timing_table(struct nouveau_bios *bios,
-                   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+nvbios_timingTe(struct nouveau_bios *bios,
+               u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
        struct bit_entry bit_P;
        u16 timing = 0x0000;
@@ -47,11 +48,15 @@ nvbios_timing_table(struct nouveau_bios *bios,
                                *hdr = nv_ro08(bios, timing + 1);
                                *cnt = nv_ro08(bios, timing + 2);
                                *len = nv_ro08(bios, timing + 3);
+                               *snr = 0;
+                               *ssz = 0;
                                return timing;
                        case 0x20:
                                *hdr = nv_ro08(bios, timing + 1);
-                               *cnt = nv_ro08(bios, timing + 3);
+                               *cnt = nv_ro08(bios, timing + 5);
                                *len = nv_ro08(bios, timing + 2);
+                               *snr = nv_ro08(bios, timing + 4);
+                               *ssz = nv_ro08(bios, timing + 3);
                                return timing;
                        default:
                                break;
@@ -63,11 +68,60 @@ nvbios_timing_table(struct nouveau_bios *bios,
 }
 
 u16
-nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+nvbios_timingEe(struct nouveau_bios *bios, int idx,
+               u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-       u8  hdr, cnt;
-       u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
-       if (timing && idx < cnt)
-               return timing + hdr + (idx * *len);
+       u8  snr, ssz;
+       u16 timing = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+       if (timing && idx < *cnt) {
+               timing += *hdr + idx * (*len + (snr * ssz));
+               *hdr = *len;
+               *cnt = snr;
+               *len = ssz;
+               return timing;
+       }
        return 0x0000;
 }
+
+u16
+nvbios_timingEp(struct nouveau_bios *bios, int idx,
+               u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+               struct nvbios_ramcfg *p)
+{
+       u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
+       switch (!!data * *ver) {
+       case 0x20:
+               p->timing[0] = nv_ro32(bios, data + 0x00);
+               p->timing[1] = nv_ro32(bios, data + 0x04);
+               p->timing[2] = nv_ro32(bios, data + 0x08);
+               p->timing[3] = nv_ro32(bios, data + 0x0c);
+               p->timing[4] = nv_ro32(bios, data + 0x10);
+               p->timing[5] = nv_ro32(bios, data + 0x14);
+               p->timing[6] = nv_ro32(bios, data + 0x18);
+               p->timing[7] = nv_ro32(bios, data + 0x1c);
+               p->timing[8] = nv_ro32(bios, data + 0x20);
+               p->timing[9] = nv_ro32(bios, data + 0x24);
+               p->timing[10] = nv_ro32(bios, data + 0x28);
+               p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
+               p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
+               p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
+               p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
+               temp = nv_ro16(bios, data + 0x2c);
+               p->timing_20_2c_003f = (temp & 0x003f) >> 0;
+               p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
+               p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
+               p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
+               temp = nv_ro16(bios, data + 0x31);
+               p->timing_20_31_0007 = (temp & 0x0007) >> 0;
+               p->timing_20_31_0078 = (temp & 0x0078) >> 3;
+               p->timing_20_31_0780 = (temp & 0x0780) >> 7;
+               p->timing_20_31_0800 = (temp & 0x0800) >> 11;
+               p->timing_20_31_7000 = (temp & 0x7000) >> 12;
+               p->timing_20_31_8000 = (temp & 0x8000) >> 15;
+               break;
+       default:
+               data = 0;
+               break;
+       }
+       return data;
+}
index e2938a21b06fe8a7ac255e9abe106ce1533dc448..dd62baead39c54c9a906939576553b59bf9355a2 100644 (file)
@@ -182,9 +182,12 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
        clk->pstate = pstatei;
 
        if (pfb->ram->calc) {
-               ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
-               if (ret == 0)
-                       ret = pfb->ram->prog(pfb);
+               int khz = pstate->base.domain[nv_clk_src_mem];
+               do {
+                       ret = pfb->ram->calc(pfb, khz);
+                       if (ret == 0)
+                               ret = pfb->ram->prog(pfb);
+               } while (ret > 0);
                pfb->ram->tidy(pfb);
        }
 
index 30c1f3a4158e3de87fd2bfb24592b9ea685b2d88..b74db6cfc4e21ee8f811a08e54da3dfdc7f824a1 100644 (file)
@@ -25,7 +25,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clock.h>
-#include <subdev/devinit/priv.h>
+#include <subdev/devinit/nv04.h>
 
 #include "pll.h"
 
index 4c62e84b96f5f7a4aa65d5e8d37bd25d12b120eb..d3c37c96f0e7eaeed97c08749bc23611642705db 100644 (file)
@@ -457,7 +457,7 @@ nve0_domain[] = {
        { nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
        { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
        { nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
-       { nv_clk_src_mem    , 0x03, 0, "memory", 1000 },
+       { nv_clk_src_mem    , 0x03, 0, "memory", 500 },
        { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
        { nv_clk_src_hubk01 , 0x05 },
        { nv_clk_src_vdec   , 0x06 },
index 79c81d3d9bacee32ec45f36dee8b73cee3ac2953..8fa34e8152c20b6ae90f2ca020f6c49a077f2cdd 100644 (file)
 
 #include <core/option.h>
 
-#include <subdev/devinit.h>
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
+#include <subdev/vga.h>
+
+#include "priv.h"
 
 int
 _nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
@@ -37,18 +39,41 @@ _nouveau_devinit_fini(struct nouveau_object *object, bool suspend)
        if (suspend)
                devinit->post = true;
 
+       /* unlock the extended vga crtc regs */
+       nv_lockvgac(devinit, false);
+
        return nouveau_subdev_fini(&devinit->base, suspend);
 }
 
 int
 _nouveau_devinit_init(struct nouveau_object *object)
 {
+       struct nouveau_devinit_impl *impl = (void *)object->oclass;
        struct nouveau_devinit *devinit = (void *)object;
-       int ret = nouveau_subdev_init(&devinit->base);
+       int ret;
+
+       ret = nouveau_subdev_init(&devinit->base);
+       if (ret)
+               return ret;
+
+       ret = nvbios_init(&devinit->base, devinit->post);
        if (ret)
                return ret;
 
-       return nvbios_init(&devinit->base, devinit->post);
+       if (impl->disable)
+               nv_device(devinit)->disable_mask |= impl->disable(devinit);
+       return 0;
+}
+
+void
+_nouveau_devinit_dtor(struct nouveau_object *object)
+{
+       struct nouveau_devinit *devinit = (void *)object;
+
+       /* lock crtc regs */
+       nv_lockvgac(devinit, true);
+
+       nouveau_subdev_destroy(&devinit->base);
 }
 
 int
@@ -57,6 +82,7 @@ nouveau_devinit_create_(struct nouveau_object *parent,
                        struct nouveau_oclass *oclass,
                        int size, void **pobject)
 {
+       struct nouveau_devinit_impl *impl = (void *)oclass;
        struct nouveau_device *device = nv_device(parent);
        struct nouveau_devinit *devinit;
        int ret;
@@ -68,5 +94,7 @@ nouveau_devinit_create_(struct nouveau_object *parent,
                return ret;
 
        devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
+       devinit->meminit = impl->meminit;
+       devinit->pll_set = impl->pll_set;
        return 0;
 }
index 27c8235f1a85d08a3128dd3266416461ba08a9a7..7037eae46e445bd5d3278dfc70119dac180cadbb 100644 (file)
 #include <subdev/vga.h>
 
 #include "fbmem.h"
-#include "priv.h"
-
-struct nv04_devinit_priv {
-       struct nouveau_devinit base;
-       int owner;
-};
+#include "nv04.h"
 
 static void
 nv04_devinit_meminit(struct nouveau_devinit *devinit)
@@ -393,17 +388,21 @@ int
 nv04_devinit_fini(struct nouveau_object *object, bool suspend)
 {
        struct nv04_devinit_priv *priv = (void *)object;
+       int ret;
 
        /* make i2c busses accessible */
        nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
 
-       /* unlock extended vga crtc regs, and unslave crtcs */
-       nv_lockvgac(priv, false);
+       ret = nouveau_devinit_fini(&priv->base, suspend);
+       if (ret)
+               return ret;
+
+       /* unslave crtcs */
        if (priv->owner < 0)
                priv->owner = nv_rdvgaowner(priv);
        nv_wrvgaowner(priv, 0);
 
-       return nouveau_devinit_fini(&priv->base, suspend);
+       return 0;
 }
 
 int
@@ -431,14 +430,13 @@ nv04_devinit_dtor(struct nouveau_object *object)
 {
        struct nv04_devinit_priv *priv = (void *)object;
 
-       /* restore vga owner saved at first init, and lock crtc regs  */
+       /* restore vga owner saved at first init */
        nv_wrvgaowner(priv, priv->owner);
-       nv_lockvgac(priv, true);
 
        nouveau_devinit_destroy(&priv->base);
 }
 
-static int
+int
 nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
@@ -451,19 +449,19 @@ nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.meminit = nv04_devinit_meminit;
-       priv->base.pll_set = nv04_devinit_pll_set;
        priv->owner = -1;
        return 0;
 }
 
-struct nouveau_oclass
-nv04_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0x04),
-       .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x04),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv04_devinit_ctor,
                .dtor = nv04_devinit_dtor,
                .init = nv04_devinit_init,
                .fini = nv04_devinit_fini,
        },
-};
+       .meminit = nv04_devinit_meminit,
+       .pll_set = nv04_devinit_pll_set,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h
new file mode 100644 (file)
index 0000000..23470a5
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef __NVKM_DEVINIT_NV04_H__
+#define __NVKM_DEVINIT_NV04_H__
+
+#include "priv.h"
+
+struct nv04_devinit_priv {
+       struct nouveau_devinit base;
+       u8 owner;
+};
+
+int  nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+                      struct nouveau_oclass *, void *, u32,
+                      struct nouveau_object **);
+void nv04_devinit_dtor(struct nouveau_object *);
+int  nv04_devinit_init(struct nouveau_object *);
+int  nv04_devinit_fini(struct nouveau_object *, bool);
+int  nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
+
+#endif
index b1912a8a8942129b6292323faed5aa11475bfe3e..98b7e6780dc7f4826c4eb946419d0ceb6cfb5d84 100644 (file)
 #include <subdev/vga.h>
 
 #include "fbmem.h"
-#include "priv.h"
-
-struct nv05_devinit_priv {
-       struct nouveau_devinit base;
-       u8 owner;
-};
+#include "nv04.h"
 
 static void
 nv05_devinit_meminit(struct nouveau_devinit *devinit)
@@ -49,7 +44,7 @@ nv05_devinit_meminit(struct nouveau_devinit *devinit)
                { 0x06, 0x00 },
                { 0x00, 0x00 }
        };
-       struct nv05_devinit_priv *priv = (void *)devinit;
+       struct nv04_devinit_priv *priv = (void *)devinit;
        struct nouveau_bios *bios = nouveau_bios(priv);
        struct io_mapping *fb;
        u32 patt = 0xdeadbeef;
@@ -130,31 +125,15 @@ out:
        fbmem_fini(fb);
 }
 
-static int
-nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
-{
-       struct nv05_devinit_priv *priv;
-       int ret;
-
-       ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       priv->base.meminit = nv05_devinit_meminit;
-       priv->base.pll_set = nv04_devinit_pll_set;
-       return 0;
-}
-
-struct nouveau_oclass
-nv05_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0x05),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv05_devinit_ctor,
+struct nouveau_oclass *
+nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x05),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv04_devinit_ctor,
                .dtor = nv04_devinit_dtor,
                .init = nv04_devinit_init,
                .fini = nv04_devinit_fini,
        },
-};
+       .meminit = nv05_devinit_meminit,
+       .pll_set = nv04_devinit_pll_set,
+}.base;
index 8d274dba1ef17a363af4d4c33c3cea4451025159..32b3d2131a7f0d14b0e37e8b9888e2b38cf937c5 100644 (file)
 #include <subdev/vga.h>
 
 #include "fbmem.h"
-#include "priv.h"
-
-struct nv10_devinit_priv {
-       struct nouveau_devinit base;
-       u8 owner;
-};
+#include "nv04.h"
 
 static void
 nv10_devinit_meminit(struct nouveau_devinit *devinit)
 {
-       struct nv10_devinit_priv *priv = (void *)devinit;
+       struct nv04_devinit_priv *priv = (void *)devinit;
        static const int mem_width[] = { 0x10, 0x00, 0x20 };
        int mem_width_count;
        uint32_t patt = 0xdeadbeef;
@@ -101,31 +96,15 @@ amount_found:
        fbmem_fini(fb);
 }
 
-static int
-nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
-{
-       struct nv10_devinit_priv *priv;
-       int ret;
-
-       ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       priv->base.meminit = nv10_devinit_meminit;
-       priv->base.pll_set = nv04_devinit_pll_set;
-       return 0;
-}
-
-struct nouveau_oclass
-nv10_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0x10),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv10_devinit_ctor,
+struct nouveau_oclass *
+nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x10),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv04_devinit_ctor,
                .dtor = nv04_devinit_dtor,
                .init = nv04_devinit_init,
                .fini = nv04_devinit_fini,
        },
-};
+       .meminit = nv10_devinit_meminit,
+       .pll_set = nv04_devinit_pll_set,
+}.base;
index e9743cdabe757df66b92509f295d9c800a6ae919..526d0c6faacd3d2623f1dae3aa2cd1b92087a00e 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv1a_devinit_priv {
-       struct nouveau_devinit base;
-       u8 owner;
-};
-
-static int
-nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
-{
-       struct nv1a_devinit_priv *priv;
-       int ret;
-
-       ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       priv->base.pll_set = nv04_devinit_pll_set;
-       return 0;
-}
-
-struct nouveau_oclass
-nv1a_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0x1a),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv1a_devinit_ctor,
+struct nouveau_oclass *
+nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x1a),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv04_devinit_ctor,
                .dtor = nv04_devinit_dtor,
                .init = nv04_devinit_init,
                .fini = nv04_devinit_fini,
        },
-};
+       .pll_set = nv04_devinit_pll_set,
+}.base;
index 6cc6080d3bc01e4e9e461bb5c323f82e48a60908..4689ba303b0bd6620ec080fcfe9fa3defad2f3ff 100644 (file)
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 #include "fbmem.h"
 
-struct nv20_devinit_priv {
-       struct nouveau_devinit base;
-       u8 owner;
-};
-
 static void
 nv20_devinit_meminit(struct nouveau_devinit *devinit)
 {
-       struct nv20_devinit_priv *priv = (void *)devinit;
+       struct nv04_devinit_priv *priv = (void *)devinit;
        struct nouveau_device *device = nv_device(priv);
        uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
        uint32_t amount, off;
@@ -65,31 +60,15 @@ nv20_devinit_meminit(struct nouveau_devinit *devinit)
        fbmem_fini(fb);
 }
 
-static int
-nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
-{
-       struct nv20_devinit_priv *priv;
-       int ret;
-
-       ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       priv->base.meminit = nv20_devinit_meminit;
-       priv->base.pll_set = nv04_devinit_pll_set;
-       return 0;
-}
-
-struct nouveau_oclass
-nv20_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0x20),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv20_devinit_ctor,
+struct nouveau_oclass *
+nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x20),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv04_devinit_ctor,
                .dtor = nv04_devinit_dtor,
                .init = nv04_devinit_init,
                .fini = nv04_devinit_fini,
        },
-};
+       .meminit = nv20_devinit_meminit,
+       .pll_set = nv04_devinit_pll_set,
+}.base;
index 6df72247c477b5a7c96214fe28a0d7a40be82696..b46c62a1d5d86a9a44a0db88932dacbca6a10598 100644 (file)
@@ -28,9 +28,9 @@
 #include <subdev/bios/init.h>
 #include <subdev/vga.h>
 
-#include "priv.h"
+#include "nv50.h"
 
-static int
+int
 nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
 {
        struct nv50_devinit_priv *priv = (void *)devinit;
@@ -74,6 +74,19 @@ nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
        return 0;
 }
 
+static u64
+nv50_devinit_disable(struct nouveau_devinit *devinit)
+{
+       struct nv50_devinit_priv *priv = (void *)devinit;
+       u32 r001540 = nv_rd32(priv, 0x001540);
+       u64 disable = 0ULL;
+
+       if (!(r001540 & 0x40000000))
+               disable |= (1ULL << NVDEV_ENGINE_MPEG);
+
+       return disable;
+}
+
 int
 nv50_devinit_init(struct nouveau_object *object)
 {
@@ -120,7 +133,7 @@ nv50_devinit_init(struct nouveau_object *object)
        return 0;
 }
 
-static int
+int
 nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
@@ -133,17 +146,18 @@ nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.pll_set = nv50_devinit_pll_set;
        return 0;
 }
 
-struct nouveau_oclass
-nv50_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0x50),
-       .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x50),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv50_devinit_ctor,
                .dtor = _nouveau_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nouveau_devinit_fini,
        },
-};
+       .pll_set = nv50_devinit_pll_set,
+       .disable = nv50_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
new file mode 100644 (file)
index 0000000..141c27e
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DEVINIT_NV50_H__
+#define __NVKM_DEVINIT_NV50_H__
+
+#include "priv.h"
+
+struct nv50_devinit_priv {
+       struct nouveau_devinit base;
+};
+
+int  nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *,
+                      struct nouveau_oclass *, void *, u32,
+                      struct nouveau_object **);
+int  nv50_devinit_init(struct nouveau_object *);
+int  nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+int  nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
new file mode 100644 (file)
index 0000000..7874225
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv84_devinit_disable(struct nouveau_devinit *devinit)
+{
+       struct nv50_devinit_priv *priv = (void *)devinit;
+       u32 r001540 = nv_rd32(priv, 0x001540);
+       u32 r00154c = nv_rd32(priv, 0x00154c);
+       u64 disable = 0ULL;
+
+       if (!(r001540 & 0x40000000)) {
+               disable |= (1ULL << NVDEV_ENGINE_MPEG);
+               disable |= (1ULL << NVDEV_ENGINE_VP);
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+               disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+       }
+
+       if (!(r00154c & 0x00000004))
+               disable |= (1ULL << NVDEV_ENGINE_DISP);
+       if (!(r00154c & 0x00000020))
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+       if (!(r00154c & 0x00000040))
+               disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+       return disable;
+}
+
+struct nouveau_oclass *
+nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x84),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_devinit_ctor,
+               .dtor = _nouveau_devinit_dtor,
+               .init = nv50_devinit_init,
+               .fini = _nouveau_devinit_fini,
+       },
+       .pll_set = nv50_devinit_pll_set,
+       .disable = nv84_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
new file mode 100644 (file)
index 0000000..2b0e963
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nv98_devinit_disable(struct nouveau_devinit *devinit)
+{
+       struct nv50_devinit_priv *priv = (void *)devinit;
+       u32 r001540 = nv_rd32(priv, 0x001540);
+       u32 r00154c = nv_rd32(priv, 0x00154c);
+       u64 disable = 0ULL;
+
+       if (!(r001540 & 0x40000000)) {
+               disable |= (1ULL << NVDEV_ENGINE_VP);
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+               disable |= (1ULL << NVDEV_ENGINE_PPP);
+       }
+
+       if (!(r00154c & 0x00000004))
+               disable |= (1ULL << NVDEV_ENGINE_DISP);
+       if (!(r00154c & 0x00000020))
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+       if (!(r00154c & 0x00000040))
+               disable |= (1ULL << NVDEV_ENGINE_CRYPT);
+
+       return disable;
+}
+
+struct nouveau_oclass *
+nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0x98),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_devinit_ctor,
+               .dtor = _nouveau_devinit_dtor,
+               .init = nv50_devinit_init,
+               .fini = _nouveau_devinit_fini,
+       },
+       .pll_set = nv50_devinit_pll_set,
+       .disable = nv98_devinit_disable,
+}.base;
index 76a68b29014119f57c909c4770777016e8838250..6dedf1dad7f7bce5b71d33dd01111cac7324621b 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv50.h"
 
-static int
+int
 nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
 {
-       struct nva3_devinit_priv *priv = (void *)devinit;
+       struct nv50_devinit_priv *priv = (void *)devinit;
        struct nouveau_bios *bios = nouveau_bios(priv);
        struct nvbios_pll info;
        int N, fN, M, P;
@@ -58,30 +58,38 @@ nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
        return ret;
 }
 
-static int
-nva3_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
+static u64
+nva3_devinit_disable(struct nouveau_devinit *devinit)
 {
-       struct nv50_devinit_priv *priv;
-       int ret;
+       struct nv50_devinit_priv *priv = (void *)devinit;
+       u32 r001540 = nv_rd32(priv, 0x001540);
+       u32 r00154c = nv_rd32(priv, 0x00154c);
+       u64 disable = 0ULL;
 
-       ret = nouveau_devinit_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
+       if (!(r001540 & 0x40000000)) {
+               disable |= (1ULL << NVDEV_ENGINE_VP);
+               disable |= (1ULL << NVDEV_ENGINE_PPP);
+       }
+
+       if (!(r00154c & 0x00000004))
+               disable |= (1ULL << NVDEV_ENGINE_DISP);
+       if (!(r00154c & 0x00000020))
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+       if (!(r00154c & 0x00000200))
+               disable |= (1ULL << NVDEV_ENGINE_COPY0);
 
-       priv->base.pll_set = nva3_devinit_pll_set;
-       return 0;
+       return disable;
 }
 
-struct nouveau_oclass
-nva3_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0xa3),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nva3_devinit_ctor,
+struct nouveau_oclass *
+nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0xa3),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_devinit_ctor,
                .dtor = _nouveau_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nouveau_devinit_fini,
        },
-};
+       .pll_set = nva3_devinit_pll_set,
+       .disable = nva3_devinit_disable,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
new file mode 100644 (file)
index 0000000..4fc68d2
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static u64
+nvaf_devinit_disable(struct nouveau_devinit *devinit)
+{
+       struct nv50_devinit_priv *priv = (void *)devinit;
+       u32 r001540 = nv_rd32(priv, 0x001540);
+       u32 r00154c = nv_rd32(priv, 0x00154c);
+       u64 disable = 0;
+
+       if (!(r001540 & 0x40000000)) {
+               disable |= (1ULL << NVDEV_ENGINE_VP);
+               disable |= (1ULL << NVDEV_ENGINE_PPP);
+       }
+
+       if (!(r00154c & 0x00000004))
+               disable |= (1ULL << NVDEV_ENGINE_DISP);
+       if (!(r00154c & 0x00000020))
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+       if (!(r00154c & 0x00000040))
+               disable |= (1ULL << NVDEV_ENGINE_VIC);
+       if (!(r00154c & 0x00000200))
+               disable |= (1ULL << NVDEV_ENGINE_COPY0);
+
+       return disable;
+}
+
+struct nouveau_oclass *
+nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0xaf),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_devinit_ctor,
+               .dtor = _nouveau_devinit_dtor,
+               .init = nv50_devinit_init,
+               .fini = _nouveau_devinit_fini,
+       },
+       .pll_set = nva3_devinit_pll_set,
+       .disable = nvaf_devinit_disable,
+}.base;
index 19e265bf4574a0ad04086bd99d6fa0c2dd9912f9..fa7e63766b1b19ae47b8557c4581b77cb0ff0a22 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv50.h"
 
 static int
 nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
 {
-       struct nvc0_devinit_priv *priv = (void *)devinit;
+       struct nv50_devinit_priv *priv = (void *)devinit;
        struct nouveau_bios *bios = nouveau_bios(priv);
        struct nvbios_pll info;
        int N, fN, M, P;
@@ -59,6 +59,33 @@ nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
        return ret;
 }
 
+static u64
+nvc0_devinit_disable(struct nouveau_devinit *devinit)
+{
+       struct nv50_devinit_priv *priv = (void *)devinit;
+       u32 r022500 = nv_rd32(priv, 0x022500);
+       u64 disable = 0ULL;
+
+       if (r022500 & 0x00000001)
+               disable |= (1ULL << NVDEV_ENGINE_DISP);
+
+       if (r022500 & 0x00000002) {
+               disable |= (1ULL << NVDEV_ENGINE_VP);
+               disable |= (1ULL << NVDEV_ENGINE_PPP);
+       }
+
+       if (r022500 & 0x00000004)
+               disable |= (1ULL << NVDEV_ENGINE_BSP);
+       if (r022500 & 0x00000008)
+               disable |= (1ULL << NVDEV_ENGINE_VENC);
+       if (r022500 & 0x00000100)
+               disable |= (1ULL << NVDEV_ENGINE_COPY0);
+       if (r022500 & 0x00000200)
+               disable |= (1ULL << NVDEV_ENGINE_COPY1);
+
+       return disable;
+}
+
 static int
 nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
@@ -72,19 +99,20 @@ nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.pll_set = nvc0_devinit_pll_set;
        if (nv_rd32(priv, 0x022500) & 0x00000001)
                priv->base.post = true;
        return 0;
 }
 
-struct nouveau_oclass
-nvc0_devinit_oclass = {
-       .handle = NV_SUBDEV(DEVINIT, 0xc0),
-       .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
+       .base.handle = NV_SUBDEV(DEVINIT, 0xc0),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_devinit_ctor,
                .dtor = _nouveau_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nouveau_devinit_fini,
        },
-};
+       .pll_set = nvc0_devinit_pll_set,
+       .disable = nvc0_devinit_disable,
+}.base;
index 7d622e2b01712508259e1f4436e0284543da7435..822a2fbf44a5b3d601d5c79eedb7b2af68654d86 100644 (file)
@@ -6,20 +6,32 @@
 #include <subdev/clock/pll.h>
 #include <subdev/devinit.h>
 
-void nv04_devinit_dtor(struct nouveau_object *);
-int  nv04_devinit_init(struct nouveau_object *);
-int  nv04_devinit_fini(struct nouveau_object *, bool);
-int  nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32);
-
-void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *);
-
-
-struct nv50_devinit_priv {
-       struct nouveau_devinit base;
+struct nouveau_devinit_impl {
+       struct nouveau_oclass base;
+       void (*meminit)(struct nouveau_devinit *);
+       int  (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
+       u64  (*disable)(struct nouveau_devinit *);
 };
 
-int  nv50_devinit_init(struct nouveau_object *);
+#define nouveau_devinit_create(p,e,o,d)                                        \
+       nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_devinit_destroy(p) ({                                          \
+       struct nouveau_devinit *d = (p);                                       \
+       _nouveau_devinit_dtor(nv_object(d));                                   \
+})
+#define nouveau_devinit_init(p) ({                                             \
+       struct nouveau_devinit *d = (p);                                       \
+       _nouveau_devinit_init(nv_object(d));                                   \
+})
+#define nouveau_devinit_fini(p,s) ({                                           \
+       struct nouveau_devinit *d = (p);                                       \
+       _nouveau_devinit_fini(nv_object(d), (s));                              \
+})
+
+int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
+                           struct nouveau_oclass *, int, void **);
+void _nouveau_devinit_dtor(struct nouveau_object *);
+int _nouveau_devinit_init(struct nouveau_object *);
+int _nouveau_devinit_fini(struct nouveau_object *, bool suspend);
 
 #endif
index 34f9605ffee61321d381d6c3ed7e552f8918c51b..66fe959b4f7431dd2cc1b9ce6520263560fb769f 100644 (file)
 #include <subdev/bios.h>
 #include "priv.h"
 
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card.  for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
 int
-nouveau_gddr5_calc(struct nouveau_ram *ram)
+nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts)
 {
-       struct nouveau_bios *bios = nouveau_bios(ram);
-       int pd, lf, xd, vh, vr, vo;
-       int WL, CL, WR, at, dt, ds;
+       int pd, lf, xd, vh, vr, vo, l3;
+       int WL, CL, WR, at[2], dt, ds;
        int rq = ram->freq < 1000000; /* XXX */
 
-       switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+       switch (ram->ramcfg.version) {
        case 0x11:
-               pd =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
-               lf =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
-               xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
-               vh =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
-               vr =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
-               vo =   nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+               pd =  ram->next->bios.ramcfg_11_01_80;
+               lf =  ram->next->bios.ramcfg_11_01_40;
+               xd = !ram->next->bios.ramcfg_11_01_20;
+               vh =  ram->next->bios.ramcfg_11_02_10;
+               vr =  ram->next->bios.ramcfg_11_02_04;
+               vo =  ram->next->bios.ramcfg_11_06;
+               l3 = !ram->next->bios.ramcfg_11_07_02;
                break;
        default:
                return -ENOSYS;
        }
 
-       switch (!!ram->timing.data * ram->timing.version) {
+       switch (ram->timing.version) {
        case 0x20:
-               WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
-               CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
-               WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
-               at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
-               dt =  nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
-               ds =  nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+               WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
+               CL = (ram->next->bios.timing[1] & 0x0000001f);
+               WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+               at[0] = ram->next->bios.timing_20_2e_c0;
+               at[1] = ram->next->bios.timing_20_2e_30;
+               dt =  ram->next->bios.timing_20_2e_03;
+               ds =  ram->next->bios.timing_20_2f_03;
                break;
        default:
                return -ENOSYS;
@@ -71,13 +80,25 @@ nouveau_gddr5_calc(struct nouveau_ram *ram)
 
        ram->mr[1] &= ~0x0bf;
        ram->mr[1] |= (xd & 0x01) << 7;
-       ram->mr[1] |= (at & 0x03) << 4;
+       ram->mr[1] |= (at[0] & 0x03) << 4;
        ram->mr[1] |= (dt & 0x03) << 2;
        ram->mr[1] |= (ds & 0x03) << 0;
 
+       /* this seems wrong, alternate field used for the broadcast
+        * on nuts vs non-nuts configs..  meh, it matches for now.
+        */
+       ram->mr1_nuts = ram->mr[1];
+       if (nuts) {
+               ram->mr[1] &= ~0x030;
+               ram->mr[1] |= (at[1] & 0x03) << 4;
+       }
+
        ram->mr[3] &= ~0x020;
        ram->mr[3] |= (rq & 0x01) << 5;
 
+       ram->mr[5] &= ~0x004;
+       ram->mr[5] |= (l3 << 2);
+
        if (!vo)
                vo = (ram->mr[6] & 0xff0) >> 4;
        if (ram->mr[6] & 0x001)
@@ -86,11 +107,16 @@ nouveau_gddr5_calc(struct nouveau_ram *ram)
        ram->mr[6] |= (vo & 0xff) << 4;
        ram->mr[6] |= (pd & 0x01) << 0;
 
-       if (!(ram->mr[7] & 0x100))
-               vr = 0; /* binary driver does this.. bug? */
-       ram->mr[7] &= ~0x188;
-       ram->mr[7] |= (vr & 0x01) << 8;
+       if (NOTE00(vr)) {
+               ram->mr[7] &= ~0x300;
+               ram->mr[7] |= (vr & 0x03) << 8;
+       }
+       ram->mr[7] &= ~0x088;
        ram->mr[7] |= (vh & 0x01) << 7;
        ram->mr[7] |= (lf & 0x01) << 3;
+
+       ram->mr[8] &= ~0x003;
+       ram->mr[8] |= (WR & 0x10) >> 3;
+       ram->mr[8] |= (CL & 0x10) >> 4;
        return 0;
 }
index e5fc37c4caac841977a99e4530059ebd6235b36f..45470e1f0385f4682a654e0c37cb27212d3ba8eb 100644 (file)
@@ -33,6 +33,21 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
        return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
+static void
+nvc0_fb_intr(struct nouveau_subdev *subdev)
+{
+       struct nvc0_fb_priv *priv = (void *)subdev;
+       u32 intr = nv_rd32(priv, 0x000100);
+       if (intr & 0x08000000) {
+               nv_debug(priv, "PFFB intr\n");
+               intr &= ~0x08000000;
+       }
+       if (intr & 0x00002000) {
+               nv_debug(priv, "PBFB intr\n");
+               intr &= ~0x00002000;
+       }
+}
+
 int
 nvc0_fb_init(struct nouveau_object *object)
 {
@@ -86,6 +101,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                        return -EFAULT;
        }
 
+       nv_subdev(priv)->intr = nvc0_fb_intr;
        return 0;
 }
 
index 493125214e88696db12ff1786f88be701e108225..edaf95dee61285d81d468060cd2b284b4da89cc0 100644 (file)
@@ -34,7 +34,7 @@ extern struct nouveau_oclass nvc0_ram_oclass;
 extern struct nouveau_oclass nve0_ram_oclass;
 
 int nouveau_sddr3_calc(struct nouveau_ram *ram);
-int nouveau_gddr5_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
 
 #define nouveau_fb_create(p,e,c,d)                                             \
        nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
index 76762a17d89cfe46a7b794e510fb83225c4aced7..c7fdb3a9e88b06cdcfa3151abf5ae7331c04e409 100644 (file)
@@ -70,13 +70,11 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
        struct nv50_ramseq *hwsq = &ram->hwsq;
        struct nvbios_perfE perfE;
        struct nvbios_pll mpll;
-       struct bit_entry M;
        struct {
                u32 data;
                u8  size;
        } ramcfg, timing;
-       u8  ver, hdr, cnt, strap;
-       u32 data;
+       u8  ver, hdr, cnt, len, strap;
        int N1, M1, N2, M2, P;
        int ret, i;
 
@@ -93,16 +91,7 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
        } while (perfE.memory < freq);
 
        /* locate specific data set for the attached memory */
-       if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
-               nv_error(pfb, "invalid/missing memory table\n");
-               return -EINVAL;
-       }
-
-       strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-       data = nv_ro16(bios, M.offset + 3);
-       if (data)
-               strap = nv_ro08(bios, data + strap);
-
+       strap = nvbios_ramcfg_index(bios);
        if (strap >= cnt) {
                nv_error(pfb, "invalid ramcfg strap\n");
                return -EINVAL;
@@ -113,7 +102,8 @@ nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
        /* lookup memory timings, if bios says they're present */
        strap = nv_ro08(bios, ramcfg.data + 0x01);
        if (strap != 0xff) {
-               timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+               timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
+                                            &cnt, &len);
                if (!timing.data || ver != 0x10 || hdr < 0x12) {
                        nv_error(pfb, "invalid/missing timing entry "
                                 "%02x %04x %02x %02x\n",
index f6292cd9207cf354384738c50a135bea9e0ec562..f4ae8aa46a255948df24c3e4d2f4e259ed583d53 100644 (file)
@@ -79,8 +79,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
        struct nva3_ram *ram = (void *)pfb->ram;
        struct nva3_ramfuc *fuc = &ram->fuc;
        struct nva3_clock_info mclk;
-       struct bit_entry M;
-       u8  ver, cnt, strap;
+       u8  ver, cnt, len, strap;
        u32 data;
        struct {
                u32 data;
@@ -91,24 +90,15 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
        int ret;
 
        /* lookup memory config data relevant to the target frequency */
-       rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
-                                        &cnt, &ramcfg.size);
+       rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+                                    &cnt, &ramcfg.size);
        if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
                nv_error(pfb, "invalid/missing rammap entry\n");
                return -EINVAL;
        }
 
        /* locate specific data set for the attached memory */
-       if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
-               nv_error(pfb, "invalid/missing memory table\n");
-               return -EINVAL;
-       }
-
-       strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-       data = nv_ro16(bios, M.offset + 1);
-       if (data)
-               strap = nv_ro08(bios, data + strap);
-
+       strap = nvbios_ramcfg_index(bios);
        if (strap >= cnt) {
                nv_error(pfb, "invalid ramcfg strap\n");
                return -EINVAL;
@@ -123,8 +113,8 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
        /* lookup memory timings, if bios says they're present */
        strap = nv_ro08(bios, ramcfg.data + 0x01);
        if (strap != 0xff) {
-               timing.data = nvbios_timing_entry(bios, strap, &ver,
-                                                &timing.size);
+               timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+                                            &cnt, &len);
                if (!timing.data || ver != 0x10 || timing.size < 0x19) {
                        nv_error(pfb, "invalid/missing timing entry\n");
                        return -EINVAL;
index f464547c6bab70c714626672bfbb015d7512d1a3..0391b824ee767d7629e333c56965366bc15347fd 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #include <subdev/bios.h>
-#include <subdev/bios/bit.h>
 #include <subdev/bios/pll.h>
 #include <subdev/bios/rammap.h>
 #include <subdev/bios/timing.h>
@@ -134,9 +133,7 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
        struct nouveau_bios *bios = nouveau_bios(pfb);
        struct nvc0_ram *ram = (void *)pfb->ram;
        struct nvc0_ramfuc *fuc = &ram->fuc;
-       struct bit_entry M;
-       u8  ver, cnt, strap;
-       u32 data;
+       u8  ver, cnt, len, strap;
        struct {
                u32 data;
                u8  size;
@@ -147,24 +144,15 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
        int ret;
 
        /* lookup memory config data relevant to the target frequency */
-       rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
-                                        &cnt, &ramcfg.size);
+       rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
+                                    &cnt, &ramcfg.size);
        if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
                nv_error(pfb, "invalid/missing rammap entry\n");
                return -EINVAL;
        }
 
        /* locate specific data set for the attached memory */
-       if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
-               nv_error(pfb, "invalid/missing memory table\n");
-               return -EINVAL;
-       }
-
-       strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-       data = nv_ro16(bios, M.offset + 1);
-       if (data)
-               strap = nv_ro08(bios, data + strap);
-
+       strap = nvbios_ramcfg_index(bios);
        if (strap >= cnt) {
                nv_error(pfb, "invalid ramcfg strap\n");
                return -EINVAL;
@@ -179,8 +167,8 @@ nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
        /* lookup memory timings, if bios says they're present */
        strap = nv_ro08(bios, ramcfg.data + 0x01);
        if (strap != 0xff) {
-               timing.data = nvbios_timing_entry(bios, strap, &ver,
-                                                &timing.size);
+               timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
+                                            &cnt, &len);
                if (!timing.data || ver != 0x10 || timing.size < 0x19) {
                        nv_error(pfb, "invalid/missing timing entry\n");
                        return -EINVAL;
index bc86cfd084f66c106c4a46980212a59cdde47dad..3257c522a0219026e5147d37725b1f6a30fcac21 100644 (file)
@@ -25,7 +25,6 @@
 #include <subdev/gpio.h>
 
 #include <subdev/bios.h>
-#include <subdev/bios/bit.h>
 #include <subdev/bios/pll.h>
 #include <subdev/bios/init.h>
 #include <subdev/bios/rammap.h>
 
 #include "ramfuc.h"
 
+/* binary driver only executes this path if the condition (a) is true
+ * for any configuration (combination of rammap+ramcfg+timing) that
+ * can be reached on a given card.  for now, we will execute the branch
+ * unconditionally in the hope that a "false everywhere" in the bios
+ * tables doesn't actually mean "don't touch this".
+ */
+#define NOTE00(a) 1
+
 struct nve0_ramfuc {
        struct ramfuc base;
 
@@ -104,7 +111,9 @@ struct nve0_ramfuc {
        struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
 
        struct ramfuc_reg r_0x62c000;
+
        struct ramfuc_reg r_0x10f200;
+
        struct ramfuc_reg r_0x10f210;
        struct ramfuc_reg r_0x10f310;
        struct ramfuc_reg r_0x10f314;
@@ -118,12 +127,17 @@ struct nve0_ramfuc {
        struct ramfuc_reg r_0x10f65c;
        struct ramfuc_reg r_0x10f6bc;
        struct ramfuc_reg r_0x100710;
-       struct ramfuc_reg r_0x10f750;
+       struct ramfuc_reg r_0x100750;
 };
 
 struct nve0_ram {
        struct nouveau_ram base;
        struct nve0_ramfuc fuc;
+
+       u32 parts;
+       u32 pmask;
+       u32 pnuts;
+
        int from;
        int mode;
        int N1, fN1, M1, P1;
@@ -134,17 +148,17 @@ struct nve0_ram {
  * GDDR5
  ******************************************************************************/
 static void
-train(struct nve0_ramfuc *fuc, u32 magic)
+nve0_ram_train(struct nve0_ramfuc *fuc, u32 mask, u32 data)
 {
        struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
-       struct nouveau_fb *pfb = nouveau_fb(ram);
-       const int mc = nv_rd32(pfb, 0x02243c);
-       int i;
-
-       ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
-       ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
-       for (i = 0; i < mc; i++) {
-               const u32 addr = 0x110974 + (i * 0x1000);
+       u32 addr = 0x110974, i;
+
+       ram_mask(fuc, 0x10f910, mask, data);
+       ram_mask(fuc, 0x10f914, mask, data);
+
+       for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) {
+               if (ram->pmask & (1 << i))
+                       continue;
                ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
        }
 }
@@ -199,12 +213,12 @@ r1373f4_init(struct nve0_ramfuc *fuc)
 }
 
 static void
-r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+r1373f4_fini(struct nve0_ramfuc *fuc)
 {
        struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
-       struct nouveau_bios *bios = nouveau_bios(ram);
-       u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
-       u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+       struct nouveau_ram_data *next = ram->base.next;
+       u8 v0 = next->bios.ramcfg_11_03_c0;
+       u8 v1 = next->bios.ramcfg_11_03_30;
        u32 tmp;
 
        tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
@@ -220,25 +234,46 @@ r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
        ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
 }
 
+static void
+nve0_ram_nuts(struct nve0_ram *ram, struct ramfuc_reg *reg,
+             u32 _mask, u32 _data, u32 _copy)
+{
+       struct nve0_fb_priv *priv = (void *)nouveau_fb(ram);
+       struct ramfuc *fuc = &ram->fuc.base;
+       u32 addr = 0x110000 + (reg->addr[0] & 0xfff);
+       u32 mask = _mask | _copy;
+       u32 data = (_data & _mask) | (reg->data & _copy);
+       u32 i;
+
+       for (i = 0; i < 16; i++, addr += 0x1000) {
+               if (ram->pnuts & (1 << i)) {
+                       u32 prev = nv_rd32(priv, addr);
+                       u32 next = (prev & ~mask) | data;
+                       nouveau_memx_wr32(fuc->memx, addr, next);
+               }
+       }
+}
+#define ram_nuts(s,r,m,d,c)                                                    \
+       nve0_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
+
 static int
 nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 {
-       struct nouveau_bios *bios = nouveau_bios(pfb);
        struct nve0_ram *ram = (void *)pfb->ram;
        struct nve0_ramfuc *fuc = &ram->fuc;
-       const u32 rammap = ram->base.rammap.data;
-       const u32 ramcfg = ram->base.ramcfg.data;
-       const u32 timing = ram->base.timing.data;
-       int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
-       int mv = 1; /*XXX*/
+       struct nouveau_ram_data *next = ram->base.next;
+       int vc = !(next->bios.ramcfg_11_02_08);
+       int mv = !(next->bios.ramcfg_11_02_04);
        u32 mask, data;
 
        ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
        ram_wr32(fuc, 0x62c000, 0x0f0f0000);
 
        /* MR1: turn termination on early, for some reason.. */
-       if ((ram->base.mr[1] & 0x03c) != 0x030)
+       if ((ram->base.mr[1] & 0x03c) != 0x030) {
                ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+               ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000);
+       }
 
        if (vc == 1 && ram_have(fuc, gpio2E)) {
                u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -250,8 +285,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
 
-       ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
-       ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+       nve0_ram_train(fuc, 0x01020000, 0x000c0000);
 
        ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
        ram_nsec(fuc, 1000);
@@ -280,28 +314,28 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        if (1) {
                data |= 0x800807e0;
-               switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
-               case 0xc0: data &= ~0x00000040; break;
-               case 0x80: data &= ~0x00000100; break;
-               case 0x40: data &= ~0x80000000; break;
-               case 0x00: data &= ~0x00000400; break;
+               switch (next->bios.ramcfg_11_03_c0) {
+               case 3: data &= ~0x00000040; break;
+               case 2: data &= ~0x00000100; break;
+               case 1: data &= ~0x80000000; break;
+               case 0: data &= ~0x00000400; break;
                }
 
-               switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
-               case 0x30: data &= ~0x00000020; break;
-               case 0x20: data &= ~0x00000080; break;
-               case 0x10: data &= ~0x00080000; break;
-               case 0x00: data &= ~0x00000200; break;
+               switch (next->bios.ramcfg_11_03_30) {
+               case 3: data &= ~0x00000020; break;
+               case 2: data &= ~0x00000080; break;
+               case 1: data &= ~0x00080000; break;
+               case 0: data &= ~0x00000200; break;
                }
        }
 
-       if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+       if (next->bios.ramcfg_11_02_80)
                mask |= 0x03000000;
-       if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+       if (next->bios.ramcfg_11_02_40)
                mask |= 0x00002000;
-       if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+       if (next->bios.ramcfg_11_07_10)
                mask |= 0x00004000;
-       if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+       if (next->bios.ramcfg_11_07_08)
                mask |= 0x00000003;
        else {
                mask |= 0x34000000;
@@ -314,18 +348,18 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        if (ram->from == 2 && ram->mode != 2) {
                ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
-               ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+               ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000);
                ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
                ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
                ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
                r1373f4_init(fuc);
                ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
-               r1373f4_fini(fuc, ramcfg);
+               r1373f4_fini(fuc);
                ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
        } else
        if (ram->from != 2 && ram->mode != 2) {
                r1373f4_init(fuc);
-               r1373f4_fini(fuc, ramcfg);
+               r1373f4_fini(fuc);
        }
 
        if (ram_have(fuc, gpioMV)) {
@@ -336,49 +370,54 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                }
        }
 
-       if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
-            (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+       if ( (next->bios.ramcfg_11_02_40) ||
+            (next->bios.ramcfg_11_07_10)) {
                ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
                ram_nsec(fuc, 20000);
        }
 
        if (ram->from != 2 && ram->mode == 2) {
+               if (0 /*XXX: Titan */)
+                       ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000);
                ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
                ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
                ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
                r1373f4_init(fuc);
-               r1373f4_fini(fuc, ramcfg);
+               r1373f4_fini(fuc);
                ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
                ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
        } else
        if (ram->from == 2 && ram->mode == 2) {
                ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
                r1373f4_init(fuc);
-               r1373f4_fini(fuc, ramcfg);
+               r1373f4_fini(fuc);
        }
 
        if (ram->mode != 2) /*XXX*/ {
-               if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+               if (next->bios.ramcfg_11_07_40)
                        ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
        }
 
-       data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
-       ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
-       ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
-       ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+       ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+       ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+       ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
 
-       data = nv_ro08(bios, ramcfg + 0x04);
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
-               ram_wr32(fuc, 0x10f698, 0x01010101 * data);
-               ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+       if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) {
+               ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04);
+               ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04);
+       } else
+       if (!next->bios.ramcfg_11_07_08) {
+               ram_wr32(fuc, 0x10f698, 0x00000000);
+               ram_wr32(fuc, 0x10f69c, 0x00000000);
        }
 
        if (ram->mode != 2) {
-               u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
-               ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+               u32 data = 0x01000100 * next->bios.ramcfg_11_04;
+               ram_nuke(fuc, 0x10f694);
+               ram_mask(fuc, 0x10f694, 0xff00ff00, data);
        }
 
-       if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+       if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
                data = 0x00000080;
        else
                data = 0x00000000;
@@ -386,19 +425,19 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x00070000;
        data = 0x00000000;
-       if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+       if (!(next->bios.ramcfg_11_02_80))
                data |= 0x03000000;
-       if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+       if (!(next->bios.ramcfg_11_02_40))
                data |= 0x00002000;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+       if (!(next->bios.ramcfg_11_07_10))
                data |= 0x00004000;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+       if (!(next->bios.ramcfg_11_07_08))
                data |= 0x00000003;
        else
                data |= 0x74000000;
        ram_mask(fuc, 0x10f824, mask, data);
 
-       if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+       if (next->bios.ramcfg_11_01_08)
                data = 0x00000000;
        else
                data = 0x00001000;
@@ -409,61 +448,90 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
        }
 
-       if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+       if (next->bios.ramcfg_11_08_01)
                data = 0x00100000;
        else
                data = 0x00000000;
        ram_mask(fuc, 0x10f82c, 0x00100000, data);
 
        data = 0x00000000;
-       if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+       if (next->bios.ramcfg_11_08_08)
                data |= 0x00002000;
-       if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+       if (next->bios.ramcfg_11_08_04)
                data |= 0x00001000;
-       if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+       if (next->bios.ramcfg_11_08_02)
                data |= 0x00004000;
        ram_mask(fuc, 0x10f830, 0x00007000, data);
 
        /* PFB timing */
-       ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
-       ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
-       ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
-       ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
-       ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
-       ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
-       ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
-       ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
-       ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
-       ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
-       ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
-
-       data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
-       if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
-               data |= 0x70000000;
-       ram_mask(fuc, 0x10f604, 0x70000300, data);
-
-       data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
-       if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
-               data |= 0x00000100;
-       ram_mask(fuc, 0x10f614, 0x70000000, data);
-
-       data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
-       if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
-               data |= 0x00000100;
-       ram_mask(fuc, 0x10f610, 0x70000000, data);
+       ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+       ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+       ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+       ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+       ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+       ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+       ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+       ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+       ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+       ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+       ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
+
+       data = mask = 0x00000000;
+       if (NOTE00(ramcfg_08_20)) {
+               if (next->bios.ramcfg_11_08_20)
+                       data |= 0x01000000;
+               mask |= 0x01000000;
+       }
+       ram_mask(fuc, 0x10f200, mask, data);
+
+       data = mask = 0x00000000;
+       if (NOTE00(ramcfg_02_03 != 0)) {
+               data |= (next->bios.ramcfg_11_02_03) << 8;
+               mask |= 0x00000300;
+       }
+       if (NOTE00(ramcfg_01_10)) {
+               if (next->bios.ramcfg_11_01_10)
+                       data |= 0x70000000;
+               mask |= 0x70000000;
+       }
+       ram_mask(fuc, 0x10f604, mask, data);
+
+       data = mask = 0x00000000;
+       if (NOTE00(timing_30_07 != 0)) {
+               data |= (next->bios.timing_20_30_07) << 28;
+               mask |= 0x70000000;
+       }
+       if (NOTE00(ramcfg_01_01)) {
+               if (next->bios.ramcfg_11_01_01)
+                       data |= 0x00000100;
+               mask |= 0x00000100;
+       }
+       ram_mask(fuc, 0x10f614, mask, data);
+
+       data = mask = 0x00000000;
+       if (NOTE00(timing_30_07 != 0)) {
+               data |= (next->bios.timing_20_30_07) << 28;
+               mask |= 0x70000000;
+       }
+       if (NOTE00(ramcfg_01_02)) {
+               if (next->bios.ramcfg_11_01_02)
+                       data |= 0x00000100;
+               mask |= 0x00000100;
+       }
+       ram_mask(fuc, 0x10f610, mask, data);
 
        mask = 0x33f00000;
        data = 0x00000000;
-       if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+       if (!(next->bios.ramcfg_11_01_04))
                data |= 0x20200000;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+       if (!(next->bios.ramcfg_11_07_80))
                data |= 0x12800000;
        /*XXX: see note above about there probably being some condition
         *     for the 10f824 stuff that uses ramcfg 3...
         */
-       if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
-               if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
-                       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+       if ( (next->bios.ramcfg_11_03_f0)) {
+               if (next->bios.rammap_11_08_0c) {
+                       if (!(next->bios.ramcfg_11_07_80))
                                mask |= 0x00000020;
                        else
                                data |= 0x00000020;
@@ -476,49 +544,53 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        ram_mask(fuc, 0x10f808, mask, data);
 
-       data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
-       ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+       ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
 
-       data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
-       if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
-               data |= 0x00000004;
-       if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
-               ram_wr32(fuc, 0x10f750, 0x04000009);
+       data = mask = 0x00000000;
+       if (NOTE00(ramcfg_02_03 != 0)) {
+               data |= next->bios.ramcfg_11_02_03;
+               mask |= 0x00000003;
+       }
+       if (NOTE00(ramcfg_01_10)) {
+               if (next->bios.ramcfg_11_01_10)
+                       data |= 0x00000004;
+               mask |= 0x00000004;
+       }
+
+       if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) {
+               ram_mask(fuc, 0x100750, 0x00000008, 0x00000008);
                ram_wr32(fuc, 0x100710, 0x00000000);
                ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
        }
-       ram_mask(fuc, 0x100770, 0x00000007, data);
 
-       data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
-       if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+       data = (next->bios.timing_20_30_07) << 8;
+       if (next->bios.ramcfg_11_01_01)
                data |= 0x80000000;
        ram_mask(fuc, 0x100778, 0x00000700, data);
 
-       data = nv_ro16(bios, timing + 0x2c);
-       ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
-       ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
-
-       data = nv_ro08(bios, timing + 0x30);
-       ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+       ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
+       data = (next->bios.timing[10] & 0x7f000000) >> 24;
+       if (data < next->bios.timing_20_2c_1fc0)
+               data = next->bios.timing_20_2c_1fc0;
+       ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+       ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
 
-       data = nv_ro16(bios, timing + 0x31);
-       ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
-                                           (data & 0x0780) << 10 |
-                                           (data & 0x0078) <<  5 |
-                                           (data & 0x0007));
-       ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
-                                           (data & 0x7000) >> 12);
+       ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 |
+                                           next->bios.timing_20_31_0780 << 17 |
+                                           next->bios.timing_20_31_0078 << 8 |
+                                           next->bios.timing_20_31_0007);
+       ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 |
+                                           next->bios.timing_20_31_7000);
 
        ram_wr32(fuc, 0x10f090, 0x4000007e);
-       ram_nsec(fuc, 1000);
+       ram_nsec(fuc, 2000);
        ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
        ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
-       ram_nsec(fuc, 2000);
        ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
 
-       if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+       if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
                u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
-               train(fuc, 0xa4010000); /*XXX*/
+               nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
                ram_nsec(fuc, 1000);
                ram_wr32(fuc, 0x10f294, temp);
        }
@@ -528,7 +600,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
        ram_nsec(fuc, 1000);
        ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
-       ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+       ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */
        ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
        ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
 
@@ -544,12 +616,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
        ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
        ram_nsec(fuc, 1000);
+       ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800);
 
        data  = ram_rd32(fuc, 0x10f978);
        data &= ~0x00046144;
        data |=  0x0000000b;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
-               if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+       if (!(next->bios.ramcfg_11_07_08)) {
+               if (!(next->bios.ramcfg_11_07_04))
                        data |= 0x0000200c;
                else
                        data |= 0x00000000;
@@ -563,44 +636,43 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_wr32(fuc, 0x10f830, data);
        }
 
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+       if (!(next->bios.ramcfg_11_07_08)) {
                data = 0x88020000;
-               if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+               if ( (next->bios.ramcfg_11_07_04))
                        data |= 0x10000000;
-               if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+               if (!(next->bios.rammap_11_08_10))
                        data |= 0x00080000;
        } else {
                data = 0xa40e0000;
        }
-       train(fuc, data);
-       ram_nsec(fuc, 1000);
+       nve0_ram_train(fuc, 0xbc0f0000, data);
+       if (1) /* XXX: not always? */
+               ram_nsec(fuc, 1000);
 
        if (ram->mode == 2) { /*XXX*/
                ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
        }
 
-       /* MR5: (re)enable LP3 if necessary
-        * XXX: need to find the switch, keeping off for now
-        */
-       ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+       /* LP3 */
+       if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5])
+               ram_nsec(fuc, 1000);
 
        if (ram->mode != 2) {
                ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
                ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
        }
 
-       if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
-               ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
-               ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
-       }
+       if (next->bios.ramcfg_11_07_02)
+               nve0_ram_train(fuc, 0x80020000, 0x01000000);
 
        ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
-       if (nv_ro08(bios, rammap + 0x08) & 0x01)
+       if (next->bios.rammap_11_08_01)
                data = 0x00000800;
        else
                data = 0x00000000;
        ram_mask(fuc, 0x10f200, 0x00000800, data);
+       ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800);
        return 0;
 }
 
@@ -611,17 +683,14 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 static int
 nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 {
-       struct nouveau_bios *bios = nouveau_bios(pfb);
        struct nve0_ram *ram = (void *)pfb->ram;
        struct nve0_ramfuc *fuc = &ram->fuc;
        const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
        const u32 runk0 = ram->fN1 << 16;
        const u32 runk1 = ram->fN1;
-       const u32 rammap = ram->base.rammap.data;
-       const u32 ramcfg = ram->base.ramcfg.data;
-       const u32 timing = ram->base.timing.data;
-       int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
-       int mv = 1; /*XXX*/
+       struct nouveau_ram_data *next = ram->base.next;
+       int vc = !(next->bios.ramcfg_11_02_08);
+       int mv = !(next->bios.ramcfg_11_02_04);
        u32 mask, data;
 
        ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -636,7 +705,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
        }
 
        ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
-       if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+       if ((next->bios.ramcfg_11_03_f0))
                ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
 
        ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -661,28 +730,28 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
        if (1) {
                mask |= 0x800807e0;
                data |= 0x800807e0;
-               switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
-               case 0xc0: data &= ~0x00000040; break;
-               case 0x80: data &= ~0x00000100; break;
-               case 0x40: data &= ~0x80000000; break;
-               case 0x00: data &= ~0x00000400; break;
+               switch (next->bios.ramcfg_11_03_c0) {
+               case 3: data &= ~0x00000040; break;
+               case 2: data &= ~0x00000100; break;
+               case 1: data &= ~0x80000000; break;
+               case 0: data &= ~0x00000400; break;
                }
 
-               switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
-               case 0x30: data &= ~0x00000020; break;
-               case 0x20: data &= ~0x00000080; break;
-               case 0x10: data &= ~0x00080000; break;
-               case 0x00: data &= ~0x00000200; break;
+               switch (next->bios.ramcfg_11_03_30) {
+               case 3: data &= ~0x00000020; break;
+               case 2: data &= ~0x00000080; break;
+               case 1: data &= ~0x00080000; break;
+               case 0: data &= ~0x00000200; break;
                }
        }
 
-       if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+       if (next->bios.ramcfg_11_02_80)
                mask |= 0x03000000;
-       if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+       if (next->bios.ramcfg_11_02_40)
                mask |= 0x00002000;
-       if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+       if (next->bios.ramcfg_11_07_10)
                mask |= 0x00004000;
-       if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+       if (next->bios.ramcfg_11_07_08)
                mask |= 0x00000003;
        else
                mask |= 0x14000000;
@@ -692,7 +761,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
        data  = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
-       data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+       data |= (next->bios.ramcfg_11_03_30) << 12;
        ram_wr32(fuc, 0x1373ec, data);
        ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
        ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -724,68 +793,67 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
                }
        }
 
-       if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
-            (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+       if ( (next->bios.ramcfg_11_02_40) ||
+            (next->bios.ramcfg_11_07_10)) {
                ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
                ram_nsec(fuc, 20000);
        }
 
        if (ram->mode != 2) /*XXX*/ {
-               if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+               if (next->bios.ramcfg_11_07_40)
                        ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
        }
 
-       data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
-       ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
-       ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
-       ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+       ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
+       ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
+       ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
 
        mask = 0x00010000;
        data = 0x00000000;
-       if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+       if (!(next->bios.ramcfg_11_02_80))
                data |= 0x03000000;
-       if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+       if (!(next->bios.ramcfg_11_02_40))
                data |= 0x00002000;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+       if (!(next->bios.ramcfg_11_07_10))
                data |= 0x00004000;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+       if (!(next->bios.ramcfg_11_07_08))
                data |= 0x00000003;
        else
                data |= 0x14000000;
        ram_mask(fuc, 0x10f824, mask, data);
        ram_nsec(fuc, 1000);
 
-       if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+       if (next->bios.ramcfg_11_08_01)
                data = 0x00100000;
        else
                data = 0x00000000;
        ram_mask(fuc, 0x10f82c, 0x00100000, data);
 
        /* PFB timing */
-       ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
-       ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
-       ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
-       ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
-       ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
-       ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
-       ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
-       ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
-       ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
-       ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
-       ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+       ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
+       ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
+       ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
+       ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
+       ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
+       ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
+       ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
+       ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
+       ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
+       ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
+       ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
 
        mask = 0x33f00000;
        data = 0x00000000;
-       if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+       if (!(next->bios.ramcfg_11_01_04))
                data |= 0x20200000;
-       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+       if (!(next->bios.ramcfg_11_07_80))
                data |= 0x12800000;
        /*XXX: see note above about there probably being some condition
         *     for the 10f824 stuff that uses ramcfg 3...
         */
-       if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
-               if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
-                       if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+       if ( (next->bios.ramcfg_11_03_f0)) {
+               if (next->bios.rammap_11_08_0c) {
+                       if (!(next->bios.ramcfg_11_07_80))
                                mask |= 0x00000020;
                        else
                                data |= 0x00000020;
@@ -799,21 +867,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        ram_mask(fuc, 0x10f808, mask, data);
 
-       data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
-       ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+       ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
 
-       data = nv_ro16(bios, timing + 0x2c);
-       ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
+       ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
 
-       if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6) >
-           ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
-               data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6;
-       else
-               data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+       data = (next->bios.timing[10] & 0x7f000000) >> 24;
+       if (data < next->bios.timing_20_2c_1fc0)
+               data = next->bios.timing_20_2c_1fc0;
        ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
 
-       data = nv_ro08(bios, timing + 0x30);
-       ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+       ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
 
        ram_wr32(fuc, 0x10f090, 0x4000007f);
        ram_nsec(fuc, 1000);
@@ -855,7 +918,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
 
-       if (nv_ro08(bios, rammap + 0x08) & 0x01)
+       if (next->bios.rammap_11_08_01)
                data = 0x00000800;
        else
                data = 0x00000000;
@@ -868,21 +931,18 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
  ******************************************************************************/
 
 static int
-nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+nve0_ram_calc_data(struct nouveau_fb *pfb, u32 freq,
+                  struct nouveau_ram_data *data)
 {
        struct nouveau_bios *bios = nouveau_bios(pfb);
        struct nve0_ram *ram = (void *)pfb->ram;
-       struct nve0_ramfuc *fuc = &ram->fuc;
-       struct bit_entry M;
-       int ret, refclk, strap, i;
-       u32 data;
-       u8  cnt;
+       u8 strap, cnt, len;
 
        /* lookup memory config data relevant to the target frequency */
-       ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
-                                                  &ram->base.rammap.version,
-                                                  &ram->base.rammap.size, &cnt,
-                                                  &ram->base.ramcfg.size);
+       ram->base.rammap.data = nvbios_rammapEp(bios, freq / 1000,
+                                              &ram->base.rammap.version,
+                                              &ram->base.rammap.size,
+                                              &cnt, &len, &data->bios);
        if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
             ram->base.rammap.size < 0x09) {
                nv_error(pfb, "invalid/missing rammap entry\n");
@@ -890,24 +950,13 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
        }
 
        /* locate specific data set for the attached memory */
-       if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
-               nv_error(pfb, "invalid/missing memory table\n");
-               return -EINVAL;
-       }
-
-       strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
-       data = nv_ro16(bios, M.offset + 1);
-       if (data)
-               strap = nv_ro08(bios, data + strap);
-
-       if (strap >= cnt) {
-               nv_error(pfb, "invalid ramcfg strap\n");
-               return -EINVAL;
-       }
-
-       ram->base.ramcfg.version = ram->base.rammap.version;
-       ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
-                              (ram->base.ramcfg.size * strap);
+       ram->base.ramcfg.data = nvbios_rammapSp(bios, ram->base.rammap.data,
+                                               ram->base.rammap.version,
+                                               ram->base.rammap.size, cnt, len,
+                                               nvbios_ramcfg_index(bios),
+                                               &ram->base.ramcfg.version,
+                                               &ram->base.ramcfg.size,
+                                               &data->bios);
        if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
             ram->base.ramcfg.size < 0x08) {
                nv_error(pfb, "invalid/missing ramcfg entry\n");
@@ -918,9 +967,9 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
        strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
        if (strap != 0xff) {
                ram->base.timing.data =
-                       nvbios_timing_entry(bios, strap,
-                                          &ram->base.timing.version,
-                                          &ram->base.timing.size);
+                       nvbios_timingEp(bios, strap, &ram->base.timing.version,
+                                      &ram->base.timing.size, &cnt, &len,
+                                      &data->bios);
                if (!ram->base.timing.data ||
                     ram->base.timing.version != 0x20 ||
                     ram->base.timing.size < 0x33) {
@@ -931,11 +980,23 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
                ram->base.timing.data = 0;
        }
 
+       data->freq = freq;
+       return 0;
+}
+
+static int
+nve0_ram_calc_xits(struct nouveau_fb *pfb, struct nouveau_ram_data *next)
+{
+       struct nve0_ram *ram = (void *)pfb->ram;
+       struct nve0_ramfuc *fuc = &ram->fuc;
+       int refclk, i;
+       int ret;
+
        ret = ram_init(fuc, pfb);
        if (ret)
                return ret;
 
-       ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+       ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
        ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
 
        /* XXX: this is *not* what nvidia do.  on fermi nvidia generally
@@ -946,7 +1007,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
         * so far, i've seen very weird values being chosen by nvidia on
         * kepler boards, no idea how/why they're chosen.
         */
-       refclk = freq;
+       refclk = next->freq;
        if (ram->mode == 2)
                refclk = fuc->mempll.refclk;
 
@@ -968,7 +1029,7 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
                fuc->mempll.min_p = 1;
                fuc->mempll.max_p = 2;
 
-               ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+               ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
                                   &ram->N2, NULL, &ram->M2, &ram->P2);
                if (ret <= 0) {
                        nv_error(pfb, "unable to calc mempll\n");
@@ -980,17 +1041,18 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
                if (ram_have(fuc, mr[i]))
                        ram->base.mr[i] = ram_rd32(fuc, mr[i]);
        }
+       ram->base.freq = next->freq;
 
        switch (ram->base.type) {
        case NV_MEM_TYPE_DDR3:
                ret = nouveau_sddr3_calc(&ram->base);
                if (ret == 0)
-                       ret = nve0_ram_calc_sddr3(pfb, freq);
+                       ret = nve0_ram_calc_sddr3(pfb, next->freq);
                break;
        case NV_MEM_TYPE_GDDR5:
-               ret = nouveau_gddr5_calc(&ram->base);
+               ret = nouveau_gddr5_calc(&ram->base, ram->pnuts != 0);
                if (ret == 0)
-                       ret = nve0_ram_calc_gddr5(pfb, freq);
+                       ret = nve0_ram_calc_gddr5(pfb, next->freq);
                break;
        default:
                ret = -ENOSYS;
@@ -1000,6 +1062,48 @@ nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
        return ret;
 }
 
+static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+       struct nouveau_clock *clk = nouveau_clock(pfb);
+       struct nve0_ram *ram = (void *)pfb->ram;
+       struct nouveau_ram_data *xits = &ram->base.xition;
+       struct nouveau_ram_data *copy;
+       int ret;
+
+       if (ram->base.next == NULL) {
+               ret = nve0_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+                                       &ram->base.former);
+               if (ret)
+                       return ret;
+
+               ret = nve0_ram_calc_data(pfb, freq, &ram->base.target);
+               if (ret)
+                       return ret;
+
+               if (ram->base.target.freq < ram->base.former.freq) {
+                       *xits = ram->base.target;
+                       copy = &ram->base.former;
+               } else {
+                       *xits = ram->base.former;
+                       copy = &ram->base.target;
+               }
+
+               xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04;
+               xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03;
+               xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07;
+
+               ram->base.next = &ram->base.target;
+               if (memcmp(xits, &ram->base.former, sizeof(xits->bios)))
+                       ram->base.next = &ram->base.xition;
+       } else {
+               BUG_ON(ram->base.next != &ram->base.xition);
+               ram->base.next = &ram->base.target;
+       }
+
+       return nve0_ram_calc_xits(pfb, ram->base.next);
+}
+
 static int
 nve0_ram_prog(struct nouveau_fb *pfb)
 {
@@ -1007,7 +1111,7 @@ nve0_ram_prog(struct nouveau_fb *pfb)
        struct nve0_ram *ram = (void *)pfb->ram;
        struct nve0_ramfuc *fuc = &ram->fuc;
        ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
-       return 0;
+       return (ram->base.next == &ram->base.xition);
 }
 
 static void
@@ -1015,6 +1119,7 @@ nve0_ram_tidy(struct nouveau_fb *pfb)
 {
        struct nve0_ram *ram = (void *)pfb->ram;
        struct nve0_ramfuc *fuc = &ram->fuc;
+       ram->base.next = NULL;
        ram_exec(fuc, false);
 }
 
@@ -1055,7 +1160,7 @@ nve0_ram_init(struct nouveau_object *object)
         * binary driver skips the one that's already been setup by
         * the init tables.
         */
-       data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+       data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
        if (!data || hdr < 0x15)
                return -EINVAL;
 
@@ -1073,6 +1178,7 @@ nve0_ram_init(struct nouveau_object *object)
                data += 4;
        }
        nv_wr32(pfb, 0x10f65c, save);
+       nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
 
        switch (ram->base.type) {
        case NV_MEM_TYPE_GDDR5:
@@ -1117,7 +1223,8 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nouveau_gpio *gpio = nouveau_gpio(pfb);
        struct dcb_gpio_func func;
        struct nve0_ram *ram;
-       int ret;
+       int ret, i;
+       u32 tmp;
 
        ret = nvc0_ram_create(parent, engine, oclass, &ram);
        *pobject = nv_object(ram);
@@ -1136,6 +1243,25 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                break;
        }
 
+       /* calculate a mask of differently configured memory partitions,
+        * because, of course reclocking wasn't complicated enough
+        * already without having to treat some of them differently to
+        * the others....
+        */
+       ram->parts = nv_rd32(pfb, 0x022438);
+       ram->pmask = nv_rd32(pfb, 0x022554);
+       ram->pnuts = 0;
+       for (i = 0, tmp = 0; i < ram->parts; i++) {
+               if (!(ram->pmask & (1 << i))) {
+                       u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+                       if (tmp && tmp != cfg1) {
+                               ram->pnuts |= (1 << i);
+                               continue;
+                       }
+                       tmp = cfg1;
+               }
+       }
+
        // parse bios data for both pll's
        ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
        if (ret) {
@@ -1248,7 +1374,7 @@ nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
        ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
        ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
-       ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+       ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
        return 0;
 }
 
index 6565f3dbbe04e7e04c0721ad49f3523688f8b729..14706d9842ca6d1428298982e247f1887f11cff6 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <subdev/instmem.h>
+#include "priv.h"
+
+/******************************************************************************
+ * instmem object base implementation
+ *****************************************************************************/
+
+void
+_nouveau_instobj_dtor(struct nouveau_object *object)
+{
+       struct nouveau_instmem *imem = (void *)object->engine;
+       struct nouveau_instobj *iobj = (void *)object;
+
+       mutex_lock(&nv_subdev(imem)->mutex);
+       list_del(&iobj->head);
+       mutex_unlock(&nv_subdev(imem)->mutex);
+
+       return nouveau_object_destroy(&iobj->base);
+}
 
 int
 nouveau_instobj_create_(struct nouveau_object *parent,
@@ -46,73 +63,26 @@ nouveau_instobj_create_(struct nouveau_object *parent,
        return 0;
 }
 
-void
-nouveau_instobj_destroy(struct nouveau_instobj *iobj)
-{
-       struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+/******************************************************************************
+ * instmem subdev base implementation
+ *****************************************************************************/
 
-       mutex_lock(&subdev->mutex);
-       list_del(&iobj->head);
-       mutex_unlock(&subdev->mutex);
-
-       return nouveau_object_destroy(&iobj->base);
-}
-
-void
-_nouveau_instobj_dtor(struct nouveau_object *object)
+static int
+nouveau_instmem_alloc(struct nouveau_instmem *imem,
+                     struct nouveau_object *parent, u32 size, u32 align,
+                     struct nouveau_object **pobject)
 {
-       struct nouveau_instobj *iobj = (void *)object;
-       return nouveau_instobj_destroy(iobj);
+       struct nouveau_object *engine = nv_object(imem);
+       struct nouveau_instmem_impl *impl = (void *)engine->oclass;
+       struct nouveau_instobj_args args = { .size = size, .align = align };
+       return nouveau_object_ctor(parent, engine, impl->instobj, &args,
+                                  sizeof(args), pobject);
 }
 
 int
-nouveau_instmem_create_(struct nouveau_object *parent,
-                       struct nouveau_object *engine,
-                       struct nouveau_oclass *oclass,
-                       int length, void **pobject)
-{
-       struct nouveau_instmem *imem;
-       int ret;
-
-       ret = nouveau_subdev_create_(parent, engine, oclass, 0,
-                                    "INSTMEM", "instmem", length, pobject);
-       imem = *pobject;
-       if (ret)
-               return ret;
-
-       INIT_LIST_HEAD(&imem->list);
-       return 0;
-}
-
-int
-nouveau_instmem_init(struct nouveau_instmem *imem)
-{
-       struct nouveau_instobj *iobj;
-       int ret, i;
-
-       ret = nouveau_subdev_init(&imem->base);
-       if (ret)
-               return ret;
-
-       mutex_lock(&imem->base.mutex);
-
-       list_for_each_entry(iobj, &imem->list, head) {
-               if (iobj->suspend) {
-                       for (i = 0; i < iobj->size; i += 4)
-                               nv_wo32(iobj, i, iobj->suspend[i / 4]);
-                       vfree(iobj->suspend);
-                       iobj->suspend = NULL;
-               }
-       }
-
-       mutex_unlock(&imem->base.mutex);
-
-       return 0;
-}
-
-int
-nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
+_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
 {
+       struct nouveau_instmem *imem = (void *)object;
        struct nouveau_instobj *iobj;
        int i, ret = 0;
 
@@ -143,12 +113,45 @@ int
 _nouveau_instmem_init(struct nouveau_object *object)
 {
        struct nouveau_instmem *imem = (void *)object;
-       return nouveau_instmem_init(imem);
+       struct nouveau_instobj *iobj;
+       int ret, i;
+
+       ret = nouveau_subdev_init(&imem->base);
+       if (ret)
+               return ret;
+
+       mutex_lock(&imem->base.mutex);
+
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->suspend) {
+                       for (i = 0; i < iobj->size; i += 4)
+                               nv_wo32(iobj, i, iobj->suspend[i / 4]);
+                       vfree(iobj->suspend);
+                       iobj->suspend = NULL;
+               }
+       }
+
+       mutex_unlock(&imem->base.mutex);
+
+       return 0;
 }
 
 int
-_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+nouveau_instmem_create_(struct nouveau_object *parent,
+                       struct nouveau_object *engine,
+                       struct nouveau_oclass *oclass,
+                       int length, void **pobject)
 {
-       struct nouveau_instmem *imem = (void *)object;
-       return nouveau_instmem_fini(imem, suspend);
+       struct nouveau_instmem *imem;
+       int ret;
+
+       ret = nouveau_subdev_create_(parent, engine, oclass, 0,
+                                    "INSTMEM", "instmem", length, pobject);
+       imem = *pobject;
+       if (ret)
+               return ret;
+
+       INIT_LIST_HEAD(&imem->list);
+       imem->alloc = nouveau_instmem_alloc;
+       return 0;
 }
index 795393d7b2f56826c0be783c320867bd14d4336b..7b64befee48fbb5b30b306c980ae53bd077c3368 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <subdev/fb.h>
-
 #include "nv04.h"
 
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
+
+static u32
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nv04_instobj_priv *node = (void *)object;
+       return nv_ro32(object->engine, node->mem->offset + addr);
+}
+
+static void
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       struct nv04_instobj_priv *node = (void *)object;
+       nv_wo32(object->engine, node->mem->offset + addr, data);
+}
+
+static void
+nv04_instobj_dtor(struct nouveau_object *object)
+{
+       struct nv04_instmem_priv *priv = (void *)object->engine;
+       struct nv04_instobj_priv *node = (void *)object;
+       nouveau_mm_free(&priv->heap, &node->mem);
+       nouveau_instobj_destroy(&node->base);
+}
+
 static int
 nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
@@ -33,18 +58,19 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 {
        struct nv04_instmem_priv *priv = (void *)engine;
        struct nv04_instobj_priv *node;
-       int ret, align;
+       struct nouveau_instobj_args *args = data;
+       int ret;
 
-       align = (unsigned long)data;
-       if (!align)
-               align = 1;
+       if (!args->align)
+               args->align = 1;
 
        ret = nouveau_instobj_create(parent, engine, oclass, &node);
        *pobject = nv_object(node);
        if (ret)
                return ret;
 
-       ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
+       ret = nouveau_mm_head(&priv->heap, 1, args->size, args->size,
+                             args->align, &node->mem);
        if (ret)
                return ret;
 
@@ -53,32 +79,9 @@ nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        return 0;
 }
 
-static void
-nv04_instobj_dtor(struct nouveau_object *object)
-{
-       struct nv04_instmem_priv *priv = (void *)object->engine;
-       struct nv04_instobj_priv *node = (void *)object;
-       nouveau_mm_free(&priv->heap, &node->mem);
-       nouveau_instobj_destroy(&node->base);
-}
-
-static u32
-nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
-{
-       struct nv04_instobj_priv *node = (void *)object;
-       return nv_ro32(object->engine, node->mem->offset + addr);
-}
-
-static void
-nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
-       struct nv04_instobj_priv *node = (void *)object;
-       nv_wo32(object->engine, node->mem->offset + addr, data);
-}
-
-static struct nouveau_oclass
+struct nouveau_instobj_impl
 nv04_instobj_oclass = {
-       .ofuncs = &(struct nouveau_ofuncs) {
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv04_instobj_ctor,
                .dtor = nv04_instobj_dtor,
                .init = _nouveau_instobj_init,
@@ -88,19 +91,34 @@ nv04_instobj_oclass = {
        },
 };
 
-int
-nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
-                  u32 size, u32 align, struct nouveau_object **pobject)
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
-       struct nouveau_object *engine = nv_object(imem);
-       int ret;
+       return nv_rd32(object, 0x700000 + addr);
+}
 
-       ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
-                                 (void *)(unsigned long)align, size, pobject);
-       if (ret)
-               return ret;
+static void
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       return nv_wr32(object, 0x700000 + addr, data);
+}
 
-       return 0;
+void
+nv04_instmem_dtor(struct nouveau_object *object)
+{
+       struct nv04_instmem_priv *priv = (void *)object;
+       nouveau_gpuobj_ref(NULL, &priv->ramfc);
+       nouveau_gpuobj_ref(NULL, &priv->ramro);
+       nouveau_ramht_ref(NULL, &priv->ramht);
+       nouveau_gpuobj_ref(NULL, &priv->vbios);
+       nouveau_mm_fini(&priv->heap);
+       if (priv->iomem)
+               iounmap(priv->iomem);
+       nouveau_instmem_destroy(&priv->base);
 }
 
 static int
@@ -118,7 +136,6 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
        /* PRAMIN aperture maps over the end of VRAM, reserve it */
        priv->base.reserved = 512 * 1024;
-       priv->base.alloc    = nv04_instmem_alloc;
 
        ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
        if (ret)
@@ -150,36 +167,10 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        return 0;
 }
 
-void
-nv04_instmem_dtor(struct nouveau_object *object)
-{
-       struct nv04_instmem_priv *priv = (void *)object;
-       nouveau_gpuobj_ref(NULL, &priv->ramfc);
-       nouveau_gpuobj_ref(NULL, &priv->ramro);
-       nouveau_ramht_ref(NULL, &priv->ramht);
-       nouveau_gpuobj_ref(NULL, &priv->vbios);
-       nouveau_mm_fini(&priv->heap);
-       if (priv->iomem)
-               iounmap(priv->iomem);
-       nouveau_instmem_destroy(&priv->base);
-}
-
-static u32
-nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
-       return nv_rd32(object, 0x700000 + addr);
-}
-
-static void
-nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
-       return nv_wr32(object, 0x700000 + addr, data);
-}
-
-struct nouveau_oclass
-nv04_instmem_oclass = {
-       .handle = NV_SUBDEV(INSTMEM, 0x04),
-       .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_instmem_oclass = &(struct nouveau_instmem_impl) {
+       .base.handle = NV_SUBDEV(INSTMEM, 0x04),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv04_instmem_ctor,
                .dtor = nv04_instmem_dtor,
                .init = _nouveau_instmem_init,
@@ -187,4 +178,5 @@ nv04_instmem_oclass = {
                .rd32 = nv04_instmem_rd32,
                .wr32 = nv04_instmem_wr32,
        },
-};
+       .instobj = &nv04_instobj_oclass.base,
+}.base;
index b15b61310236608e88a1bc2b89f58b70f3705f48..095fbc6fc099a852bfee334dea9aba389448adcb 100644 (file)
@@ -5,7 +5,9 @@
 #include <core/ramht.h>
 #include <core/mm.h>
 
-#include <subdev/instmem.h>
+#include "priv.h"
+
+extern struct nouveau_instobj_impl nv04_instobj_oclass;
 
 struct nv04_instmem_priv {
        struct nouveau_instmem base;
index b10a143787a7af95af24475f78fa52de7c7a641f..ec0b9661d614ced3dec026e3460d3eeb07905f2e 100644 (file)
 
 #include "nv04.h"
 
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nv04_instmem_priv *priv = (void *)object;
+       return ioread32_native(priv->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       struct nv04_instmem_priv *priv = (void *)object;
+       iowrite32_native(data, priv->iomem + addr);
+}
+
 static int
 nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
@@ -69,7 +87,6 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        priv->base.reserved += 512 * 1024;      /* object storage */
 
        priv->base.reserved = round_up(priv->base.reserved, 4096);
-       priv->base.alloc    = nv04_instmem_alloc;
 
        ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
        if (ret)
@@ -106,24 +123,10 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        return 0;
 }
 
-static u32
-nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
-{
-       struct nv04_instmem_priv *priv = (void *)object;
-       return ioread32_native(priv->iomem + addr);
-}
-
-static void
-nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
-       struct nv04_instmem_priv *priv = (void *)object;
-       iowrite32_native(data, priv->iomem + addr);
-}
-
-struct nouveau_oclass
-nv40_instmem_oclass = {
-       .handle = NV_SUBDEV(INSTMEM, 0x40),
-       .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
+       .base.handle = NV_SUBDEV(INSTMEM, 0x40),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv40_instmem_ctor,
                .dtor = nv04_instmem_dtor,
                .init = _nouveau_instmem_init,
@@ -131,4 +134,5 @@ nv40_instmem_oclass = {
                .rd32 = nv40_instmem_rd32,
                .wr32 = nv40_instmem_wr32,
        },
-};
+       .instobj = &nv04_instobj_oclass.base,
+}.base;
index 97bc5dff93e7aa224643c0cb5422faa15adb9e36..7cb3b098a08d030d7d315a1642ebf6a167033ffe 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <subdev/instmem.h>
 #include <subdev/fb.h>
-
 #include <core/mm.h>
 
+#include "priv.h"
+
 struct nv50_instmem_priv {
        struct nouveau_instmem base;
        spinlock_t lock;
@@ -38,42 +38,9 @@ struct nv50_instobj_priv {
        struct nouveau_mem *mem;
 };
 
-static int
-nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
-{
-       struct nouveau_fb *pfb = nouveau_fb(parent);
-       struct nv50_instobj_priv *node;
-       u32 align = (unsigned long)data;
-       int ret;
-
-       size  = max((size  + 4095) & ~4095, (u32)4096);
-       align = max((align + 4095) & ~4095, (u32)4096);
-
-       ret = nouveau_instobj_create(parent, engine, oclass, &node);
-       *pobject = nv_object(node);
-       if (ret)
-               return ret;
-
-       ret = pfb->ram->get(pfb, size, align, 0, 0x800, &node->mem);
-       if (ret)
-               return ret;
-
-       node->base.addr = node->mem->offset;
-       node->base.size = node->mem->size << 12;
-       node->mem->page_shift = 12;
-       return 0;
-}
-
-static void
-nv50_instobj_dtor(struct nouveau_object *object)
-{
-       struct nv50_instobj_priv *node = (void *)object;
-       struct nouveau_fb *pfb = nouveau_fb(object);
-       pfb->ram->put(pfb, &node->mem);
-       nouveau_instobj_destroy(&node->base);
-}
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
 
 static u32
 nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
@@ -113,9 +80,46 @@ nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static struct nouveau_oclass
+static void
+nv50_instobj_dtor(struct nouveau_object *object)
+{
+       struct nv50_instobj_priv *node = (void *)object;
+       struct nouveau_fb *pfb = nouveau_fb(object);
+       pfb->ram->put(pfb, &node->mem);
+       nouveau_instobj_destroy(&node->base);
+}
+
+static int
+nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+                 struct nouveau_oclass *oclass, void *data, u32 size,
+                 struct nouveau_object **pobject)
+{
+       struct nouveau_fb *pfb = nouveau_fb(parent);
+       struct nouveau_instobj_args *args = data;
+       struct nv50_instobj_priv *node;
+       int ret;
+
+       args->size  = max((args->size  + 4095) & ~4095, (u32)4096);
+       args->align = max((args->align + 4095) & ~4095, (u32)4096);
+
+       ret = nouveau_instobj_create(parent, engine, oclass, &node);
+       *pobject = nv_object(node);
+       if (ret)
+               return ret;
+
+       ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
+       if (ret)
+               return ret;
+
+       node->base.addr = node->mem->offset;
+       node->base.size = node->mem->size << 12;
+       node->mem->page_shift = 12;
+       return 0;
+}
+
+static struct nouveau_instobj_impl
 nv50_instobj_oclass = {
-       .ofuncs = &(struct nouveau_ofuncs) {
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv50_instobj_ctor,
                .dtor = nv50_instobj_dtor,
                .init = _nouveau_instobj_init,
@@ -125,13 +129,16 @@ nv50_instobj_oclass = {
        },
 };
 
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
 static int
-nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
-                  u32 size, u32 align, struct nouveau_object **pobject)
+nv50_instmem_fini(struct nouveau_object *object, bool suspend)
 {
-       struct nouveau_object *engine = nv_object(imem);
-       return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
-                                  (void *)(unsigned long)align, size, pobject);
+       struct nv50_instmem_priv *priv = (void *)object;
+       priv->addr = ~0ULL;
+       return nouveau_instmem_fini(&priv->base, suspend);
 }
 
 static int
@@ -148,25 +155,17 @@ nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        spin_lock_init(&priv->lock);
-       priv->base.alloc = nv50_instmem_alloc;
        return 0;
 }
 
-static int
-nv50_instmem_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv50_instmem_priv *priv = (void *)object;
-       priv->addr = ~0ULL;
-       return nouveau_instmem_fini(&priv->base, suspend);
-}
-
-struct nouveau_oclass
-nv50_instmem_oclass = {
-       .handle = NV_SUBDEV(INSTMEM, 0x50),
-       .ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_instmem_oclass = &(struct nouveau_instmem_impl) {
+       .base.handle = NV_SUBDEV(INSTMEM, 0x50),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv50_instmem_ctor,
                .dtor = _nouveau_instmem_dtor,
                .init = _nouveau_instmem_init,
                .fini = nv50_instmem_fini,
        },
-};
+       .instobj = &nv50_instobj_oclass.base,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/priv.h
new file mode 100644 (file)
index 0000000..8d67ded
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef __NVKM_INSTMEM_PRIV_H__
+#define __NVKM_INSTMEM_PRIV_H__
+
+#include <subdev/instmem.h>
+
+struct nouveau_instobj_impl {
+       struct nouveau_oclass base;
+};
+
+struct nouveau_instobj_args {
+       u32 size;
+       u32 align;
+};
+
+#define nouveau_instobj_create(p,e,o,d)                                        \
+       nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instobj_destroy(p) ({                                          \
+       struct nouveau_instobj *iobj = (p);                                    \
+       _nouveau_instobj_dtor(nv_object(iobj));                                \
+})
+#define nouveau_instobj_init(p)                                                \
+       nouveau_object_init(&(p)->base)
+#define nouveau_instobj_fini(p,s)                                              \
+       nouveau_object_fini(&(p)->base, (s))
+
+int  nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
+                            struct nouveau_oclass *, int, void **);
+void _nouveau_instobj_dtor(struct nouveau_object *);
+#define _nouveau_instobj_init nouveau_object_init
+#define _nouveau_instobj_fini nouveau_object_fini
+
+struct nouveau_instmem_impl {
+       struct nouveau_oclass base;
+       struct nouveau_oclass *instobj;
+};
+
+#define nouveau_instmem_create(p,e,o,d)                                        \
+       nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instmem_destroy(p)                                             \
+       nouveau_subdev_destroy(&(p)->base)
+#define nouveau_instmem_init(p) ({                                             \
+       struct nouveau_instmem *imem = (p);                                    \
+       _nouveau_instmem_init(nv_object(imem));                                \
+})
+#define nouveau_instmem_fini(p,s) ({                                           \
+       struct nouveau_instmem *imem = (p);                                    \
+       _nouveau_instmem_fini(nv_object(imem), (s));                           \
+})
+
+int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
+                           struct nouveau_oclass *, int, void **);
+#define _nouveau_instmem_dtor _nouveau_subdev_dtor
+int _nouveau_instmem_init(struct nouveau_object *);
+int _nouveau_instmem_fini(struct nouveau_object *, bool);
+
+#endif
index c02b4763a2d50e40db65cf2a8a247f2e8374cd3f..34472d3170974ca8e6207afe0a347f332130d6bd 100644 (file)
@@ -32,6 +32,7 @@ nvc0_mc_intr[] = {
        { 0x00000080, NVDEV_ENGINE_COPY2 },
        { 0x00000100, NVDEV_ENGINE_FIFO },
        { 0x00001000, NVDEV_ENGINE_GR },
+       { 0x00002000, NVDEV_SUBDEV_FB },
        { 0x00008000, NVDEV_ENGINE_BSP },
        { 0x00040000, NVDEV_SUBDEV_THERM },
        { 0x00020000, NVDEV_ENGINE_VP },
@@ -40,6 +41,7 @@ nvc0_mc_intr[] = {
        { 0x01000000, NVDEV_SUBDEV_PWR },
        { 0x02000000, NVDEV_SUBDEV_LTCG },
        { 0x04000000, NVDEV_ENGINE_DISP },
+       { 0x08000000, NVDEV_SUBDEV_FB },
        { 0x10000000, NVDEV_SUBDEV_BUS },
        { 0x40000000, NVDEV_SUBDEV_IBUS },
        { 0x80000000, NVDEV_ENGINE_SW },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/i2c_.fuc
new file mode 100644 (file)
index 0000000..757dda7
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define T_TIMEOUT  2200000
+#define T_RISEFALL 1000
+#define T_HOLD     5000
+
+#ifdef INCLUDE_PROC
+process(PROC_I2C_, #i2c_init, #i2c_recv)
+#endif
+
+/******************************************************************************
+ * I2C_ data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+i2c_scl_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SCL
+.b32 NV_PPWR_OUTPUT_I2C_1_SCL
+.b32 NV_PPWR_OUTPUT_I2C_2_SCL
+.b32 NV_PPWR_OUTPUT_I2C_3_SCL
+.b32 NV_PPWR_OUTPUT_I2C_4_SCL
+.b32 NV_PPWR_OUTPUT_I2C_5_SCL
+.b32 NV_PPWR_OUTPUT_I2C_6_SCL
+.b32 NV_PPWR_OUTPUT_I2C_7_SCL
+.b32 NV_PPWR_OUTPUT_I2C_8_SCL
+.b32 NV_PPWR_OUTPUT_I2C_9_SCL
+i2c_sda_map:
+.b32 NV_PPWR_OUTPUT_I2C_0_SDA
+.b32 NV_PPWR_OUTPUT_I2C_1_SDA
+.b32 NV_PPWR_OUTPUT_I2C_2_SDA
+.b32 NV_PPWR_OUTPUT_I2C_3_SDA
+.b32 NV_PPWR_OUTPUT_I2C_4_SDA
+.b32 NV_PPWR_OUTPUT_I2C_5_SDA
+.b32 NV_PPWR_OUTPUT_I2C_6_SDA
+.b32 NV_PPWR_OUTPUT_I2C_7_SDA
+.b32 NV_PPWR_OUTPUT_I2C_8_SDA
+.b32 NV_PPWR_OUTPUT_I2C_9_SDA
+#if NVKM_PPWR_CHIPSET < GF119
+i2c_ctrl:
+.b32 0x00e138
+.b32 0x00e150
+.b32 0x00e168
+.b32 0x00e180
+.b32 0x00e254
+.b32 0x00e274
+.b32 0x00e764
+.b32 0x00e780
+.b32 0x00e79c
+.b32 0x00e7b8
+#endif
+#endif
+
+/******************************************************************************
+ * I2C_ code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// $r3  - value
+// $r2  - sda line
+// $r1  - scl line
+// $r0  - zero
+i2c_drive_scl:
+       cmp b32 $r3 0
+       bra e #i2c_drive_scl_lo
+       nv_iowr(NV_PPWR_OUTPUT_SET, $r1)
+       ret
+       i2c_drive_scl_lo:
+       nv_iowr(NV_PPWR_OUTPUT_CLR, $r1)
+       ret
+
+i2c_drive_sda:
+       cmp b32 $r3 0
+       bra e #i2c_drive_sda_lo
+       nv_iowr(NV_PPWR_OUTPUT_SET, $r2)
+       ret
+       i2c_drive_sda_lo:
+       nv_iowr(NV_PPWR_OUTPUT_CLR, $r2)
+       ret
+
+i2c_sense_scl:
+       bclr $flags $p1
+       nv_iord($r3, NV_PPWR_INPUT)
+       and $r3 $r1
+       bra z #i2c_sense_scl_done
+               bset $flags $p1
+       i2c_sense_scl_done:
+       ret
+
+i2c_sense_sda:
+       bclr $flags $p1
+       nv_iord($r3, NV_PPWR_INPUT)
+       and $r3 $r2
+       bra z #i2c_sense_sda_done
+               bset $flags $p1
+       i2c_sense_sda_done:
+       ret
+
+#define i2c_drive_scl(v) /*
+*/     mov $r3 (v) /*
+*/     call(i2c_drive_scl)
+#define i2c_drive_sda(v) /*
+*/     mov $r3 (v) /*
+*/     call(i2c_drive_sda)
+#define i2c_sense_scl() /*
+*/     call(i2c_sense_scl)
+#define i2c_sense_sda() /*
+*/     call(i2c_sense_sda)
+#define i2c_delay(v) /*
+*/     mov $r14 (v) /*
+*/     call(nsec)
+
+#define i2c_trace_init() /*
+*/     imm32($r6, 0x10000000) /*
+*/     sub b32 $r7 $r6 1 /*
+*/
+#define i2c_trace_down() /*
+*/     shr b32 $r6 4 /*
+*/     push $r5 /*
+*/     shl b32 $r5 $r6 4 /*
+*/     sub b32 $r5 $r6 /*
+*/     not b32 $r5 /*
+*/     and $r7 $r5 /*
+*/     pop $r5 /*
+*/
+#define i2c_trace_exit() /*
+*/     shl b32 $r6 4 /*
+*/
+#define i2c_trace_next() /*
+*/     add b32 $r7 $r6 /*
+*/
+#define i2c_trace_call(func) /*
+*/     i2c_trace_next() /*
+*/     i2c_trace_down() /*
+*/     call(func) /*
+*/     i2c_trace_exit() /*
+*/
+
+i2c_raise_scl:
+       push $r4
+       mov $r4 (T_TIMEOUT / T_RISEFALL)
+       i2c_drive_scl(1)
+       i2c_raise_scl_wait:
+               i2c_delay(T_RISEFALL)
+               i2c_sense_scl()
+               bra $p1 #i2c_raise_scl_done
+               sub b32 $r4 1
+               bra nz #i2c_raise_scl_wait
+       i2c_raise_scl_done:
+       pop $r4
+       ret
+
+i2c_start:
+       i2c_sense_scl()
+       bra not $p1 #i2c_start_rep
+       i2c_sense_sda()
+       bra not $p1 #i2c_start_rep
+       bra #i2c_start_send
+       i2c_start_rep:
+               i2c_drive_scl(0)
+               i2c_drive_sda(1)
+               i2c_trace_call(i2c_raise_scl)
+               bra not $p1 #i2c_start_out
+       i2c_start_send:
+       i2c_drive_sda(0)
+       i2c_delay(T_HOLD)
+       i2c_drive_scl(0)
+       i2c_delay(T_HOLD)
+       i2c_start_out:
+       ret
+
+i2c_stop:
+       i2c_drive_scl(0)
+       i2c_drive_sda(0)
+       i2c_delay(T_RISEFALL)
+       i2c_drive_scl(1)
+       i2c_delay(T_HOLD)
+       i2c_drive_sda(1)
+       i2c_delay(T_HOLD)
+       ret
+
+// $r3  - value
+// $r2  - sda line
+// $r1  - scl line
+// $r0  - zero
+i2c_bitw:
+       call(i2c_drive_sda)
+       i2c_delay(T_RISEFALL)
+       i2c_trace_call(i2c_raise_scl)
+       bra not $p1 #i2c_bitw_out
+       i2c_delay(T_HOLD)
+       i2c_drive_scl(0)
+       i2c_delay(T_HOLD)
+       i2c_bitw_out:
+       ret
+
+// $r3  - value (out)
+// $r2  - sda line
+// $r1  - scl line
+// $r0  - zero
+i2c_bitr:
+       i2c_drive_sda(1)
+       i2c_delay(T_RISEFALL)
+       i2c_trace_call(i2c_raise_scl)
+       bra not $p1 #i2c_bitr_done
+       i2c_sense_sda()
+       i2c_drive_scl(0)
+       i2c_delay(T_HOLD)
+       xbit $r3 $flags $p1
+       bset $flags $p1
+       i2c_bitr_done:
+       ret
+
+i2c_get_byte:
+       mov $r5 0
+       mov $r4 8
+       i2c_get_byte_next:
+               shl b32 $r5 1
+               i2c_trace_call(i2c_bitr)
+               bra not $p1 #i2c_get_byte_done
+               or $r5 $r3
+               sub b32 $r4 1
+               bra nz #i2c_get_byte_next
+       mov $r3 1
+       i2c_trace_call(i2c_bitw)
+       i2c_get_byte_done:
+       ret
+
+i2c_put_byte:
+       mov $r4 8
+       i2c_put_byte_next:
+               sub b32 $r4 1
+               xbit $r3 $r5 $r4
+               i2c_trace_call(i2c_bitw)
+               bra not $p1 #i2c_put_byte_done
+               cmp b32 $r4 0
+               bra ne #i2c_put_byte_next
+       i2c_trace_call(i2c_bitr)
+       bra not $p1 #i2c_put_byte_done
+       i2c_trace_next()
+       cmp b32 $r3 1
+       bra ne #i2c_put_byte_done
+       bclr $flags $p1 // nack
+       i2c_put_byte_done:
+       ret
+
+i2c_addr:
+       i2c_trace_call(i2c_start)
+       bra not $p1 #i2c_addr_done
+       extr $r3 $r12 I2C__MSG_DATA0_ADDR
+       shl b32 $r3 1
+       or $r5 $r3
+       i2c_trace_call(i2c_put_byte)
+       i2c_addr_done:
+       ret
+
+i2c_acquire_addr:
+       extr $r14 $r12 I2C__MSG_DATA0_PORT
+#if NVKM_PPWR_CHIPSET < GF119
+       shl b32 $r14 2
+       add b32 $r14 #i2c_ctrl
+       ld b32 $r14 D[$r14]
+#else
+       shl b32 $r14 5
+       add b32 $r14 0x00d014
+#endif
+       ret
+
+i2c_acquire:
+       call(i2c_acquire_addr)
+       call(rd32)
+       bset $r13 3
+       call(wr32)
+       ret
+
+i2c_release:
+       call(i2c_acquire_addr)
+       call(rd32)
+       bclr $r13 3
+       call(wr32)
+       ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+i2c_recv:
+       bclr $flags $p1
+       extr $r1 $r12 I2C__MSG_DATA0_PORT
+       shl b32 $r1 2
+       cmp b32 $r1 (#i2c_sda_map - #i2c_scl_map)
+       bra ge #i2c_recv_done
+       add b32 $r3 $r1 #i2c_sda_map
+       ld b32 $r2 D[$r3]
+       add b32 $r3 $r1 #i2c_scl_map
+       ld b32 $r1 D[$r3]
+
+       bset $flags $p2
+       push $r13
+       push $r14
+
+       push $r13
+       i2c_trace_init()
+       i2c_trace_call(i2c_acquire)
+       pop $r13
+
+       cmp b32 $r13 I2C__MSG_RD08
+       bra ne #i2c_recv_not_rd08
+               mov $r5 0
+               i2c_trace_call(i2c_addr)
+               bra not $p1 #i2c_recv_done
+               extr $r5 $r12 I2C__MSG_DATA0_RD08_REG
+               i2c_trace_call(i2c_put_byte)
+               bra not $p1 #i2c_recv_done
+               mov $r5 1
+               i2c_trace_call(i2c_addr)
+               bra not $p1 #i2c_recv_done
+               i2c_trace_call(i2c_get_byte)
+               bra not $p1 #i2c_recv_done
+               ins $r11 $r5 I2C__MSG_DATA1_RD08_VAL
+               i2c_trace_call(i2c_stop)
+               mov b32 $r11 $r5
+               clear b32 $r7
+               bra #i2c_recv_done
+
+       i2c_recv_not_rd08:
+       cmp b32 $r13 I2C__MSG_WR08
+       bra ne #i2c_recv_not_wr08
+               mov $r5 0
+               call(i2c_addr)
+               bra not $p1 #i2c_recv_done
+               extr $r5 $r12 I2C__MSG_DATA0_WR08_REG
+               call(i2c_put_byte)
+               bra not $p1 #i2c_recv_done
+               mov $r5 0
+               call(i2c_addr)
+               bra not $p1 #i2c_recv_done
+               extr $r5 $r11 I2C__MSG_DATA1_WR08_VAL
+               call(i2c_put_byte)
+               bra not $p1 #i2c_recv_done
+               call(i2c_stop)
+               clear b32 $r7
+               extr $r5 $r12 I2C__MSG_DATA0_WR08_SYNC
+               bra nz #i2c_recv_done
+               bclr $flags $p2
+               bra #i2c_recv_done
+
+       i2c_recv_not_wr08:
+
+       i2c_recv_done:
+       extr $r14 $r12 I2C__MSG_DATA0_PORT
+       call(i2c_release)
+
+       pop $r14
+       pop $r13
+       bra not $p2 #i2c_recv_exit
+       mov b32 $r12 $r7
+       call(send)
+
+       i2c_recv_exit:
+       ret
+
+// description
+//
+// $r15 - current (i2c)
+// $r0  - zero
+i2c_init:
+       ret
+#endif
index 0a7b05fa5c1112ab4f172ca39a5d66fd1de7ead2..8f29badd785f16f9cd37cff7adf19d03f1aafc44 100644 (file)
@@ -51,12 +51,12 @@ time_next: .b32 0
 // $r0  - zero
 rd32:
        nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
-       mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
-       sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
-       nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+       mov $r13 NV_PPWR_MMIO_CTRL_OP_RD
+       sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+       nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
        rd32_wait:
-               nv_iord($r14, NV_PPWR_MMIO_CTRL)
-               and $r14 NV_PPWR_MMIO_CTRL_STATUS
+               nv_iord($r13, NV_PPWR_MMIO_CTRL)
+               and $r13 NV_PPWR_MMIO_CTRL_STATUS
                bra nz #rd32_wait
        nv_iord($r13, NV_PPWR_MMIO_DATA)
        ret
@@ -70,23 +70,25 @@ rd32:
 wr32:
        nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
        nv_iowr(NV_PPWR_MMIO_DATA, $r13)
-       mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
-       or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
-       sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+       mov $r13 NV_PPWR_MMIO_CTRL_OP_WR
+       or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0
+       sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
 
 #ifdef NVKM_FALCON_MMIO_TRAP
-       mov $r8 NV_PPWR_INTR_TRIGGER_USER1
-       nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+       push $r13
+       mov $r13 NV_PPWR_INTR_TRIGGER_USER1
+       nv_iowr(NV_PPWR_INTR_TRIGGER, $r13)
        wr32_host:
-               nv_iord($r8, NV_PPWR_INTR)
-               and $r8 NV_PPWR_INTR_USER1
+               nv_iord($r13, NV_PPWR_INTR)
+               and $r13 NV_PPWR_INTR_USER1
                bra nz #wr32_host
+       pop $r13
 #endif
 
-       nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+       nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
        wr32_wait:
-               nv_iord($r14, NV_PPWR_MMIO_CTRL)
-               and $r14 NV_PPWR_MMIO_CTRL_STATUS
+               nv_iord($r13, NV_PPWR_MMIO_CTRL)
+               and $r13 NV_PPWR_MMIO_CTRL_STATUS
                bra nz #wr32_wait
        ret
 
index 2a74ea90760430624d6bb369b02e5a575ad75c5d..e2a63ac5422b8180e4e21cd77e622f688900e4ad 100644 (file)
 #define NV_PPWR_MMIO_CTRL_OP_WR                                      0x00000002
 #define NV_PPWR_OUTPUT                                                   0x07c0
 #define NV_PPWR_OUTPUT_FB_PAUSE                                      0x00000004
+#if NVKM_PPWR_CHIPSET < GF119
+#define NV_PPWR_OUTPUT_I2C_3_SCL                                     0x00000100
+#define NV_PPWR_OUTPUT_I2C_3_SDA                                     0x00000200
+#define NV_PPWR_OUTPUT_I2C_0_SCL                                     0x00001000
+#define NV_PPWR_OUTPUT_I2C_0_SDA                                     0x00002000
+#define NV_PPWR_OUTPUT_I2C_1_SCL                                     0x00004000
+#define NV_PPWR_OUTPUT_I2C_1_SDA                                     0x00008000
+#define NV_PPWR_OUTPUT_I2C_2_SCL                                     0x00010000
+#define NV_PPWR_OUTPUT_I2C_2_SDA                                     0x00020000
+#define NV_PPWR_OUTPUT_I2C_4_SCL                                     0x00040000
+#define NV_PPWR_OUTPUT_I2C_4_SDA                                     0x00080000
+#define NV_PPWR_OUTPUT_I2C_5_SCL                                     0x00100000
+#define NV_PPWR_OUTPUT_I2C_5_SDA                                     0x00200000
+#define NV_PPWR_OUTPUT_I2C_6_SCL                                     0x00400000
+#define NV_PPWR_OUTPUT_I2C_6_SDA                                     0x00800000
+#define NV_PPWR_OUTPUT_I2C_7_SCL                                     0x01000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA                                     0x02000000
+#define NV_PPWR_OUTPUT_I2C_8_SCL                                     0x04000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA                                     0x08000000
+#define NV_PPWR_OUTPUT_I2C_9_SCL                                     0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA                                     0x20000000
+#else
+#define NV_PPWR_OUTPUT_I2C_0_SCL                                     0x00000400
+#define NV_PPWR_OUTPUT_I2C_1_SCL                                     0x00000800
+#define NV_PPWR_OUTPUT_I2C_2_SCL                                     0x00001000
+#define NV_PPWR_OUTPUT_I2C_3_SCL                                     0x00002000
+#define NV_PPWR_OUTPUT_I2C_4_SCL                                     0x00004000
+#define NV_PPWR_OUTPUT_I2C_5_SCL                                     0x00008000
+#define NV_PPWR_OUTPUT_I2C_6_SCL                                     0x00010000
+#define NV_PPWR_OUTPUT_I2C_7_SCL                                     0x00020000
+#define NV_PPWR_OUTPUT_I2C_8_SCL                                     0x00040000
+#define NV_PPWR_OUTPUT_I2C_9_SCL                                     0x00080000
+#define NV_PPWR_OUTPUT_I2C_0_SDA                                     0x00100000
+#define NV_PPWR_OUTPUT_I2C_1_SDA                                     0x00200000
+#define NV_PPWR_OUTPUT_I2C_2_SDA                                     0x00400000
+#define NV_PPWR_OUTPUT_I2C_3_SDA                                     0x00800000
+#define NV_PPWR_OUTPUT_I2C_4_SDA                                     0x01000000
+#define NV_PPWR_OUTPUT_I2C_5_SDA                                     0x02000000
+#define NV_PPWR_OUTPUT_I2C_6_SDA                                     0x04000000
+#define NV_PPWR_OUTPUT_I2C_7_SDA                                     0x08000000
+#define NV_PPWR_OUTPUT_I2C_8_SDA                                     0x10000000
+#define NV_PPWR_OUTPUT_I2C_9_SDA                                     0x20000000
+#endif
+#define NV_PPWR_INPUT                                                    0x07c4
 #define NV_PPWR_OUTPUT_SET                                               0x07e0
 #define NV_PPWR_OUTPUT_SET_FB_PAUSE                                  0x00000004
 #define NV_PPWR_OUTPUT_CLR                                               0x07e4
 */     .b32 0 /*
 */     .skip 64
 
+#if NV_PPWR_CHIPSET < GK208
+#define imm32(reg,val) /*
+*/     movw reg  ((val) & 0x0000ffff) /*
+*/     sethi reg ((val) & 0xffff0000)
+#else
+#define imm32(reg,val) /*
+*/     mov reg (val)
+#endif
+
 #ifndef NVKM_FALCON_UNSHIFTED_IO
 #define nv_iord(reg,ior) /*
 */     mov reg ior /*
index 947be536daefcb6031723f3d5e8eb2638c29eb47..17a8a383d91a91929c2eebad9ef1f531861c6800 100644 (file)
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
index 9342e2d7d3b7909b94f5cc02ead4c80d1e9e3e9b..4bd43a99fdccbe3d2cde12233644f18bda7561c0 100644 (file)
@@ -89,16 +89,9 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x54534554,
-       0x00000494,
-       0x00000475,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x5f433249,
+       0x00000877,
+       0x0000071e,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -111,16 +104,6 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x454c4449,
-       0x0000049f,
-       0x0000049d,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -128,17 +111,16 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x54534554,
+       0x00000898,
+       0x00000879,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
        0x00000000,
-/* 0x0214: time_next */
        0x00000000,
-/* 0x0218: fifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -151,6 +133,9 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x454c4449,
+       0x000008a3,
+       0x000008a1,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -170,9 +155,12 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
        0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x026c: time_next */
        0x00000000,
+/* 0x0270: fifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -204,31 +192,8 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0318: memx_func_head */
-       0x00010000,
-       0x00000000,
-       0x000003a9,
-/* 0x0324: memx_func_next */
-       0x00000001,
-       0x00000000,
-       0x000003c7,
-       0x00000002,
-       0x00000002,
-       0x000003df,
-       0x00040003,
-       0x00000000,
-       0x00000407,
-       0x00010004,
-       0x00000000,
-       0x00000421,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
        0x00000000,
+/* 0x02f0: rfifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -261,10 +226,25 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0370: memx_func_head */
+       0x00010000,
        0x00000000,
+       0x000003a9,
+/* 0x037c: memx_func_next */
+       0x00000001,
        0x00000000,
+       0x000003c7,
+       0x00000002,
+       0x00000002,
+       0x000003df,
+       0x00040003,
        0x00000000,
+       0x00000407,
+       0x00010004,
        0x00000000,
+       0x00000421,
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -735,7 +715,6 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0b54: memx_data_tail */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -778,6 +757,29 @@ uint32_t nv108_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+       0x00000400,
+       0x00000800,
+       0x00001000,
+       0x00002000,
+       0x00004000,
+       0x00008000,
+       0x00010000,
+       0x00020000,
+       0x00040000,
+       0x00080000,
+/* 0x0bd4: i2c_sda_map */
+       0x00100000,
+       0x00200000,
+       0x00400000,
+       0x00800000,
+       0x01000000,
+       0x02000000,
+       0x04000000,
+       0x08000000,
+       0x10000000,
+       0x20000000,
        0x00000000,
 };
 
@@ -786,13 +788,13 @@ uint32_t nv108_pwr_code[] = {
 /* 0x0004: rd32 */
        0xf607a040,
        0x04bd000e,
-       0xe3f0010e,
+       0xd3f0010d,
        0x07ac4001,
-       0xbd000ef6,
+       0xbd000df6,
 /* 0x0019: rd32_wait */
-       0x07ac4e04,
-       0xf100eecf,
-       0xf47000e4,
+       0x07ac4d04,
+       0xf100ddcf,
+       0xf47000d4,
        0xa44df61b,
        0x00ddcf07,
 /* 0x002e: wr32 */
@@ -800,14 +802,14 @@ uint32_t nv108_pwr_code[] = {
        0x000ef607,
        0xa44004bd,
        0x000df607,
-       0x020e04bd,
-       0xf0f0e5f0,
-       0xac4001e3,
-       0x000ef607,
+       0x020d04bd,
+       0xf0f0d5f0,
+       0xac4001d3,
+       0x000df607,
 /* 0x004e: wr32_wait */
-       0xac4e04bd,
-       0x00eecf07,
-       0x7000e4f1,
+       0xac4d04bd,
+       0x00ddcf07,
+       0x7000d4f1,
        0xf8f61bf4,
 /* 0x005d: nsec */
        0xcf2c0800,
@@ -832,20 +834,20 @@ uint32_t nv108_pwr_code[] = {
        0x03e99800,
        0xf40096b0,
        0x0a98280b,
-       0x029abb84,
+       0x029abb9a,
        0x0d0e1cf4,
        0x01de7e01,
        0xf494bd00,
 /* 0x00b2: intr_watchdog_next_time */
        0x0a98140e,
-       0x00a6b085,
+       0x00a6b09b,
        0xa6080bf4,
        0x061cf49a,
 /* 0x00c0: intr_watchdog_next_time_set */
 /* 0x00c3: intr_watchdog_next_proc */
-       0xb58509b5,
+       0xb59b09b5,
        0xe0b603e9,
-       0x10e6b158,
+       0x68e6b158,
        0xc81bf402,
 /* 0x00d2: intr */
        0x00f900f8,
@@ -862,15 +864,15 @@ uint32_t nv108_pwr_code[] = {
        0x080804bd,
        0xc40088cf,
        0x0bf40289,
-       0x8500b51f,
+       0x9b00b51f,
        0x957e580e,
        0x09980000,
-       0x0096b085,
+       0x0096b09b,
        0x000d0bf4,
        0x0009f634,
        0x09b504bd,
 /* 0x0125: intr_skip_watchdog */
-       0x0089e484,
+       0x0089e49a,
        0x360bf408,
        0xcf068849,
        0x9ac40099,
@@ -918,7 +920,7 @@ uint32_t nv108_pwr_code[] = {
 /* 0x01c6: timer_reset */
        0x3400161e,
        0xbd000ef6,
-       0x840eb504,
+       0x9a0eb504,
 /* 0x01d0: timer_enable */
        0x38000108,
        0xbd0008f6,
@@ -949,7 +951,7 @@ uint32_t nv108_pwr_code[] = {
        0xa6008a98,
        0x100bf4ae,
        0xb15880b6,
-       0xf4021086,
+       0xf4026886,
        0x32f4f11b,
 /* 0x0239: find_done */
        0xfc8eb201,
@@ -1009,7 +1011,7 @@ uint32_t nv108_pwr_code[] = {
        0x0bf412a6,
        0x071ec42e,
        0xb704ee94,
-       0x980218e0,
+       0x980270e0,
        0xec9803eb,
        0x01ed9802,
        0x7e00ee98,
@@ -1031,7 +1033,7 @@ uint32_t nv108_pwr_code[] = {
        0xf412a608,
        0x23c4ef0b,
        0x0434b607,
-       0x029830b7,
+       0x02f030b7,
        0xb5033bb5,
        0x3db5023c,
        0x003eb501,
@@ -1044,11 +1046,11 @@ uint32_t nv108_pwr_code[] = {
 /* 0x0379: host_init */
        0x00804100,
        0xf11014b6,
-       0x40021815,
+       0x40027015,
        0x01f604d0,
        0x4104bd00,
        0x14b60080,
-       0x9815f110,
+       0xf015f110,
        0x04dc4002,
        0xbd0001f6,
        0x40010104,
@@ -1101,13 +1103,13 @@ uint32_t nv108_pwr_code[] = {
        0x001398b2,
        0x950410b6,
        0x30f01034,
-       0xc835980c,
+       0xde35980c,
        0x12a655f9,
        0xfced1ef4,
        0x7ee0fcd0,
        0xf800023f,
 /* 0x0455: memx_info */
-       0x03544c00,
+       0x03ac4c00,
        0x7e08004b,
        0xf800023f,
 /* 0x0461: memx_recv */
@@ -1119,7 +1121,301 @@ uint32_t nv108_pwr_code[] = {
 /* 0x0471: perf_recv */
 /* 0x0473: perf_init */
        0xf800f800,
-/* 0x0475: test_recv */
+/* 0x0475: i2c_drive_scl */
+       0x0036b000,
+       0x400d0bf4,
+       0x01f607e0,
+       0xf804bd00,
+/* 0x0485: i2c_drive_scl_lo */
+       0x07e44000,
+       0xbd0001f6,
+/* 0x048f: i2c_drive_sda */
+       0xb000f804,
+       0x0bf40036,
+       0x07e0400d,
+       0xbd0002f6,
+/* 0x049f: i2c_drive_sda_lo */
+       0x4000f804,
+       0x02f607e4,
+       0xf804bd00,
+/* 0x04a9: i2c_sense_scl */
+       0x0132f400,
+       0xcf07c443,
+       0x31fd0033,
+       0x060bf404,
+/* 0x04bb: i2c_sense_scl_done */
+       0xf80131f4,
+/* 0x04bd: i2c_sense_sda */
+       0x0132f400,
+       0xcf07c443,
+       0x32fd0033,
+       0x060bf404,
+/* 0x04cf: i2c_sense_sda_done */
+       0xf80131f4,
+/* 0x04d1: i2c_raise_scl */
+       0x4440f900,
+       0x01030898,
+       0x0004757e,
+/* 0x04dc: i2c_raise_scl_wait */
+       0x7e03e84e,
+       0x7e00005d,
+       0xf40004a9,
+       0x42b60901,
+       0xef1bf401,
+/* 0x04f0: i2c_raise_scl_done */
+       0x00f840fc,
+/* 0x04f4: i2c_start */
+       0x0004a97e,
+       0x7e0d11f4,
+       0xf40004bd,
+       0x0ef40611,
+/* 0x0505: i2c_start_rep */
+       0x7e00032e,
+       0x03000475,
+       0x048f7e01,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0xd17e50fc,
+       0x64b60004,
+       0x1d11f404,
+/* 0x0530: i2c_start_send */
+       0x8f7e0003,
+       0x884e0004,
+       0x005d7e13,
+       0x7e000300,
+       0x4e000475,
+       0x5d7e1388,
+/* 0x054a: i2c_start_out */
+       0x00f80000,
+/* 0x054c: i2c_stop */
+       0x757e0003,
+       0x00030004,
+       0x00048f7e,
+       0x7e03e84e,
+       0x0300005d,
+       0x04757e01,
+       0x13884e00,
+       0x00005d7e,
+       0x8f7e0103,
+       0x884e0004,
+       0x005d7e13,
+/* 0x057b: i2c_bitw */
+       0x7e00f800,
+       0x4e00048f,
+       0x5d7e03e8,
+       0x76bb0000,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0x7e50fc04,
+       0xb60004d1,
+       0x11f40464,
+       0x13884e17,
+       0x00005d7e,
+       0x757e0003,
+       0x884e0004,
+       0x005d7e13,
+/* 0x05b9: i2c_bitw_out */
+/* 0x05bb: i2c_bitr */
+       0x0300f800,
+       0x048f7e01,
+       0x03e84e00,
+       0x00005d7e,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x04d17e50,
+       0x0464b600,
+       0x7e1a11f4,
+       0x030004bd,
+       0x04757e00,
+       0x13884e00,
+       0x00005d7e,
+       0xf4013cf0,
+/* 0x05fe: i2c_bitr_done */
+       0x00f80131,
+/* 0x0600: i2c_get_byte */
+       0x08040005,
+/* 0x0604: i2c_get_byte_next */
+       0xbb0154b6,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x0005bb7e,
+       0xf40464b6,
+       0x53fd2a11,
+       0x0142b605,
+       0x03d81bf4,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x7b7e50fc,
+       0x64b60005,
+/* 0x064d: i2c_get_byte_done */
+/* 0x064f: i2c_put_byte */
+       0x0400f804,
+/* 0x0651: i2c_put_byte_next */
+       0x0142b608,
+       0xbb3854ff,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x00057b7e,
+       0xf40464b6,
+       0x46b03411,
+       0xd81bf400,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x05bb7e50,
+       0x0464b600,
+       0xbb0f11f4,
+       0x36b00076,
+       0x061bf401,
+/* 0x06a7: i2c_put_byte_done */
+       0xf80132f4,
+/* 0x06a9: i2c_addr */
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0xf47e50fc,
+       0x64b60004,
+       0x2911f404,
+       0x012ec3e7,
+       0xfd0134b6,
+       0x76bb0553,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0x7e50fc04,
+       0xb600064f,
+/* 0x06ee: i2c_addr_done */
+       0x00f80464,
+/* 0x06f0: i2c_acquire_addr */
+       0xb6f8cec7,
+       0xe0b705e4,
+       0x00f8d014,
+/* 0x06fc: i2c_acquire */
+       0x0006f07e,
+       0x0000047e,
+       0x7e03d9f0,
+       0xf800002e,
+/* 0x070d: i2c_release */
+       0x06f07e00,
+       0x00047e00,
+       0x03daf000,
+       0x00002e7e,
+/* 0x071e: i2c_recv */
+       0x32f400f8,
+       0xf8c1c701,
+       0xb00214b6,
+       0x1ff52816,
+       0x13b80137,
+       0x98000bd4,
+       0x13b80032,
+       0x98000bac,
+       0x31f40031,
+       0xf9d0f902,
+       0xf1d0f9e0,
+       0xf1000067,
+       0x92100063,
+       0x76bb0167,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0x7e50fc04,
+       0xb60006fc,
+       0xd0fc0464,
+       0xf500d6b0,
+       0x0500b01b,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0xa97e50fc,
+       0x64b60006,
+       0xcc11f504,
+       0xe0c5c700,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x064f7e50,
+       0x0464b600,
+       0x00a911f5,
+       0x76bb0105,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0x7e50fc04,
+       0xb60006a9,
+       0x11f50464,
+       0x76bb0087,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0x7e50fc04,
+       0xb6000600,
+       0x11f40464,
+       0xe05bcb67,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x054c7e50,
+       0x0464b600,
+       0x74bd5bb2,
+/* 0x0823: i2c_recv_not_rd08 */
+       0xb0410ef4,
+       0x1bf401d6,
+       0x7e00053b,
+       0xf40006a9,
+       0xc5c73211,
+       0x064f7ee0,
+       0x2811f400,
+       0xa97e0005,
+       0x11f40006,
+       0xe0b5c71f,
+       0x00064f7e,
+       0x7e1511f4,
+       0xbd00054c,
+       0x08c5c774,
+       0xf4091bf4,
+       0x0ef40232,
+/* 0x0861: i2c_recv_not_wr08 */
+/* 0x0861: i2c_recv_done */
+       0xf8cec703,
+       0x00070d7e,
+       0xd0fce0fc,
+       0xb20912f4,
+       0x023f7e7c,
+/* 0x0875: i2c_recv_exit */
+/* 0x0877: i2c_init */
+       0xf800f800,
+/* 0x0879: test_recv */
        0x04584100,
        0xb60011cf,
        0x58400110,
@@ -1128,26 +1424,26 @@ uint32_t nv108_pwr_code[] = {
        0xe3f1d900,
        0x967e134f,
        0x00f80001,
-/* 0x0494: test_init */
+/* 0x0898: test_init */
        0x7e08004e,
        0xf8000196,
-/* 0x049d: idle_recv */
-/* 0x049f: idle */
+/* 0x08a1: idle_recv */
+/* 0x08a3: idle */
        0xf400f800,
        0x54410031,
        0x0011cf04,
        0x400110b6,
        0x01f60454,
-/* 0x04b3: idle_loop */
+/* 0x08b7: idle_loop */
        0x0104bd00,
        0x0232f458,
-/* 0x04b8: idle_proc */
-/* 0x04b8: idle_proc_exec */
+/* 0x08bc: idle_proc */
+/* 0x08bc: idle_proc_exec */
        0x1eb210f9,
        0x0002487e,
        0x11f410fc,
        0x0231f409,
-/* 0x04cb: idle_proc_next */
+/* 0x08cf: idle_proc_next */
        0xb6f00ef4,
        0x1fa65810,
        0xf4e81bf4,
@@ -1161,5 +1457,4 @@ uint32_t nv108_pwr_code[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x00000000,
 };
index 6fde0b89e5aa641de3add5489a98dea4e25c61bc..6744fcc0615160e657227285a70fc2c201bb7cb9 100644 (file)
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
index 0fa4d7dcd407bb5bdb7802dcd4a6382c6bb84803..5a73fa620978aa0b300f87f80e6a1c049a2aa094 100644 (file)
@@ -89,9 +89,31 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x5f433249,
+       0x00000982,
+       0x00000825,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x54534554,
-       0x0000057b,
-       0x00000554,
+       0x000009ab,
+       0x00000984,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -112,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x454c4449,
-       0x00000587,
-       0x00000585,
+       0x000009b7,
+       0x000009b5,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -133,12 +155,12 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
        0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
        0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -171,7 +193,7 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -204,11 +226,11 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
        0x00010000,
        0x00000000,
        0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
        0x00000001,
        0x00000000,
        0x00000496,
@@ -221,8 +243,18 @@ uint32_t nva3_pwr_data[] = {
        0x00010004,
        0x00000000,
        0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -725,6 +757,42 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+       0x00001000,
+       0x00004000,
+       0x00010000,
+       0x00000100,
+       0x00040000,
+       0x00100000,
+       0x00400000,
+       0x01000000,
+       0x04000000,
+       0x10000000,
+/* 0x0bd4: i2c_sda_map */
+       0x00002000,
+       0x00008000,
+       0x00020000,
+       0x00000200,
+       0x00080000,
+       0x00200000,
+       0x00800000,
+       0x02000000,
+       0x08000000,
+       0x20000000,
+/* 0x0bfc: i2c_ctrl */
+       0x0000e138,
+       0x0000e150,
+       0x0000e168,
+       0x0000e180,
+       0x0000e254,
+       0x0000e274,
+       0x0000e764,
+       0x0000e780,
+       0x0000e79c,
+       0x0000e7b8,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -735,7 +803,6 @@ uint32_t nva3_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0b54: memx_data_tail */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -787,15 +854,15 @@ uint32_t nva3_pwr_code[] = {
        0x07a007f1,
        0xd00604b6,
        0x04bd000e,
-       0xf001e7f0,
-       0x07f101e3,
+       0xf001d7f0,
+       0x07f101d3,
        0x04b607ac,
-       0x000ed006,
+       0x000dd006,
 /* 0x0022: rd32_wait */
-       0xe7f104bd,
-       0xe4b607ac,
-       0x00eecf06,
-       0x7000e4f1,
+       0xd7f104bd,
+       0xd4b607ac,
+       0x00ddcf06,
+       0x7000d4f1,
        0xf1f21bf4,
        0xb607a4d7,
        0xddcf06d4,
@@ -807,15 +874,15 @@ uint32_t nva3_pwr_code[] = {
        0xb607a407,
        0x0dd00604,
        0xf004bd00,
-       0xe5f002e7,
-       0x01e3f0f0,
+       0xd5f002d7,
+       0x01d3f0f0,
        0x07ac07f1,
        0xd00604b6,
-       0x04bd000e,
+       0x04bd000d,
 /* 0x006c: wr32_wait */
-       0x07ace7f1,
-       0xcf06e4b6,
-       0xe4f100ee,
+       0x07acd7f1,
+       0xcf06d4b6,
+       0xd4f100dd,
        0x1bf47000,
 /* 0x007f: nsec */
        0xf000f8f2,
@@ -845,21 +912,21 @@ uint32_t nva3_pwr_code[] = {
        0x9800f8df,
        0x96b003e9,
        0x2a0bf400,
-       0xbb840a98,
+       0xbb9a0a98,
        0x1cf4029a,
        0x01d7f00f,
        0x025421f5,
        0x0ef494bd,
 /* 0x00e9: intr_watchdog_next_time */
-       0x850a9815,
+       0x9b0a9815,
        0xf400a6b0,
        0x9ab8090b,
        0x061cf406,
 /* 0x00f8: intr_watchdog_next_time_set */
 /* 0x00fb: intr_watchdog_next_proc */
-       0x80850980,
+       0x809b0980,
        0xe0b603e9,
-       0x10e6b158,
+       0x68e6b158,
        0xc61bf402,
 /* 0x010a: intr */
        0x00f900f8,
@@ -880,15 +947,15 @@ uint32_t nva3_pwr_code[] = {
        0x0088cf06,
        0xf40289c4,
        0x0080230b,
-       0x58e7f085,
+       0x58e7f09b,
        0x98cb21f4,
-       0x96b08509,
+       0x96b09b09,
        0x110bf400,
        0xb63407f0,
        0x09d00604,
        0x8004bd00,
 /* 0x016e: intr_skip_watchdog */
-       0x89e48409,
+       0x89e49a09,
        0x0bf40800,
        0x8897f148,
        0x0694b606,
@@ -948,7 +1015,7 @@ uint32_t nva3_pwr_code[] = {
        0x000ed006,
        0x0e8004bd,
 /* 0x0241: timer_enable */
-       0x0187f084,
+       0x0187f09a,
        0xb63807f0,
        0x08d00604,
 /* 0x024f: timer_done */
@@ -979,7 +1046,7 @@ uint32_t nva3_pwr_code[] = {
        0xb8008a98,
        0x0bf406ae,
        0x5880b610,
-       0x021086b1,
+       0x026886b1,
        0xf4f01bf4,
 /* 0x02b2: find_done */
        0x8eb90132,
@@ -1049,7 +1116,7 @@ uint32_t nva3_pwr_code[] = {
        0x320bf406,
        0x94071ec4,
        0xe0b704ee,
-       0xeb980218,
+       0xeb980270,
        0x02ec9803,
        0x9801ed98,
        0x21f500ee,
@@ -1075,7 +1142,7 @@ uint32_t nva3_pwr_code[] = {
        0xe60bf406,
        0xb60723c4,
        0x30b70434,
-       0x3b800298,
+       0x3b8002f0,
        0x023c8003,
        0x80013d80,
        0x20b6003e,
@@ -1090,13 +1157,13 @@ uint32_t nva3_pwr_code[] = {
 /* 0x0430: host_init */
        0x008017f1,
        0xf11014b6,
-       0xf1021815,
+       0xf1027015,
        0xb604d007,
        0x01d00604,
        0xf104bd00,
        0xb6008017,
        0x15f11014,
-       0x07f10298,
+       0x07f102f0,
        0x04b604dc,
        0x0001d006,
        0x17f004bd,
@@ -1156,14 +1223,14 @@ uint32_t nva3_pwr_code[] = {
        0x00139802,
        0x950410b6,
        0x30f01034,
-       0xc835980c,
+       0xde35980c,
        0x12b855f9,
        0xec1ef406,
        0xe0fcd0fc,
        0x02b921f5,
 /* 0x0532: memx_info */
        0xc7f100f8,
-       0xb7f10354,
+       0xb7f103ac,
        0x21f50800,
        0x00f802b9,
 /* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@ uint32_t nva3_pwr_code[] = {
 /* 0x0550: perf_recv */
 /* 0x0552: perf_init */
        0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+       0xf40036b0,
+       0x07f1110b,
+       0x04b607e0,
+       0x0001d006,
+       0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+       0x07e407f1,
+       0xd00604b6,
+       0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+       0x36b000f8,
+       0x110bf400,
+       0x07e007f1,
+       0xd00604b6,
+       0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+       0x07f100f8,
+       0x04b607e4,
+       0x0002d006,
+       0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0431fd00,
+       0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+       0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0432fd00,
+       0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+       0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+       0x47f140f9,
+       0x37f00898,
+       0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+       0xe8e7f105,
+       0x7f21f403,
+       0x059821f5,
+       0xb60901f4,
+       0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+       0xf840fcef,
+/* 0x05ed: i2c_start */
+       0x9821f500,
+       0x0d11f405,
+       0x05b021f5,
+       0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+       0x37f0300e,
+       0x5421f500,
+       0x0137f005,
+       0x057621f5,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xc821f550,
+       0x0464b605,
+/* 0x062b: i2c_start_send */
+       0xf01f11f4,
+       0x21f50037,
+       0xe7f10576,
+       0x21f41388,
+       0x0037f07f,
+       0x055421f5,
+       0x1388e7f1,
+/* 0x0647: i2c_start_out */
+       0xf87f21f4,
+/* 0x0649: i2c_stop */
+       0x0037f000,
+       0x055421f5,
+       0xf50037f0,
+       0xf1057621,
+       0xf403e8e7,
+       0x37f07f21,
+       0x5421f501,
+       0x88e7f105,
+       0x7f21f413,
+       0xf50137f0,
+       0xf1057621,
+       0xf41388e7,
+       0x00f87f21,
+/* 0x067c: i2c_bitw */
+       0x057621f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x05c821f5,
+       0xf40464b6,
+       0xe7f11811,
+       0x21f41388,
+       0x0037f07f,
+       0x055421f5,
+       0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+       0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+       0x0137f000,
+       0x057621f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x05c821f5,
+       0xf40464b6,
+       0x21f51b11,
+       0x37f005b0,
+       0x5421f500,
+       0x88e7f105,
+       0x7f21f413,
+       0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+       0x00f80131,
+/* 0x0704: i2c_get_byte */
+       0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+       0x54b60847,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b606bd,
+       0x2b11f404,
+       0xb60553fd,
+       0x1bf40142,
+       0x0137f0d8,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x7c21f550,
+       0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+       0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+       0x0142b608,
+       0xbb3854ff,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x067c21f5,
+       0xf40464b6,
+       0x46b03411,
+       0xd81bf400,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xbd21f550,
+       0x0464b606,
+       0xbb0f11f4,
+       0x36b00076,
+       0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+       0xf80132f4,
+/* 0x07b1: i2c_addr */
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b605ed,
+       0x2911f404,
+       0x012ec3e7,
+       0xfd0134b6,
+       0x76bb0553,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb6075621,
+/* 0x07f6: i2c_addr_done */
+       0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+       0xb6f8cec7,
+       0xe0b702e4,
+       0xee980bfc,
+/* 0x0807: i2c_acquire */
+       0xf500f800,
+       0xf407f821,
+       0xd9f00421,
+       0x3f21f403,
+/* 0x0816: i2c_release */
+       0x21f500f8,
+       0x21f407f8,
+       0x03daf004,
+       0xf83f21f4,
+/* 0x0825: i2c_recv */
+       0x0132f400,
+       0xb6f8c1c7,
+       0x16b00214,
+       0x3a1ff528,
+       0xd413a001,
+       0x0032980b,
+       0x0bac13a0,
+       0xf4003198,
+       0xd0f90231,
+       0xd0f9e0f9,
+       0x000067f1,
+       0x100063f1,
+       0xbb016792,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x080721f5,
+       0xfc0464b6,
+       0x00d6b0d0,
+       0x00b31bf5,
+       0xbb0057f0,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x07b121f5,
+       0xf50464b6,
+       0xc700d011,
+       0x76bbe0c5,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb6075621,
+       0x11f50464,
+       0x57f000ad,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b607b1,
+       0x8a11f504,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b60704,
+       0x6a11f404,
+       0xbbe05bcb,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x064921f5,
+       0xb90464b6,
+       0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+       0xb0430ef4,
+       0x1bf401d6,
+       0x0057f03d,
+       0x07b121f5,
+       0xc73311f4,
+       0x21f5e0c5,
+       0x11f40756,
+       0x0057f029,
+       0x07b121f5,
+       0xc71f11f4,
+       0x21f5e0b5,
+       0x11f40756,
+       0x4921f515,
+       0xc774bd06,
+       0x1bf408c5,
+       0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+       0xc7030ef4,
+       0x21f5f8ce,
+       0xe0fc0816,
+       0x12f4d0fc,
+       0x027cb90a,
+       0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+       0x00f800f8,
+/* 0x0984: test_recv */
        0x05d817f1,
        0xcf0614b6,
        0x10b60011,
@@ -1185,12 +1557,12 @@ uint32_t nva3_pwr_code[] = {
        0x00e7f104,
        0x4fe3f1d9,
        0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
        0xf100f801,
        0xf50800e7,
        0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
        0xf400f800,
        0x17f10031,
        0x14b605d4,
@@ -1198,32 +1570,20 @@ uint32_t nva3_pwr_code[] = {
        0xf10110b6,
        0xb605d407,
        0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
        0xf004bd00,
        0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
        0xb910f902,
        0x21f5021e,
        0x10fc02c2,
        0xf40911f4,
        0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
        0x5810b6ef,
        0xf4061fb8,
        0x02f4e61b,
        0x0028f4dd,
        0x00bb0ef4,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
 };
index eaa64da68e3604b1537753cc314388311dd48514..48f79434a4491f30c61f95755d43c37097f73249 100644 (file)
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
index 82c8e8b889178dd34e0eef6cf6a39fdd0d7a800d..4dba00d2dd1a6f579d19c4571964440ccbf077b5 100644 (file)
@@ -89,9 +89,31 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x5f433249,
+       0x00000982,
+       0x00000825,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x54534554,
-       0x0000057b,
-       0x00000554,
+       0x000009ab,
+       0x00000984,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -112,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x454c4449,
-       0x00000587,
-       0x00000585,
+       0x000009b7,
+       0x000009b5,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -133,12 +155,12 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
        0x00000000,
-/* 0x0214: time_next */
+/* 0x026c: time_next */
        0x00000000,
-/* 0x0218: fifo_queue */
+/* 0x0270: fifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -171,7 +193,7 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0298: rfifo_queue */
+/* 0x02f0: rfifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -204,11 +226,11 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0318: memx_func_head */
+/* 0x0370: memx_func_head */
        0x00010000,
        0x00000000,
        0x0000046f,
-/* 0x0324: memx_func_next */
+/* 0x037c: memx_func_next */
        0x00000001,
        0x00000000,
        0x00000496,
@@ -221,8 +243,18 @@ uint32_t nvc0_pwr_data[] = {
        0x00010004,
        0x00000000,
        0x000004fc,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -725,6 +757,42 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+       0x00001000,
+       0x00004000,
+       0x00010000,
+       0x00000100,
+       0x00040000,
+       0x00100000,
+       0x00400000,
+       0x01000000,
+       0x04000000,
+       0x10000000,
+/* 0x0bd4: i2c_sda_map */
+       0x00002000,
+       0x00008000,
+       0x00020000,
+       0x00000200,
+       0x00080000,
+       0x00200000,
+       0x00800000,
+       0x02000000,
+       0x08000000,
+       0x20000000,
+/* 0x0bfc: i2c_ctrl */
+       0x0000e138,
+       0x0000e150,
+       0x0000e168,
+       0x0000e180,
+       0x0000e254,
+       0x0000e274,
+       0x0000e764,
+       0x0000e780,
+       0x0000e79c,
+       0x0000e7b8,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -735,7 +803,6 @@ uint32_t nvc0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0b54: memx_data_tail */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -787,15 +854,15 @@ uint32_t nvc0_pwr_code[] = {
        0x07a007f1,
        0xd00604b6,
        0x04bd000e,
-       0xf001e7f0,
-       0x07f101e3,
+       0xf001d7f0,
+       0x07f101d3,
        0x04b607ac,
-       0x000ed006,
+       0x000dd006,
 /* 0x0022: rd32_wait */
-       0xe7f104bd,
-       0xe4b607ac,
-       0x00eecf06,
-       0x7000e4f1,
+       0xd7f104bd,
+       0xd4b607ac,
+       0x00ddcf06,
+       0x7000d4f1,
        0xf1f21bf4,
        0xb607a4d7,
        0xddcf06d4,
@@ -807,15 +874,15 @@ uint32_t nvc0_pwr_code[] = {
        0xb607a407,
        0x0dd00604,
        0xf004bd00,
-       0xe5f002e7,
-       0x01e3f0f0,
+       0xd5f002d7,
+       0x01d3f0f0,
        0x07ac07f1,
        0xd00604b6,
-       0x04bd000e,
+       0x04bd000d,
 /* 0x006c: wr32_wait */
-       0x07ace7f1,
-       0xcf06e4b6,
-       0xe4f100ee,
+       0x07acd7f1,
+       0xcf06d4b6,
+       0xd4f100dd,
        0x1bf47000,
 /* 0x007f: nsec */
        0xf000f8f2,
@@ -845,21 +912,21 @@ uint32_t nvc0_pwr_code[] = {
        0x9800f8df,
        0x96b003e9,
        0x2a0bf400,
-       0xbb840a98,
+       0xbb9a0a98,
        0x1cf4029a,
        0x01d7f00f,
        0x025421f5,
        0x0ef494bd,
 /* 0x00e9: intr_watchdog_next_time */
-       0x850a9815,
+       0x9b0a9815,
        0xf400a6b0,
        0x9ab8090b,
        0x061cf406,
 /* 0x00f8: intr_watchdog_next_time_set */
 /* 0x00fb: intr_watchdog_next_proc */
-       0x80850980,
+       0x809b0980,
        0xe0b603e9,
-       0x10e6b158,
+       0x68e6b158,
        0xc61bf402,
 /* 0x010a: intr */
        0x00f900f8,
@@ -880,15 +947,15 @@ uint32_t nvc0_pwr_code[] = {
        0x0088cf06,
        0xf40289c4,
        0x0080230b,
-       0x58e7f085,
+       0x58e7f09b,
        0x98cb21f4,
-       0x96b08509,
+       0x96b09b09,
        0x110bf400,
        0xb63407f0,
        0x09d00604,
        0x8004bd00,
 /* 0x016e: intr_skip_watchdog */
-       0x89e48409,
+       0x89e49a09,
        0x0bf40800,
        0x8897f148,
        0x0694b606,
@@ -948,7 +1015,7 @@ uint32_t nvc0_pwr_code[] = {
        0x000ed006,
        0x0e8004bd,
 /* 0x0241: timer_enable */
-       0x0187f084,
+       0x0187f09a,
        0xb63807f0,
        0x08d00604,
 /* 0x024f: timer_done */
@@ -979,7 +1046,7 @@ uint32_t nvc0_pwr_code[] = {
        0xb8008a98,
        0x0bf406ae,
        0x5880b610,
-       0x021086b1,
+       0x026886b1,
        0xf4f01bf4,
 /* 0x02b2: find_done */
        0x8eb90132,
@@ -1049,7 +1116,7 @@ uint32_t nvc0_pwr_code[] = {
        0x320bf406,
        0x94071ec4,
        0xe0b704ee,
-       0xeb980218,
+       0xeb980270,
        0x02ec9803,
        0x9801ed98,
        0x21f500ee,
@@ -1075,7 +1142,7 @@ uint32_t nvc0_pwr_code[] = {
        0xe60bf406,
        0xb60723c4,
        0x30b70434,
-       0x3b800298,
+       0x3b8002f0,
        0x023c8003,
        0x80013d80,
        0x20b6003e,
@@ -1090,13 +1157,13 @@ uint32_t nvc0_pwr_code[] = {
 /* 0x0430: host_init */
        0x008017f1,
        0xf11014b6,
-       0xf1021815,
+       0xf1027015,
        0xb604d007,
        0x01d00604,
        0xf104bd00,
        0xb6008017,
        0x15f11014,
-       0x07f10298,
+       0x07f102f0,
        0x04b604dc,
        0x0001d006,
        0x17f004bd,
@@ -1156,14 +1223,14 @@ uint32_t nvc0_pwr_code[] = {
        0x00139802,
        0x950410b6,
        0x30f01034,
-       0xc835980c,
+       0xde35980c,
        0x12b855f9,
        0xec1ef406,
        0xe0fcd0fc,
        0x02b921f5,
 /* 0x0532: memx_info */
        0xc7f100f8,
-       0xb7f10354,
+       0xb7f103ac,
        0x21f50800,
        0x00f802b9,
 /* 0x0540: memx_recv */
@@ -1175,7 +1242,312 @@ uint32_t nvc0_pwr_code[] = {
 /* 0x0550: perf_recv */
 /* 0x0552: perf_init */
        0x00f800f8,
-/* 0x0554: test_recv */
+/* 0x0554: i2c_drive_scl */
+       0xf40036b0,
+       0x07f1110b,
+       0x04b607e0,
+       0x0001d006,
+       0x00f804bd,
+/* 0x0568: i2c_drive_scl_lo */
+       0x07e407f1,
+       0xd00604b6,
+       0x04bd0001,
+/* 0x0576: i2c_drive_sda */
+       0x36b000f8,
+       0x110bf400,
+       0x07e007f1,
+       0xd00604b6,
+       0x04bd0002,
+/* 0x058a: i2c_drive_sda_lo */
+       0x07f100f8,
+       0x04b607e4,
+       0x0002d006,
+       0x00f804bd,
+/* 0x0598: i2c_sense_scl */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0431fd00,
+       0xf4060bf4,
+/* 0x05ae: i2c_sense_scl_done */
+       0x00f80131,
+/* 0x05b0: i2c_sense_sda */
+       0xf10132f4,
+       0xb607c437,
+       0x33cf0634,
+       0x0432fd00,
+       0xf4060bf4,
+/* 0x05c6: i2c_sense_sda_done */
+       0x00f80131,
+/* 0x05c8: i2c_raise_scl */
+       0x47f140f9,
+       0x37f00898,
+       0x5421f501,
+/* 0x05d5: i2c_raise_scl_wait */
+       0xe8e7f105,
+       0x7f21f403,
+       0x059821f5,
+       0xb60901f4,
+       0x1bf40142,
+/* 0x05e9: i2c_raise_scl_done */
+       0xf840fcef,
+/* 0x05ed: i2c_start */
+       0x9821f500,
+       0x0d11f405,
+       0x05b021f5,
+       0xf40611f4,
+/* 0x05fe: i2c_start_rep */
+       0x37f0300e,
+       0x5421f500,
+       0x0137f005,
+       0x057621f5,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xc821f550,
+       0x0464b605,
+/* 0x062b: i2c_start_send */
+       0xf01f11f4,
+       0x21f50037,
+       0xe7f10576,
+       0x21f41388,
+       0x0037f07f,
+       0x055421f5,
+       0x1388e7f1,
+/* 0x0647: i2c_start_out */
+       0xf87f21f4,
+/* 0x0649: i2c_stop */
+       0x0037f000,
+       0x055421f5,
+       0xf50037f0,
+       0xf1057621,
+       0xf403e8e7,
+       0x37f07f21,
+       0x5421f501,
+       0x88e7f105,
+       0x7f21f413,
+       0xf50137f0,
+       0xf1057621,
+       0xf41388e7,
+       0x00f87f21,
+/* 0x067c: i2c_bitw */
+       0x057621f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x05c821f5,
+       0xf40464b6,
+       0xe7f11811,
+       0x21f41388,
+       0x0037f07f,
+       0x055421f5,
+       0x1388e7f1,
+/* 0x06bb: i2c_bitw_out */
+       0xf87f21f4,
+/* 0x06bd: i2c_bitr */
+       0x0137f000,
+       0x057621f5,
+       0x03e8e7f1,
+       0xbb7f21f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x05c821f5,
+       0xf40464b6,
+       0x21f51b11,
+       0x37f005b0,
+       0x5421f500,
+       0x88e7f105,
+       0x7f21f413,
+       0xf4013cf0,
+/* 0x0702: i2c_bitr_done */
+       0x00f80131,
+/* 0x0704: i2c_get_byte */
+       0xf00057f0,
+/* 0x070a: i2c_get_byte_next */
+       0x54b60847,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b606bd,
+       0x2b11f404,
+       0xb60553fd,
+       0x1bf40142,
+       0x0137f0d8,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x7c21f550,
+       0x0464b606,
+/* 0x0754: i2c_get_byte_done */
+/* 0x0756: i2c_put_byte */
+       0x47f000f8,
+/* 0x0759: i2c_put_byte_next */
+       0x0142b608,
+       0xbb3854ff,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x067c21f5,
+       0xf40464b6,
+       0x46b03411,
+       0xd81bf400,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xbd21f550,
+       0x0464b606,
+       0xbb0f11f4,
+       0x36b00076,
+       0x061bf401,
+/* 0x07af: i2c_put_byte_done */
+       0xf80132f4,
+/* 0x07b1: i2c_addr */
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b605ed,
+       0x2911f404,
+       0x012ec3e7,
+       0xfd0134b6,
+       0x76bb0553,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb6075621,
+/* 0x07f6: i2c_addr_done */
+       0x00f80464,
+/* 0x07f8: i2c_acquire_addr */
+       0xb6f8cec7,
+       0xe0b702e4,
+       0xee980bfc,
+/* 0x0807: i2c_acquire */
+       0xf500f800,
+       0xf407f821,
+       0xd9f00421,
+       0x3f21f403,
+/* 0x0816: i2c_release */
+       0x21f500f8,
+       0x21f407f8,
+       0x03daf004,
+       0xf83f21f4,
+/* 0x0825: i2c_recv */
+       0x0132f400,
+       0xb6f8c1c7,
+       0x16b00214,
+       0x3a1ff528,
+       0xd413a001,
+       0x0032980b,
+       0x0bac13a0,
+       0xf4003198,
+       0xd0f90231,
+       0xd0f9e0f9,
+       0x000067f1,
+       0x100063f1,
+       0xbb016792,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x080721f5,
+       0xfc0464b6,
+       0x00d6b0d0,
+       0x00b31bf5,
+       0xbb0057f0,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x07b121f5,
+       0xf50464b6,
+       0xc700d011,
+       0x76bbe0c5,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb6075621,
+       0x11f50464,
+       0x57f000ad,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b607b1,
+       0x8a11f504,
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b60704,
+       0x6a11f404,
+       0xbbe05bcb,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x064921f5,
+       0xb90464b6,
+       0x74bd025b,
+/* 0x092b: i2c_recv_not_rd08 */
+       0xb0430ef4,
+       0x1bf401d6,
+       0x0057f03d,
+       0x07b121f5,
+       0xc73311f4,
+       0x21f5e0c5,
+       0x11f40756,
+       0x0057f029,
+       0x07b121f5,
+       0xc71f11f4,
+       0x21f5e0b5,
+       0x11f40756,
+       0x4921f515,
+       0xc774bd06,
+       0x1bf408c5,
+       0x0232f409,
+/* 0x096b: i2c_recv_not_wr08 */
+/* 0x096b: i2c_recv_done */
+       0xc7030ef4,
+       0x21f5f8ce,
+       0xe0fc0816,
+       0x12f4d0fc,
+       0x027cb90a,
+       0x02b921f5,
+/* 0x0980: i2c_recv_exit */
+/* 0x0982: i2c_init */
+       0x00f800f8,
+/* 0x0984: test_recv */
        0x05d817f1,
        0xcf0614b6,
        0x10b60011,
@@ -1185,12 +1557,12 @@ uint32_t nvc0_pwr_code[] = {
        0x00e7f104,
        0x4fe3f1d9,
        0xf521f513,
-/* 0x057b: test_init */
+/* 0x09ab: test_init */
        0xf100f801,
        0xf50800e7,
        0xf801f521,
-/* 0x0585: idle_recv */
-/* 0x0587: idle */
+/* 0x09b5: idle_recv */
+/* 0x09b7: idle */
        0xf400f800,
        0x17f10031,
        0x14b605d4,
@@ -1198,32 +1570,20 @@ uint32_t nvc0_pwr_code[] = {
        0xf10110b6,
        0xb605d407,
        0x01d00604,
-/* 0x05a3: idle_loop */
+/* 0x09d3: idle_loop */
        0xf004bd00,
        0x32f45817,
-/* 0x05a9: idle_proc */
-/* 0x05a9: idle_proc_exec */
+/* 0x09d9: idle_proc */
+/* 0x09d9: idle_proc_exec */
        0xb910f902,
        0x21f5021e,
        0x10fc02c2,
        0xf40911f4,
        0x0ef40231,
-/* 0x05bd: idle_proc_next */
+/* 0x09ed: idle_proc_next */
        0x5810b6ef,
        0xf4061fb8,
        0x02f4e61b,
        0x0028f4dd,
        0x00bb0ef4,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
 };
index 32d65ea254dd85c9b89677855d527c63d2b85654..8a89dfe41ce1ba8ddf7b808ef5534e5eefd418a9 100644 (file)
@@ -37,6 +37,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_PROC
@@ -46,6 +47,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_DATA
@@ -57,6 +59,7 @@
 #include "host.fuc"
 #include "memx.fuc"
 #include "perf.fuc"
+#include "i2c_.fuc"
 #include "test.fuc"
 #include "idle.fuc"
 #undef INCLUDE_CODE
index ce65e2a4b789b4cfef72c9ee196bc0a69c20b906..5e24c6bc041d6aefbe8de51933ed4c30e0af3fcc 100644 (file)
@@ -89,33 +89,13 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-       0x54534554,
-       0x000004eb,
-       0x000004ca,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x5f433249,
+       0x000008e3,
+       0x00000786,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
-       0x454c4449,
-       0x000004f7,
-       0x000004f5,
-       0x00000000,
-       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -131,14 +111,13 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x54534554,
+       0x00000906,
+       0x000008e5,
        0x00000000,
        0x00000000,
-/* 0x0210: proc_list_tail */
-/* 0x0210: time_prev */
        0x00000000,
-/* 0x0214: time_next */
        0x00000000,
-/* 0x0218: fifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -154,6 +133,9 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x454c4449,
+       0x00000912,
+       0x00000910,
        0x00000000,
        0x00000000,
        0x00000000,
@@ -171,11 +153,14 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0298: rfifo_queue */
        0x00000000,
        0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
        0x00000000,
+/* 0x026c: time_next */
        0x00000000,
+/* 0x0270: fifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -204,31 +189,11 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0318: memx_func_head */
-       0x00010000,
-       0x00000000,
-       0x000003f4,
-/* 0x0324: memx_func_next */
-       0x00000001,
-       0x00000000,
-       0x00000415,
-       0x00000002,
-       0x00000002,
-       0x00000430,
-       0x00040003,
-       0x00000000,
-       0x00000458,
-       0x00010004,
-       0x00000000,
-       0x00000472,
-/* 0x0354: memx_func_tail */
-/* 0x0354: memx_data_head */
-       0x00000000,
-       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x02f0: rfifo_queue */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -261,10 +226,25 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0370: memx_func_head */
+       0x00010000,
        0x00000000,
+       0x000003f4,
+/* 0x037c: memx_func_next */
+       0x00000001,
        0x00000000,
+       0x00000415,
+       0x00000002,
+       0x00000002,
+       0x00000430,
+       0x00040003,
        0x00000000,
+       0x00000458,
+       0x00010004,
        0x00000000,
+       0x00000472,
+/* 0x03ac: memx_func_tail */
+/* 0x03ac: memx_data_head */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -735,7 +715,6 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
-/* 0x0b54: memx_data_tail */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -778,6 +757,29 @@ uint32_t nvd0_pwr_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0bac: memx_data_tail */
+/* 0x0bac: i2c_scl_map */
+       0x00000400,
+       0x00000800,
+       0x00001000,
+       0x00002000,
+       0x00004000,
+       0x00008000,
+       0x00010000,
+       0x00020000,
+       0x00040000,
+       0x00080000,
+/* 0x0bd4: i2c_sda_map */
+       0x00100000,
+       0x00200000,
+       0x00400000,
+       0x00800000,
+       0x01000000,
+       0x02000000,
+       0x04000000,
+       0x08000000,
+       0x10000000,
+       0x20000000,
        0x00000000,
 };
 
@@ -786,14 +788,14 @@ uint32_t nvd0_pwr_code[] = {
 /* 0x0004: rd32 */
        0x07a007f1,
        0xbd000ed0,
-       0x01e7f004,
-       0xf101e3f0,
+       0x01d7f004,
+       0xf101d3f0,
        0xd007ac07,
-       0x04bd000e,
+       0x04bd000d,
 /* 0x001c: rd32_wait */
-       0x07ace7f1,
-       0xf100eecf,
-       0xf47000e4,
+       0x07acd7f1,
+       0xf100ddcf,
+       0xf47000d4,
        0xd7f1f51b,
        0xddcf07a4,
 /* 0x0033: wr32 */
@@ -802,14 +804,14 @@ uint32_t nvd0_pwr_code[] = {
        0x04bd000e,
        0x07a407f1,
        0xbd000dd0,
-       0x02e7f004,
-       0xf0f0e5f0,
-       0x07f101e3,
-       0x0ed007ac,
+       0x02d7f004,
+       0xf0f0d5f0,
+       0x07f101d3,
+       0x0dd007ac,
 /* 0x0057: wr32_wait */
        0xf104bd00,
-       0xcf07ace7,
-       0xe4f100ee,
+       0xcf07acd7,
+       0xd4f100dd,
        0x1bf47000,
 /* 0x0067: nsec */
        0xf000f8f5,
@@ -836,21 +838,21 @@ uint32_t nvd0_pwr_code[] = {
        0x9800f8e2,
        0x96b003e9,
        0x2a0bf400,
-       0xbb840a98,
+       0xbb9a0a98,
        0x1cf4029a,
        0x01d7f00f,
        0x020621f5,
        0x0ef494bd,
 /* 0x00c5: intr_watchdog_next_time */
-       0x850a9815,
+       0x9b0a9815,
        0xf400a6b0,
        0x9ab8090b,
        0x061cf406,
 /* 0x00d4: intr_watchdog_next_time_set */
 /* 0x00d7: intr_watchdog_next_proc */
-       0x80850980,
+       0x809b0980,
        0xe0b603e9,
-       0x10e6b158,
+       0x68e6b158,
        0xc61bf402,
 /* 0x00e6: intr */
        0x00f900f8,
@@ -868,15 +870,15 @@ uint32_t nvd0_pwr_code[] = {
        0x0887f004,
        0xc40088cf,
        0x0bf40289,
-       0x85008020,
+       0x9b008020,
        0xf458e7f0,
        0x0998a721,
-       0x0096b085,
+       0x0096b09b,
        0xf00e0bf4,
        0x09d03407,
        0x8004bd00,
 /* 0x013e: intr_skip_watchdog */
-       0x89e48409,
+       0x89e49a09,
        0x0bf40800,
        0x8897f13c,
        0x0099cf06,
@@ -929,7 +931,7 @@ uint32_t nvd0_pwr_code[] = {
        0x0ed03407,
        0x8004bd00,
 /* 0x01f6: timer_enable */
-       0x87f0840e,
+       0x87f09a0e,
        0x3807f001,
        0xbd0008d0,
 /* 0x0201: timer_done */
@@ -960,7 +962,7 @@ uint32_t nvd0_pwr_code[] = {
        0x06aeb800,
        0xb6100bf4,
        0x86b15880,
-       0x1bf40210,
+       0x1bf40268,
        0x0132f4f0,
 /* 0x0264: find_done */
        0xfc028eb9,
@@ -1024,7 +1026,7 @@ uint32_t nvd0_pwr_code[] = {
        0x0bf40612,
        0x071ec42f,
        0xb704ee94,
-       0x980218e0,
+       0x980270e0,
        0xec9803eb,
        0x01ed9802,
        0xf500ee98,
@@ -1048,7 +1050,7 @@ uint32_t nvd0_pwr_code[] = {
        0xec0bf406,
        0xb60723c4,
        0x30b70434,
-       0x3b800298,
+       0x3b8002f0,
        0x023c8003,
        0x80013d80,
        0x20b6003e,
@@ -1061,12 +1063,12 @@ uint32_t nvd0_pwr_code[] = {
 /* 0x03be: host_init */
        0x17f100f8,
        0x14b60080,
-       0x1815f110,
+       0x7015f110,
        0xd007f102,
        0x0001d004,
        0x17f104bd,
        0x14b60080,
-       0x9815f110,
+       0xf015f110,
        0xdc07f102,
        0x0001d004,
        0x17f004bd,
@@ -1122,13 +1124,13 @@ uint32_t nvd0_pwr_code[] = {
        0x10b60013,
        0x10349504,
        0x980c30f0,
-       0x55f9c835,
+       0x55f9de35,
        0xf40612b8,
        0xd0fcec1e,
        0x21f5e0fc,
        0x00f8026b,
 /* 0x04a8: memx_info */
-       0x0354c7f1,
+       0x03acc7f1,
        0x0800b7f1,
        0x026b21f5,
 /* 0x04b6: memx_recv */
@@ -1140,49 +1142,342 @@ uint32_t nvd0_pwr_code[] = {
 /* 0x04c6: perf_recv */
        0x00f800f8,
 /* 0x04c8: perf_init */
-/* 0x04ca: test_recv */
-       0x17f100f8,
-       0x11cf05d8,
-       0x0110b600,
-       0x05d807f1,
+/* 0x04ca: i2c_drive_scl */
+       0x36b000f8,
+       0x0e0bf400,
+       0x07e007f1,
        0xbd0001d0,
-       0x00e7f104,
-       0x4fe3f1d9,
-       0xb621f513,
-/* 0x04eb: test_init */
-       0xf100f801,
-       0xf50800e7,
-       0xf801b621,
-/* 0x04f5: idle_recv */
-/* 0x04f7: idle */
-       0xf400f800,
-       0x17f10031,
-       0x11cf05d4,
-       0x0110b600,
-       0x05d407f1,
-       0xbd0001d0,
-/* 0x050d: idle_loop */
-       0x5817f004,
-/* 0x0513: idle_proc */
-/* 0x0513: idle_proc_exec */
-       0xf90232f4,
-       0x021eb910,
-       0x027421f5,
-       0x11f410fc,
-       0x0231f409,
-/* 0x0527: idle_proc_next */
-       0xb6ef0ef4,
-       0x1fb85810,
-       0xe61bf406,
-       0xf4dd02f4,
-       0x0ef40028,
-       0x000000c1,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+/* 0x04db: i2c_drive_scl_lo */
+       0xf100f804,
+       0xd007e407,
+       0x04bd0001,
+/* 0x04e6: i2c_drive_sda */
+       0x36b000f8,
+       0x0e0bf400,
+       0x07e007f1,
+       0xbd0002d0,
+/* 0x04f7: i2c_drive_sda_lo */
+       0xf100f804,
+       0xd007e407,
+       0x04bd0002,
+/* 0x0502: i2c_sense_scl */
+       0x32f400f8,
+       0xc437f101,
+       0x0033cf07,
+       0xf40431fd,
+       0x31f4060b,
+/* 0x0515: i2c_sense_scl_done */
+/* 0x0517: i2c_sense_sda */
+       0xf400f801,
+       0x37f10132,
+       0x33cf07c4,
+       0x0432fd00,
+       0xf4060bf4,
+/* 0x052a: i2c_sense_sda_done */
+       0x00f80131,
+/* 0x052c: i2c_raise_scl */
+       0x47f140f9,
+       0x37f00898,
+       0xca21f501,
+/* 0x0539: i2c_raise_scl_wait */
+       0xe8e7f104,
+       0x6721f403,
+       0x050221f5,
+       0xb60901f4,
+       0x1bf40142,
+/* 0x054d: i2c_raise_scl_done */
+       0xf840fcef,
+/* 0x0551: i2c_start */
+       0x0221f500,
+       0x0d11f405,
+       0x051721f5,
+       0xf40611f4,
+/* 0x0562: i2c_start_rep */
+       0x37f0300e,
+       0xca21f500,
+       0x0137f004,
+       0x04e621f5,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x2c21f550,
+       0x0464b605,
+/* 0x058f: i2c_start_send */
+       0xf01f11f4,
+       0x21f50037,
+       0xe7f104e6,
+       0x21f41388,
+       0x0037f067,
+       0x04ca21f5,
+       0x1388e7f1,
+/* 0x05ab: i2c_start_out */
+       0xf86721f4,
+/* 0x05ad: i2c_stop */
+       0x0037f000,
+       0x04ca21f5,
+       0xf50037f0,
+       0xf104e621,
+       0xf403e8e7,
+       0x37f06721,
+       0xca21f501,
+       0x88e7f104,
+       0x6721f413,
+       0xf50137f0,
+       0xf104e621,
+       0xf41388e7,
+       0x00f86721,
+/* 0x05e0: i2c_bitw */
+       0x04e621f5,
+       0x03e8e7f1,
+       0xbb6721f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x052c21f5,
+       0xf40464b6,
+       0xe7f11811,
+       0x21f41388,
+       0x0037f067,
+       0x04ca21f5,
+       0x1388e7f1,
+/* 0x061f: i2c_bitw_out */
+       0xf86721f4,
+/* 0x0621: i2c_bitr */
+       0x0137f000,
+       0x04e621f5,
+       0x03e8e7f1,
+       0xbb6721f4,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x052c21f5,
+       0xf40464b6,
+       0x21f51b11,
+       0x37f00517,
+       0xca21f500,
+       0x88e7f104,
+       0x6721f413,
+       0xf4013cf0,
+/* 0x0666: i2c_bitr_done */
+       0x00f80131,
+/* 0x0668: i2c_get_byte */
+       0xf00057f0,
+/* 0x066e: i2c_get_byte_next */
+       0x54b60847,
+       0x0076bb01,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b60621,
+       0x2b11f404,
+       0xb60553fd,
+       0x1bf40142,
+       0x0137f0d8,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xe021f550,
+       0x0464b605,
+/* 0x06b8: i2c_get_byte_done */
+/* 0x06ba: i2c_put_byte */
+       0x47f000f8,
+/* 0x06bd: i2c_put_byte_next */
+       0x0142b608,
+       0xbb3854ff,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x05e021f5,
+       0xf40464b6,
+       0x46b03411,
+       0xd81bf400,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x2121f550,
+       0x0464b606,
+       0xbb0f11f4,
+       0x36b00076,
+       0x061bf401,
+/* 0x0713: i2c_put_byte_done */
+       0xf80132f4,
+/* 0x0715: i2c_addr */
+       0x0076bb00,
+       0xf90465b6,
+       0x04659450,
+       0xbd0256bb,
+       0x0475fd50,
+       0x21f550fc,
+       0x64b60551,
+       0x2911f404,
+       0x012ec3e7,
+       0xfd0134b6,
+       0x76bb0553,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb606ba21,
+/* 0x075a: i2c_addr_done */
+       0x00f80464,
+/* 0x075c: i2c_acquire_addr */
+       0xb6f8cec7,
+       0xe0b705e4,
+       0x00f8d014,
+/* 0x0768: i2c_acquire */
+       0x075c21f5,
+       0xf00421f4,
+       0x21f403d9,
+/* 0x0777: i2c_release */
+       0xf500f833,
+       0xf4075c21,
+       0xdaf00421,
+       0x3321f403,
+/* 0x0786: i2c_recv */
+       0x32f400f8,
+       0xf8c1c701,
+       0xb00214b6,
+       0x1ff52816,
+       0x13a0013a,
+       0x32980bd4,
+       0xac13a000,
+       0x0031980b,
+       0xf90231f4,
+       0xf9e0f9d0,
+       0x0067f1d0,
+       0x0063f100,
+       0x01679210,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x6821f550,
+       0x0464b607,
+       0xd6b0d0fc,
+       0xb31bf500,
+       0x0057f000,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0x1521f550,
+       0x0464b607,
+       0x00d011f5,
+       0xbbe0c5c7,
+       0x65b60076,
+       0x9450f904,
+       0x56bb0465,
+       0xfd50bd02,
+       0x50fc0475,
+       0x06ba21f5,
+       0xf50464b6,
+       0xf000ad11,
+       0x76bb0157,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb6071521,
+       0x11f50464,
+       0x76bb008a,
+       0x0465b600,
+       0x659450f9,
+       0x0256bb04,
+       0x75fd50bd,
+       0xf550fc04,
+       0xb6066821,
+       0x11f40464,
+       0xe05bcb6a,
+       0xb60076bb,
+       0x50f90465,
+       0xbb046594,
+       0x50bd0256,
+       0xfc0475fd,
+       0xad21f550,
+       0x0464b605,
+       0xbd025bb9,
+       0x430ef474,
+/* 0x088c: i2c_recv_not_rd08 */
+       0xf401d6b0,
+       0x57f03d1b,
+       0x1521f500,
+       0x3311f407,
+       0xf5e0c5c7,
+       0xf406ba21,
+       0x57f02911,
+       0x1521f500,
+       0x1f11f407,
+       0xf5e0b5c7,
+       0xf406ba21,
+       0x21f51511,
+       0x74bd05ad,
+       0xf408c5c7,
+       0x32f4091b,
+       0x030ef402,
+/* 0x08cc: i2c_recv_not_wr08 */
+/* 0x08cc: i2c_recv_done */
+       0xf5f8cec7,
+       0xfc077721,
+       0xf4d0fce0,
+       0x7cb90a12,
+       0x6b21f502,
+/* 0x08e1: i2c_recv_exit */
+/* 0x08e3: i2c_init */
+       0xf800f802,
+/* 0x08e5: test_recv */
+       0xd817f100,
+       0x0011cf05,
+       0xf10110b6,
+       0xd005d807,
+       0x04bd0001,
+       0xd900e7f1,
+       0x134fe3f1,
+       0x01b621f5,
+/* 0x0906: test_init */
+       0xe7f100f8,
+       0x21f50800,
+       0x00f801b6,
+/* 0x0910: idle_recv */
+/* 0x0912: idle */
+       0x31f400f8,
+       0xd417f100,
+       0x0011cf05,
+       0xf10110b6,
+       0xd005d407,
+       0x04bd0001,
+/* 0x0928: idle_loop */
+       0xf45817f0,
+/* 0x092e: idle_proc */
+/* 0x092e: idle_proc_exec */
+       0x10f90232,
+       0xf5021eb9,
+       0xfc027421,
+       0x0911f410,
+       0xf40231f4,
+/* 0x0942: idle_proc_next */
+       0x10b6ef0e,
+       0x061fb858,
+       0xf4e61bf4,
+       0x28f4dd02,
+       0xc10ef400,
        0x00000000,
        0x00000000,
        0x00000000,
index 5fb0cccc6c64b593dbe1d2728a4011e793744f21..574acfa44c8c78f327dcd9d59cc7e1b6e9b2bc1f 100644 (file)
@@ -7,6 +7,7 @@
 #define PROC_HOST 0x54534f48
 #define PROC_MEMX 0x584d454d
 #define PROC_PERF 0x46524550
+#define PROC_I2C_ 0x5f433249
 #define PROC_TEST 0x54534554
 
 /* KERN: message identifiers */
 #define MEMX_WAIT   3
 #define MEMX_DELAY  4
 
+/* I2C_: message identifiers */
+#define I2C__MSG_RD08 0
+#define I2C__MSG_WR08 1
+
+#define I2C__MSG_DATA0_PORT 24:31
+#define I2C__MSG_DATA0_ADDR 14:23
+
+#define I2C__MSG_DATA0_RD08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_RD08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_RD08_REG 0:7
+#define I2C__MSG_DATA1_RD08_VAL 0:7
+
+#define I2C__MSG_DATA0_WR08_PORT I2C__MSG_DATA0_PORT
+#define I2C__MSG_DATA0_WR08_ADDR I2C__MSG_DATA0_ADDR
+#define I2C__MSG_DATA0_WR08_SYNC 8:8
+#define I2C__MSG_DATA0_WR08_REG 0:7
+#define I2C__MSG_DATA1_WR08_VAL 0:7
+
 #endif
index ef3133e7575c8eaf937520cf750bd51effcd7505..7dd680ff2f6f63683f34e39a1d7821f3c73c140c 100644 (file)
@@ -72,13 +72,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
        vmm->flush(vm);
 }
 
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
-       nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
 nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
                        struct nouveau_mem *mem)
 {
@@ -136,7 +130,7 @@ finish:
        vmm->flush(vm);
 }
 
-void
+static void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  struct nouveau_mem *mem)
 {
@@ -174,6 +168,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
        vmm->flush(vm);
 }
 
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+       if (node->sg)
+               nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+       else
+       if (node->pages)
+               nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+       else
+               nouveau_vm_map_at(vma, 0, node);
+}
+
 void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
index b13ff0fc42de4b2dcaf175e4a1ec3a126f04732c..2f1ed61f7c8c9e39d44c1528410eb4aaeb83fe17 100644 (file)
@@ -77,11 +77,6 @@ nv04_display_create(struct drm_device *dev)
 
        nouveau_hw_save_vga_fonts(dev, 1);
 
-       ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
-                                NV04_DISP_CLASS, NULL, 0, &disp->core);
-       if (ret)
-               return ret;
-
        nv04_crtc_create(dev, 0);
        if (nv_two_heads(dev))
                nv04_crtc_create(dev, 1);
index 56a28db040004fe6ecfebb6b3b69c8feda6a85f5..4245fc3dab70e5ce06964cc70296ef4e6929a9b8 100644 (file)
@@ -80,7 +80,6 @@ struct nv04_display {
        struct nv04_mode_state saved_reg;
        uint32_t saved_vga_font[4][16384];
        uint32_t dac_users[4];
-       struct nouveau_object *core;
        struct nouveau_bo *image[2];
 };
 
index 32e7064b819b6df4f9e5847beba7a826eaaed726..ab03f7719d2d3af06687ea0b1e840290094ad8dd 100644 (file)
@@ -55,9 +55,12 @@ struct nouveau_plane {
        int hue;
        int saturation;
        int iturbt_709;
+
+       void (*set_params)(struct nouveau_plane *);
 };
 
 static uint32_t formats[] = {
+       DRM_FORMAT_YUYV,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_NV12,
 };
@@ -140,10 +143,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
        nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
 
-       if (fb->pixel_format == DRM_FORMAT_NV12) {
+       if (fb->pixel_format != DRM_FORMAT_UYVY)
                format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+       if (fb->pixel_format == DRM_FORMAT_NV12)
                format |= NV_PVIDEO_FORMAT_PLANAR;
-       }
        if (nv_plane->iturbt_709)
                format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
        if (nv_plane->colorkey & (1 << 24))
@@ -182,9 +185,9 @@ nv10_disable_plane(struct drm_plane *plane)
 }
 
 static void
-nv10_destroy_plane(struct drm_plane *plane)
+nv_destroy_plane(struct drm_plane *plane)
 {
-       nv10_disable_plane(plane);
+       plane->funcs->disable_plane(plane);
        drm_plane_cleanup(plane);
        kfree(plane);
 }
@@ -217,9 +220,9 @@ nv10_set_params(struct nouveau_plane *plane)
 }
 
 static int
-nv10_set_property(struct drm_plane *plane,
-                 struct drm_property *property,
-                 uint64_t value)
+nv_set_property(struct drm_plane *plane,
+               struct drm_property *property,
+               uint64_t value)
 {
        struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
 
@@ -238,15 +241,16 @@ nv10_set_property(struct drm_plane *plane,
        else
                return -EINVAL;
 
-       nv10_set_params(nv_plane);
+       if (nv_plane->set_params)
+               nv_plane->set_params(nv_plane);
        return 0;
 }
 
 static const struct drm_plane_funcs nv10_plane_funcs = {
        .update_plane = nv10_update_plane,
        .disable_plane = nv10_disable_plane,
-       .set_property = nv10_set_property,
-       .destroy = nv10_destroy_plane,
+       .set_property = nv_set_property,
+       .destroy = nv_destroy_plane,
 };
 
 static void
@@ -266,7 +270,7 @@ nv10_overlay_init(struct drm_device *device)
        case 0x15:
        case 0x1a:
        case 0x20:
-               num_formats = 1;
+               num_formats = 2;
                break;
        }
 
@@ -321,8 +325,159 @@ nv10_overlay_init(struct drm_device *device)
        drm_object_attach_property(&plane->base.base,
                                   plane->props.iturbt_709, plane->iturbt_709);
 
+       plane->set_params = nv10_set_params;
        nv10_set_params(plane);
-       nv_wr32(dev, NV_PVIDEO_STOP, 1);
+       nv10_disable_plane(&plane->base);
+       return;
+cleanup:
+       drm_plane_cleanup(&plane->base);
+err:
+       kfree(plane);
+       nv_error(dev, "Failed to create plane\n");
+}
+
+static int
+nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                 unsigned int crtc_w, unsigned int crtc_h,
+                 uint32_t src_x, uint32_t src_y,
+                 uint32_t src_w, uint32_t src_h)
+{
+       struct nouveau_device *dev = nouveau_dev(plane->dev);
+       struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+       struct nouveau_bo *cur = nv_plane->cur;
+       uint32_t overlay = 1;
+       int brightness = (nv_plane->brightness - 512) * 62 / 512;
+       int pitch, ret, i;
+
+       /* Source parameters given in 16.16 fixed point, ignore fractional. */
+       src_x >>= 16;
+       src_y >>= 16;
+       src_w >>= 16;
+       src_h >>= 16;
+
+       pitch = ALIGN(src_w * 4, 0x100);
+
+       if (pitch > 0xffff)
+               return -ERANGE;
+
+       /* TODO: Compute an offset? Not sure how to do this for YUYV. */
+       if (src_x != 0 || src_y != 0)
+               return -ERANGE;
+
+       if (crtc_w < src_w || crtc_h < src_h)
+               return -ERANGE;
+
+       ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+       if (ret)
+               return ret;
+
+       nv_plane->cur = nv_fb->nvbo;
+
+       nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+       nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+       nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+
+       for (i = 0; i < 2; i++) {
+               nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
+                       nv_fb->nvbo->bo.offset);
+               nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+               nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+       }
+       nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
+       nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
+       nv_wr32(dev, NV_PVIDEO_STEP_SIZE,
+               (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
+
+       /* It should be possible to convert hue/contrast to this */
+       nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
+       nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
+       nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
+       nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
+
+       nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
+       nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
+
+       nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
+       nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
+
+       nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
+
+       if (nv_plane->colorkey & (1 << 24))
+               overlay |= 0x10;
+       if (fb->pixel_format == DRM_FORMAT_YUYV)
+               overlay |= 0x100;
+
+       nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
+
+       nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
+
+       if (cur)
+               nouveau_bo_unpin(cur);
+
+       return 0;
+}
+
+static int
+nv04_disable_plane(struct drm_plane *plane)
+{
+       struct nouveau_device *dev = nouveau_dev(plane->dev);
+       struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+       nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
+       nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+       nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+       nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+       if (nv_plane->cur) {
+               nouveau_bo_unpin(nv_plane->cur);
+               nv_plane->cur = NULL;
+       }
+
+       return 0;
+}
+
+static const struct drm_plane_funcs nv04_plane_funcs = {
+       .update_plane = nv04_update_plane,
+       .disable_plane = nv04_disable_plane,
+       .set_property = nv_set_property,
+       .destroy = nv_destroy_plane,
+};
+
+static void
+nv04_overlay_init(struct drm_device *device)
+{
+       struct nouveau_device *dev = nouveau_dev(device);
+       struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+       int ret;
+
+       if (!plane)
+               return;
+
+       ret = drm_plane_init(device, &plane->base, 1 /* single crtc */,
+                            &nv04_plane_funcs,
+                            formats, 2, false);
+       if (ret)
+               goto err;
+
+       /* Set up the plane properties */
+       plane->props.colorkey = drm_property_create_range(
+                       device, 0, "colorkey", 0, 0x01ffffff);
+       plane->props.brightness = drm_property_create_range(
+                       device, 0, "brightness", 0, 1024);
+       if (!plane->props.colorkey ||
+           !plane->props.brightness)
+               goto cleanup;
+
+       plane->colorkey = 0;
+       drm_object_attach_property(&plane->base.base,
+                                  plane->props.colorkey, plane->colorkey);
+
+       plane->brightness = 512;
+       drm_object_attach_property(&plane->base.base,
+                                  plane->props.brightness, plane->brightness);
+
+       nv04_disable_plane(&plane->base);
        return;
 cleanup:
        drm_plane_cleanup(&plane->base);
@@ -335,6 +490,8 @@ void
 nouveau_overlay_init(struct drm_device *device)
 {
        struct nouveau_device *dev = nouveau_dev(device);
-       if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+       if (dev->chipset < 0x10)
+               nv04_overlay_init(device);
+       else if (dev->chipset <= 0x40)
                nv10_overlay_init(device);
 }
index 3c149617cfcbaabb49a0253fd405fe52930bc0af..4ef83df2b246fb335dc8ce05bafead80ed7ac180 100644 (file)
@@ -61,6 +61,7 @@ bool nouveau_is_v1_dsm(void) {
 #define NOUVEAU_DSM_HAS_MUX 0x1
 #define NOUVEAU_DSM_HAS_OPT 0x2
 
+#ifdef CONFIG_VGA_SWITCHEROO
 static const char nouveau_dsm_muid[] = {
        0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
        0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
@@ -326,6 +327,11 @@ void nouveau_unregister_dsm_handler(void)
        if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
                vga_switcheroo_unregister_handler();
 }
+#else
+void nouveau_register_dsm_handler(void) {}
+void nouveau_unregister_dsm_handler(void) {}
+void nouveau_switcheroo_optimus_dsm(void) {}
+#endif
 
 /* retrieve the ROM in 4k blocks */
 static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
index c0fde6b9393cb24fa602e838fdf145657d059d05..488686d490c0c7a96ba016e0f55a4beffcda6e4f 100644 (file)
@@ -560,28 +560,6 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 }
 
 
-/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
-
-static int
-nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-                             struct nouveau_bo *nvbo, bool evict,
-                             bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
-       struct nouveau_fence *fence = NULL;
-       int ret;
-
-       ret = nouveau_fence_new(chan, false, &fence);
-       if (ret)
-               return ret;
-
-       ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
-                                       no_wait_gpu, new_mem);
-       nouveau_fence_unref(&fence);
-       return ret;
-}
-
 static int
 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
@@ -798,25 +776,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_mem *node = old_mem->mm_node;
-       struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
+       int src_tiled = !!node->memtype;
+       int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
        int ret;
 
        while (length) {
                u32 amount, stride, height;
 
+               ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+               if (ret)
+                       return ret;
+
                amount  = min(length, (u64)(4 * 1024 * 1024));
                stride  = 16 * 4;
                height  = amount / stride;
 
-               if (old_mem->mem_type == TTM_PL_VRAM &&
-                   nouveau_bo_tile_layout(nvbo)) {
-                       ret = RING_SPACE(chan, 8);
-                       if (ret)
-                               return ret;
-
+               if (src_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
@@ -826,19 +804,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
-                       ret = RING_SPACE(chan, 2);
-                       if (ret)
-                               return ret;
-
                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
-               if (new_mem->mem_type == TTM_PL_VRAM &&
-                   nouveau_bo_tile_layout(nvbo)) {
-                       ret = RING_SPACE(chan, 8);
-                       if (ret)
-                               return ret;
-
+               if (dst_tiled) {
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
@@ -848,18 +817,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
-                       ret = RING_SPACE(chan, 2);
-                       if (ret)
-                               return ret;
-
                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }
 
-               ret = RING_SPACE(chan, 14);
-               if (ret)
-                       return ret;
-
                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
@@ -953,23 +914,28 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
-                  struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+                    struct ttm_mem_reg *mem)
 {
-       struct nouveau_mem *node = mem->mm_node;
+       struct nouveau_mem *old_node = bo->mem.mm_node;
+       struct nouveau_mem *new_node = mem->mm_node;
+       u64 size = (u64)mem->num_pages << PAGE_SHIFT;
        int ret;
 
-       ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
-                            PAGE_SHIFT, node->page_shift,
-                            NV_MEM_ACCESS_RW, vma);
+       ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+                            NV_MEM_ACCESS_RW, &old_node->vma[0]);
        if (ret)
                return ret;
 
-       if (mem->mem_type == TTM_PL_VRAM)
-               nouveau_vm_map(vma, node);
-       else
-               nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+       ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+                            NV_MEM_ACCESS_RW, &old_node->vma[1]);
+       if (ret) {
+               nouveau_vm_put(&old_node->vma[0]);
+               return ret;
+       }
 
+       nouveau_vm_map(&old_node->vma[0], old_node);
+       nouveau_vm_map(&old_node->vma[1], new_node);
        return 0;
 }
 
@@ -979,35 +945,34 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
-       struct nouveau_bo *nvbo = nouveau_bo(bo);
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct nouveau_fence *fence;
        int ret;
 
-       mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
-
        /* create temporary vmas for the transfer and attach them to the
         * old nouveau_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_mem_reg
         */
        if (nv_device(drm->device)->card_type >= NV_50) {
-               struct nouveau_mem *node = old_mem->mm_node;
-
-               ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
-               if (ret)
-                       goto out;
-
-               ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+               ret = nouveau_bo_move_prep(drm, bo, new_mem);
                if (ret)
-                       goto out;
+                       return ret;
        }
 
-       ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+       mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+       ret = nouveau_fence_sync(bo->sync_obj, chan);
        if (ret == 0) {
-               ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
-                                                   no_wait_gpu, new_mem);
+               ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+               if (ret == 0) {
+                       ret = nouveau_fence_new(chan, false, &fence);
+                       if (ret == 0) {
+                               ret = ttm_bo_move_accel_cleanup(bo, fence,
+                                                               evict,
+                                                               no_wait_gpu,
+                                                               new_mem);
+                               nouveau_fence_unref(&fence);
+                       }
+               }
        }
-
-out:
        mutex_unlock(&chan->cli->mutex);
        return ret;
 }
@@ -1147,19 +1112,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
                return;
 
        list_for_each_entry(vma, &nvbo->vma_list, head) {
-               if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+               if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+                             (new_mem->mem_type == TTM_PL_VRAM ||
+                              nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
                        nouveau_vm_map(vma, new_mem->mm_node);
-               } else
-               if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-                   nvbo->page_shift == vma->vm->vmm->spg_shift) {
-                       if (((struct nouveau_mem *)new_mem->mm_node)->sg)
-                               nouveau_vm_map_sg_table(vma, 0, new_mem->
-                                                 num_pages << PAGE_SHIFT,
-                                                 new_mem->mm_node);
-                       else
-                               nouveau_vm_map_sg(vma, 0, new_mem->
-                                                 num_pages << PAGE_SHIFT,
-                                                 new_mem->mm_node);
                } else {
                        nouveau_vm_unmap(vma);
                }
@@ -1224,28 +1180,27 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                goto out;
        }
 
-       /* CPU copy if we have no accelerated method available */
-       if (!drm->ttm.move) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-               goto out;
-       }
-
        /* Hardware assisted copy. */
-       if (new_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flipd(bo, evict, intr,
-                                           no_wait_gpu, new_mem);
-       else if (old_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flips(bo, evict, intr,
-                                           no_wait_gpu, new_mem);
-       else
-               ret = nouveau_bo_move_m2mf(bo, evict, intr,
-                                          no_wait_gpu, new_mem);
-
-       if (!ret)
-               goto out;
+       if (drm->ttm.move) {
+               if (new_mem->mem_type == TTM_PL_SYSTEM)
+                       ret = nouveau_bo_move_flipd(bo, evict, intr,
+                                                   no_wait_gpu, new_mem);
+               else if (old_mem->mem_type == TTM_PL_SYSTEM)
+                       ret = nouveau_bo_move_flips(bo, evict, intr,
+                                                   no_wait_gpu, new_mem);
+               else
+                       ret = nouveau_bo_move_m2mf(bo, evict, intr,
+                                                  no_wait_gpu, new_mem);
+               if (!ret)
+                       goto out;
+       }
 
        /* Fallback to software copy. */
-       ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+       spin_lock(&bo->bdev->fence_lock);
+       ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+       spin_unlock(&bo->bdev->fence_lock);
+       if (ret == 0)
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
        if (nv_device(drm->device)->card_type < NV_50) {
@@ -1271,6 +1226,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct nouveau_drm *drm = nouveau_bdev(bdev);
+       struct nouveau_mem *node = mem->mm_node;
        struct drm_device *dev = drm->dev;
        int ret;
 
@@ -1293,14 +1249,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                        mem->bus.is_iomem = !dev->agp->cant_use_aperture;
                }
 #endif
-               break;
+               if (!node->memtype)
+                       /* untiled */
+                       break;
+               /* fallthrough, tiled memory */
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
                if (nv_device(drm->device)->card_type >= NV_50) {
                        struct nouveau_bar *bar = nouveau_bar(drm->device);
-                       struct nouveau_mem *node = mem->mm_node;
 
                        ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
                                        &node->bar_vma);
@@ -1336,6 +1294,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_device *device = nv_device(drm->device);
        u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+       int ret;
 
        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
@@ -1344,10 +1303,20 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
                if (nv_device(drm->device)->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
+
+               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+                       nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+
+                       ret = nouveau_bo_validate(nvbo, false, false);
+                       if (ret)
+                               return ret;
+               }
+               return 0;
        }
 
        /* make sure bo is in mappable vram */
-       if (bo->mem.start + bo->mem.num_pages < mappable)
+       if (nv_device(drm->device)->card_type >= NV_50 ||
+           bo->mem.start + bo->mem.num_pages < mappable)
                return 0;
 
 
@@ -1535,7 +1504,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                   struct nouveau_vma *vma)
 {
        const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-       struct nouveau_mem *node = nvbo->bo.mem.mm_node;
        int ret;
 
        ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1543,15 +1511,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
        if (ret)
                return ret;
 
-       if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+       if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+           (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+            nvbo->page_shift != vma->vm->vmm->lpg_shift))
                nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-       else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-                nvbo->page_shift == vma->vm->vmm->spg_shift) {
-               if (node->sg)
-                       nouveau_vm_map_sg_table(vma, 0, size, node);
-               else
-                       nouveau_vm_map_sg(vma, 0, size, node);
-       }
 
        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
index 25ea82f8def3cb883a06aef6e9da59ea4cbffaf7..24011596af434276bbe1b9a82a26252229c20c5a 100644 (file)
@@ -68,20 +68,100 @@ nouveau_display_vblank_disable(struct drm_device *dev, int head)
                nouveau_event_put(disp->vblank[head]);
 }
 
+static inline int
+calc(int blanks, int blanke, int total, int line)
+{
+       if (blanke >= blanks) {
+               if (line >= blanks)
+                       line -= total;
+       } else {
+               if (line >= blanks)
+                       line -= total;
+               line -= blanke + 1;
+       }
+       return line;
+}
+
+int
+nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
+                               ktime_t *stime, ktime_t *etime)
+{
+       const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+       struct nouveau_display *disp = nouveau_display(crtc->dev);
+       struct nv04_display_scanoutpos args;
+       int ret, retry = 1;
+
+       do {
+               ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+               if (ret != 0)
+                       return 0;
+
+               if (args.vline) {
+                       ret |= DRM_SCANOUTPOS_ACCURATE;
+                       ret |= DRM_SCANOUTPOS_VALID;
+                       break;
+               }
+
+               if (retry) ndelay(crtc->linedur_ns);
+       } while (retry--);
+
+       *hpos = calc(args.hblanks, args.hblanke, args.htotal, args.hline);
+       *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
+       if (stime) *stime = ns_to_ktime(args.time[0]);
+       if (etime) *etime = ns_to_ktime(args.time[1]);
+
+       if (*vpos < 0)
+               ret |= DRM_SCANOUTPOS_INVBL;
+       return ret;
+}
+
+int
+nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
+                          int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (nouveau_crtc(crtc)->index == head) {
+                       return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+                                                              stime, etime);
+               }
+       }
+
+       return 0;
+}
+
+int
+nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
+                        struct timeval *time, unsigned flags)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (nouveau_crtc(crtc)->index == head) {
+                       return drm_calc_vbltimestamp_from_scanoutpos(dev,
+                                       head, max_error, time, flags, crtc,
+                                       &crtc->hwmode);
+               }
+       }
+
+       return -EINVAL;
+}
+
 static void
 nouveau_display_vblank_fini(struct drm_device *dev)
 {
        struct nouveau_display *disp = nouveau_display(dev);
        int i;
 
+       drm_vblank_cleanup(dev);
+
        if (disp->vblank) {
                for (i = 0; i < dev->mode_config.num_crtc; i++)
                        nouveau_event_ref(NULL, &disp->vblank[i]);
                kfree(disp->vblank);
                disp->vblank = NULL;
        }
-
-       drm_vblank_cleanup(dev);
 }
 
 static int
@@ -407,10 +487,31 @@ nouveau_display_create(struct drm_device *dev)
        drm_kms_helper_poll_disable(dev);
 
        if (drm->vbios.dcb.entries) {
-               if (nv_device(drm->device)->card_type < NV_50)
-                       ret = nv04_display_create(dev);
-               else
-                       ret = nv50_display_create(dev);
+               static const u16 oclass[] = {
+                       NVF0_DISP_CLASS,
+                       NVE0_DISP_CLASS,
+                       NVD0_DISP_CLASS,
+                       NVA3_DISP_CLASS,
+                       NV94_DISP_CLASS,
+                       NVA0_DISP_CLASS,
+                       NV84_DISP_CLASS,
+                       NV50_DISP_CLASS,
+                       NV04_DISP_CLASS,
+               };
+               int i;
+
+               for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
+                       ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+                                                NVDRM_DISPLAY, oclass[i],
+                                                NULL, 0, &disp->core);
+               }
+
+               if (ret == 0) {
+                       if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+                               ret = nv04_display_create(dev);
+                       else
+                               ret = nv50_display_create(dev);
+               }
        } else {
                ret = 0;
        }
@@ -439,6 +540,7 @@ void
 nouveau_display_destroy(struct drm_device *dev)
 {
        struct nouveau_display *disp = nouveau_display(dev);
+       struct nouveau_drm *drm = nouveau_drm(dev);
 
        nouveau_backlight_exit(dev);
        nouveau_display_vblank_fini(dev);
@@ -449,6 +551,8 @@ nouveau_display_destroy(struct drm_device *dev)
        if (disp->dtor)
                disp->dtor(dev);
 
+       nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+
        nouveau_drm(dev)->display = NULL;
        kfree(disp);
 }
@@ -603,6 +707,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        if (!s)
                return -ENOMEM;
 
+       if (new_bo != old_bo) {
+               ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+               if (ret)
+                       goto fail_free;
+       }
+
+       mutex_lock(&chan->cli->mutex);
+
        /* synchronise rendering channel with the kernel's channel */
        spin_lock(&new_bo->bo.bdev->fence_lock);
        fence = nouveau_fence_ref(new_bo->bo.sync_obj);
@@ -610,15 +722,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        ret = nouveau_fence_sync(fence, chan);
        nouveau_fence_unref(&fence);
        if (ret)
-               goto fail_free;
-
-       if (new_bo != old_bo) {
-               ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
-               if (ret)
-                       goto fail_free;
-       }
+               goto fail_unpin;
 
-       mutex_lock(&chan->cli->mutex);
        ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
        if (ret)
                goto fail_unpin;
index 8bc8bab90e8d34462f0b484a90710a9094247216..a71cf77e55b24eb19c48df69086cb758ab2a9db6 100644 (file)
@@ -36,6 +36,7 @@ struct nouveau_display {
        int  (*init)(struct drm_device *);
        void (*fini)(struct drm_device *);
 
+       struct nouveau_object *core;
        struct nouveau_eventh **vblank;
 
        struct drm_property *dithering_mode;
@@ -63,6 +64,10 @@ void nouveau_display_repin(struct drm_device *dev);
 void nouveau_display_resume(struct drm_device *dev);
 int  nouveau_display_vblank_enable(struct drm_device *, int);
 void nouveau_display_vblank_disable(struct drm_device *, int);
+int  nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
+                               int *, int *, ktime_t *, ktime_t *);
+int  nouveau_display_vblstamp(struct drm_device *, int, int *,
+                             struct timeval *, unsigned);
 
 int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            struct drm_pending_vblank_event *event,
index 40f91e1e58422f0cdd7db3f607cde397966a7e8d..c177272152e24b7fb525891b8308668abfbeb460 100644 (file)
@@ -100,7 +100,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 
        chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 
-       DRM_MEMORYBARRIER();
+       mb();
        /* Flush writes. */
        nouveau_bo_rd32(pb, 0);
 
index 984004d66a6d313d1934230bc654c091a77b5f8f..dc0e0c5cadb48753d0c814f4b24586620053fb10 100644 (file)
@@ -155,7 +155,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
 }
 
 #define WRITE_PUT(val) do {                                                    \
-       DRM_MEMORYBARRIER();                                                   \
+       mb();                                                   \
        nouveau_bo_rd32(chan->push.buffer, 0);                                 \
        nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset);  \
 } while (0)
index 98a22e6e27a11f73045fdbab161452309f2416ab..78c8e7146d56b2c5f7d189e372e542f4d9b43717 100644 (file)
@@ -503,19 +503,21 @@ nouveau_do_suspend(struct drm_device *dev)
        if (drm->cechan) {
                ret = nouveau_channel_idle(drm->cechan);
                if (ret)
-                       return ret;
+                       goto fail_display;
        }
 
        if (drm->channel) {
                ret = nouveau_channel_idle(drm->channel);
                if (ret)
-                       return ret;
+                       goto fail_display;
        }
 
        NV_INFO(drm, "suspending client object trees...\n");
        if (drm->fence && nouveau_fence(drm)->suspend) {
-               if (!nouveau_fence(drm)->suspend(drm))
-                       return -ENOMEM;
+               if (!nouveau_fence(drm)->suspend(drm)) {
+                       ret = -ENOMEM;
+                       goto fail_display;
+               }
        }
 
        list_for_each_entry(cli, &drm->clients, head) {
@@ -537,6 +539,10 @@ fail_client:
                nouveau_client_init(&cli->base);
        }
 
+       if (drm->fence && nouveau_fence(drm)->resume)
+               nouveau_fence(drm)->resume(drm);
+
+fail_display:
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "resuming display...\n");
                nouveau_display_resume(dev);
@@ -798,6 +804,8 @@ driver = {
        .get_vblank_counter = drm_vblank_count,
        .enable_vblank = nouveau_display_vblank_enable,
        .disable_vblank = nouveau_display_vblank_disable,
+       .get_scanout_position = nouveau_display_scanoutpos,
+       .get_vblank_timestamp = nouveau_display_vblstamp,
 
        .ioctls = nouveau_ioctls,
        .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
index 4b0fb6c66be918857bc6529b3a7c5a7c7a49a498..23ca7a517246feda3e7dfae782683c88c2bc64a5 100644 (file)
@@ -54,6 +54,7 @@ enum nouveau_drm_handle {
        NVDRM_CLIENT  = 0xffffffff,
        NVDRM_DEVICE  = 0xdddddddd,
        NVDRM_CONTROL = 0xdddddddc,
+       NVDRM_DISPLAY = 0xd1500000,
        NVDRM_PUSH    = 0xbbbb0000, /* |= client chid */
        NVDRM_CHAN    = 0xcccc0000, /* |= client chid */
        NVDRM_NVSW    = 0x55550000,
index 40cf52e6d6d21ffb818f70b2b01be3b5f5e02b95..90074d620e31265bdcc70d4b7285fc9be077fee8 100644 (file)
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
        int ret;
 
        fence->channel  = chan;
-       fence->timeout  = jiffies + (15 * DRM_HZ);
+       fence->timeout  = jiffies + (15 * HZ);
        fence->sequence = ++fctx->sequence;
 
        ret = fctx->emit(fence);
index 78a27f8ad7d97be653a63506bb60bd9c7b518252..27c3fd89e8ceb657595d188a8a3ff16cadde0edc 100644 (file)
@@ -463,12 +463,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-               ret = validate_sync(chan, nvbo);
-               if (unlikely(ret)) {
-                       NV_ERROR(cli, "fail pre-validate sync\n");
-                       return ret;
-               }
-
                ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
@@ -506,7 +500,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
                        b->presumed.valid = 0;
                        relocs++;
 
-                       if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+                       if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
@@ -593,7 +587,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
        if (!mem)
                return ERR_PTR(-ENOMEM);
 
-       if (DRM_COPY_FROM_USER(mem, userptr, size)) {
+       if (copy_from_user(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }
index 0843ebc910d4d6062ce94023f70dde1f1cc00ea0..a4d22e5eb176ef342023fdf0e581c564139139ed 100644 (file)
@@ -31,16 +31,17 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *node = mem->mm_node;
-       u64 size = mem->num_pages << 12;
 
        if (ttm->sg) {
-               node->sg = ttm->sg;
-               nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+               node->sg    = ttm->sg;
+               node->pages = NULL;
        } else {
+               node->sg    = NULL;
                node->pages = nvbe->ttm.dma_address;
-               nouveau_vm_map_sg(&node->vma[0], 0, size, node);
        }
+       node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 
+       nouveau_vm_map(&node->vma[0], node);
        nvbe->node = node;
        return 0;
 }
@@ -67,9 +68,13 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 
        /* noop: bound in move_notify() */
        if (ttm->sg) {
-               node->sg = ttm->sg;
-       } else
+               node->sg    = ttm->sg;
+               node->pages = NULL;
+       } else {
+               node->sg    = NULL;
                node->pages = nvbe->ttm.dma_address;
+       }
+       node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
        return 0;
 }
 
index 19e3757291fba09434de033622fd4f5f6b35ea8c..d45d50da978f07870fb2bfc703ef509b4a2482f9 100644 (file)
@@ -171,6 +171,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
+
        node->page_shift = 12;
 
        switch (nv_device(drm->device)->card_type) {
index 4e384a2f99c3627ea0fc6e31f5f0e34ac95e71e8..2dccafc6e9db573814e43acff007c36c2c0da97b 100644 (file)
@@ -1035,6 +1035,7 @@ static bool
 nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
                     struct drm_display_mode *adjusted_mode)
 {
+       drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
        return true;
 }
 
@@ -2199,16 +2200,6 @@ nv50_display_destroy(struct drm_device *dev)
 int
 nv50_display_create(struct drm_device *dev)
 {
-       static const u16 oclass[] = {
-               NVF0_DISP_CLASS,
-               NVE0_DISP_CLASS,
-               NVD0_DISP_CLASS,
-               NVA3_DISP_CLASS,
-               NV94_DISP_CLASS,
-               NVA0_DISP_CLASS,
-               NV84_DISP_CLASS,
-               NV50_DISP_CLASS,
-       };
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct dcb_table *dcb = &drm->vbios.dcb;
@@ -2225,6 +2216,7 @@ nv50_display_create(struct drm_device *dev)
        nouveau_display(dev)->dtor = nv50_display_destroy;
        nouveau_display(dev)->init = nv50_display_init;
        nouveau_display(dev)->fini = nv50_display_fini;
+       disp->core = nouveau_display(dev)->core;
 
        /* small shared memory area we use for notifiers and semaphores */
        ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2240,17 +2232,6 @@ nv50_display_create(struct drm_device *dev)
                        nouveau_bo_ref(NULL, &disp->sync);
        }
 
-       if (ret)
-               goto out;
-
-       /* attempt to allocate a supported evo display class */
-       ret = -ENODEV;
-       for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
-               ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
-                                        0xd1500000, oclass[i], NULL, 0,
-                                        &disp->core);
-       }
-
        if (ret)
                goto out;
 
index 0fd2eb139f6e402cd6856d5996ee4faf6affa950..4313bb0a49a62a70039e1bd952a402e2ac54221e 100644 (file)
@@ -411,7 +411,7 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
        struct drm_crtc *crtc = &omap_crtc->base;
        DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
        /* avoid getting in a flood, unregister the irq until next vblank */
-       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+       __omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
 }
 
 static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
@@ -421,13 +421,13 @@ static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
        struct drm_crtc *crtc = &omap_crtc->base;
 
        if (!omap_crtc->error_irq.registered)
-               omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+               __omap_irq_register(crtc->dev, &omap_crtc->error_irq);
 
        if (!dispc_mgr_go_busy(omap_crtc->channel)) {
                struct omap_drm_private *priv =
                                crtc->dev->dev_private;
                DBG("%s: apply done", omap_crtc->name);
-               omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+               __omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
                queue_work(priv->wq, &omap_crtc->apply_work);
        }
 }
@@ -623,6 +623,11 @@ void omap_crtc_pre_init(void)
        dss_install_mgr_ops(&mgr_ops);
 }
 
+void omap_crtc_pre_uninit(void)
+{
+       dss_uninstall_mgr_ops();
+}
+
 /* initialize crtc */
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, enum omap_channel channel, int id)
index c27f59da7f2935f0fe3a1908b9a73c9a4c6b7529..d4c04d69fc4df62a62352db72cd035ae6e5e53d1 100644 (file)
@@ -48,7 +48,7 @@ static int mm_show(struct seq_file *m, void *arg)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       return drm_mm_dump_table(m, dev->mm_private);
+       return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }
 
 static int fb_show(struct seq_file *m, void *arg)
index 701c4c10e08b5858a5e083d05329af23c076669e..f926b4caf44989be904451c277049152cfc9b9ad 100644 (file)
@@ -969,12 +969,21 @@ static const struct dev_pm_ops omap_dmm_pm_ops = {
 };
 #endif
 
+#if defined(CONFIG_OF)
+static const struct of_device_id dmm_of_match[] = {
+       { .compatible = "ti,omap4-dmm", },
+       { .compatible = "ti,omap5-dmm", },
+       {},
+};
+#endif
+
 struct platform_driver omap_dmm_driver = {
        .probe = omap_dmm_probe,
        .remove = omap_dmm_remove,
        .driver = {
                .owner = THIS_MODULE,
                .name = DMM_DRIVER_NAME,
+               .of_match_table = of_match_ptr(dmm_of_match),
 #ifdef CONFIG_PM
                .pm = &omap_dmm_pm_ops,
 #endif
index e7fa3cd9674389e1cac2de91816ce6b0013b2a1b..bf39fcc49e0f181ddcbf340632fa4f77c8b1bb10 100644 (file)
@@ -86,6 +86,47 @@ static bool channel_used(struct drm_device *dev, enum omap_channel channel)
 
        return false;
 }
+static void omap_disconnect_dssdevs(void)
+{
+       struct omap_dss_device *dssdev = NULL;
+
+       for_each_dss_dev(dssdev)
+               dssdev->driver->disconnect(dssdev);
+}
+
+static int omap_connect_dssdevs(void)
+{
+       int r;
+       struct omap_dss_device *dssdev = NULL;
+       bool no_displays = true;
+
+       for_each_dss_dev(dssdev) {
+               r = dssdev->driver->connect(dssdev);
+               if (r == -EPROBE_DEFER) {
+                       omap_dss_put_device(dssdev);
+                       goto cleanup;
+               } else if (r) {
+                       dev_warn(dssdev->dev, "could not connect display: %s\n",
+                               dssdev->name);
+               } else {
+                       no_displays = false;
+               }
+       }
+
+       if (no_displays)
+               return -EPROBE_DEFER;
+
+       return 0;
+
+cleanup:
+       /*
+        * if we are deferring probe, we disconnect the devices we previously
+        * connected
+        */
+       omap_disconnect_dssdevs();
+
+       return r;
+}
 
 static int omap_modeset_init(struct drm_device *dev)
 {
@@ -95,9 +136,6 @@ static int omap_modeset_init(struct drm_device *dev)
        int num_mgrs = dss_feat_get_num_mgrs();
        int num_crtcs;
        int i, id = 0;
-       int r;
-
-       omap_crtc_pre_init();
 
        drm_mode_config_init(dev);
 
@@ -119,26 +157,8 @@ static int omap_modeset_init(struct drm_device *dev)
                enum omap_channel channel;
                struct omap_overlay_manager *mgr;
 
-               if (!dssdev->driver) {
-                       dev_warn(dev->dev, "%s has no driver.. skipping it\n",
-                                       dssdev->name);
-                       continue;
-               }
-
-               if (!(dssdev->driver->get_timings ||
-                                       dssdev->driver->read_edid)) {
-                       dev_warn(dev->dev, "%s driver does not support "
-                               "get_timings or read_edid.. skipping it!\n",
-                               dssdev->name);
-                       continue;
-               }
-
-               r = dssdev->driver->connect(dssdev);
-               if (r) {
-                       dev_err(dev->dev, "could not connect display: %s\n",
-                                       dssdev->name);
+               if (!omapdss_device_is_connected(dssdev))
                        continue;
-               }
 
                encoder = omap_encoder_init(dev, dssdev);
 
@@ -497,16 +517,16 @@ static int dev_unload(struct drm_device *dev)
        DBG("unload: dev=%p", dev);
 
        drm_kms_helper_poll_fini(dev);
-       drm_vblank_cleanup(dev);
-       omap_drm_irq_uninstall(dev);
 
        omap_fbdev_free(dev);
        omap_modeset_free(dev);
        omap_gem_deinit(dev);
 
-       flush_workqueue(priv->wq);
        destroy_workqueue(priv->wq);
 
+       drm_vblank_cleanup(dev);
+       omap_drm_irq_uninstall(dev);
+
        kfree(dev->dev_private);
        dev->dev_private = NULL;
 
@@ -655,9 +675,19 @@ static void pdev_shutdown(struct platform_device *device)
 
 static int pdev_probe(struct platform_device *device)
 {
+       int r;
+
        if (omapdss_is_initialized() == false)
                return -EPROBE_DEFER;
 
+       omap_crtc_pre_init();
+
+       r = omap_connect_dssdevs();
+       if (r) {
+               omap_crtc_pre_uninit();
+               return r;
+       }
+
        DBG("%s", device->name);
        return drm_platform_init(&omap_drm_driver, device);
 }
@@ -665,9 +695,11 @@ static int pdev_probe(struct platform_device *device)
 static int pdev_remove(struct platform_device *device)
 {
        DBG("");
-       drm_platform_exit(&omap_drm_driver, device);
 
-       platform_driver_unregister(&omap_dmm_driver);
+       omap_disconnect_dssdevs();
+       omap_crtc_pre_uninit();
+
+       drm_put_dev(platform_get_drvdata(device));
        return 0;
 }
 
index 07847693cf494caababe5780bae717ef393c8af1..428b2981fd685f3e42b3ec4d0a63021a94fdaf15 100644 (file)
@@ -141,10 +141,12 @@ int omap_gem_resume(struct device *dev);
 
 int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
 void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t omap_irq_handler(int irq, void *arg);
 void omap_irq_preinstall(struct drm_device *dev);
 int omap_irq_postinstall(struct drm_device *dev);
 void omap_irq_uninstall(struct drm_device *dev);
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 int omap_drm_irq_uninstall(struct drm_device *dev);
@@ -158,6 +160,7 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
 int omap_crtc_apply(struct drm_crtc *crtc,
                struct omap_drm_apply *apply);
 void omap_crtc_pre_init(void);
+void omap_crtc_pre_uninit(void);
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, enum omap_channel channel, int id);
 
index 6a12e899235bfe480dda3fd1bf80876bc6c805aa..5290a88c681db3e6cd2d4a9e4fa32c0e50230153 100644 (file)
@@ -51,6 +51,9 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
 static void omap_encoder_destroy(struct drm_encoder *encoder)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+       omap_encoder_set_enabled(encoder, false);
+
        drm_encoder_cleanup(encoder);
        kfree(omap_encoder);
 }
index f2b8f0668c0c1701887e8b6c4a8ccb3b3bf1952b..f466c4aaee9464c7ca4c16d684a4e7d4bfec5584 100644 (file)
@@ -123,12 +123,16 @@ static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
 {
        int i;
 
+       drm_modeset_lock_all(fb->dev);
+
        for (i = 0; i < num_clips; i++) {
                omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
                                        clips[i].x2 - clips[i].x1,
                                        clips[i].y2 - clips[i].y1);
        }
 
+       drm_modeset_unlock_all(fb->dev);
+
        return 0;
 }
 
index cb858600185f8051c7f92997e5e7e8ad7d8a6173..f035d2bceae7db358427c689b3a1fdd6cde28334 100644 (file)
@@ -45,12 +45,11 @@ static void omap_irq_update(struct drm_device *dev)
        dispc_read_irqenable();        /* flush posted write */
 }
 
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
 {
        struct omap_drm_private *priv = dev->dev_private;
        unsigned long flags;
 
-       dispc_runtime_get();
        spin_lock_irqsave(&list_lock, flags);
 
        if (!WARN_ON(irq->registered)) {
@@ -60,14 +59,21 @@ void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
        }
 
        spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+       dispc_runtime_get();
+
+       __omap_irq_register(dev, irq);
+
        dispc_runtime_put();
 }
 
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
 {
        unsigned long flags;
 
-       dispc_runtime_get();
        spin_lock_irqsave(&list_lock, flags);
 
        if (!WARN_ON(!irq->registered)) {
@@ -77,6 +83,14 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
        }
 
        spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+       dispc_runtime_get();
+
+       __omap_irq_unregister(dev, irq);
+
        dispc_runtime_put();
 }
 
@@ -173,7 +187,7 @@ void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
        dispc_runtime_put();
 }
 
-irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t omap_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        struct omap_drm_private *priv = dev->dev_private;
@@ -308,7 +322,7 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
        if (dev->num_crtcs) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                for (i = 0; i < dev->num_crtcs; i++) {
-                       DRM_WAKEUP(&dev->vblank[i].queue);
+                       wake_up(&dev->vblank[i].queue);
                        dev->vblank[i].enabled = false;
                        dev->vblank[i].last =
                                dev->driver->get_vblank_counter(dev, i);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
new file mode 100644 (file)
index 0000000..3e0f13d
--- /dev/null
@@ -0,0 +1,19 @@
+config DRM_PANEL
+       bool
+       depends on DRM
+       help
+         Panel registration and lookup framework.
+
+menu "Display Panels"
+       depends on DRM_PANEL
+
+config DRM_PANEL_SIMPLE
+       tristate "support for simple panels"
+       depends on OF
+       help
+         DRM panel driver for dumb panels that need at most a regulator and
+         a GPIO to be powered up. Optionally a backlight can be attached so
+         that it can be automatically turned off when the panel goes into a
+         low power state.
+
+endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
new file mode 100644 (file)
index 0000000..af9dfa2
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
new file mode 100644 (file)
index 0000000..59d52ca
--- /dev/null
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+struct panel_desc {
+       const struct drm_display_mode *modes;
+       unsigned int num_modes;
+
+       struct {
+               unsigned int width;
+               unsigned int height;
+       } size;
+};
+
+/* TODO: convert to gpiod_*() API once it's been merged */
+#define GPIO_ACTIVE_LOW        (1 << 0)
+
+struct panel_simple {
+       struct drm_panel base;
+       bool enabled;
+
+       const struct panel_desc *desc;
+
+       struct backlight_device *backlight;
+       struct regulator *supply;
+       struct i2c_adapter *ddc;
+
+       unsigned long enable_gpio_flags;
+       int enable_gpio;
+};
+
+static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
+{
+       return container_of(panel, struct panel_simple, base);
+}
+
+static int panel_simple_get_fixed_modes(struct panel_simple *panel)
+{
+       struct drm_connector *connector = panel->base.connector;
+       struct drm_device *drm = panel->base.drm;
+       struct drm_display_mode *mode;
+       unsigned int i, num = 0;
+
+       if (!panel->desc)
+               return 0;
+
+       for (i = 0; i < panel->desc->num_modes; i++) {
+               const struct drm_display_mode *m = &panel->desc->modes[i];
+
+               mode = drm_mode_duplicate(drm, m);
+               if (!mode) {
+                       dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+                               m->hdisplay, m->vdisplay, m->vrefresh);
+                       continue;
+               }
+
+               drm_mode_set_name(mode);
+
+               drm_mode_probed_add(connector, mode);
+               num++;
+       }
+
+       connector->display_info.width_mm = panel->desc->size.width;
+       connector->display_info.height_mm = panel->desc->size.height;
+
+       return num;
+}
+
+static int panel_simple_disable(struct drm_panel *panel)
+{
+       struct panel_simple *p = to_panel_simple(panel);
+
+       if (!p->enabled)
+               return 0;
+
+       if (p->backlight) {
+               p->backlight->props.power = FB_BLANK_POWERDOWN;
+               backlight_update_status(p->backlight);
+       }
+
+       if (gpio_is_valid(p->enable_gpio)) {
+               if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+                       gpio_set_value(p->enable_gpio, 1);
+               else
+                       gpio_set_value(p->enable_gpio, 0);
+       }
+
+       regulator_disable(p->supply);
+       p->enabled = false;
+
+       return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+       struct panel_simple *p = to_panel_simple(panel);
+       int err;
+
+       if (p->enabled)
+               return 0;
+
+       err = regulator_enable(p->supply);
+       if (err < 0) {
+               dev_err(panel->dev, "failed to enable supply: %d\n", err);
+               return err;
+       }
+
+       if (gpio_is_valid(p->enable_gpio)) {
+               if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+                       gpio_set_value(p->enable_gpio, 0);
+               else
+                       gpio_set_value(p->enable_gpio, 1);
+       }
+
+       if (p->backlight) {
+               p->backlight->props.power = FB_BLANK_UNBLANK;
+               backlight_update_status(p->backlight);
+       }
+
+       p->enabled = true;
+
+       return 0;
+}
+
+static int panel_simple_get_modes(struct drm_panel *panel)
+{
+       struct panel_simple *p = to_panel_simple(panel);
+       int num = 0;
+
+       /* probe EDID if a DDC bus is available */
+       if (p->ddc) {
+               struct edid *edid = drm_get_edid(panel->connector, p->ddc);
+               drm_mode_connector_update_edid_property(panel->connector, edid);
+               if (edid) {
+                       num += drm_add_edid_modes(panel->connector, edid);
+                       kfree(edid);
+               }
+       }
+
+       /* add hard-coded panel modes */
+       num += panel_simple_get_fixed_modes(p);
+
+       return num;
+}
+
+static const struct drm_panel_funcs panel_simple_funcs = {
+       .disable = panel_simple_disable,
+       .enable = panel_simple_enable,
+       .get_modes = panel_simple_get_modes,
+};
+
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
+{
+       struct device_node *backlight, *ddc;
+       struct panel_simple *panel;
+       enum of_gpio_flags flags;
+       int err;
+
+       panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+       if (!panel)
+               return -ENOMEM;
+
+       panel->enabled = false;
+       panel->desc = desc;
+
+       panel->supply = devm_regulator_get(dev, "power");
+       if (IS_ERR(panel->supply))
+               return PTR_ERR(panel->supply);
+
+       panel->enable_gpio = of_get_named_gpio_flags(dev->of_node,
+                                                    "enable-gpios", 0,
+                                                    &flags);
+       if (gpio_is_valid(panel->enable_gpio)) {
+               unsigned int value;
+
+               if (flags & OF_GPIO_ACTIVE_LOW)
+                       panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
+
+               err = gpio_request(panel->enable_gpio, "enable");
+               if (err < 0) {
+                       dev_err(dev, "failed to request GPIO#%u: %d\n",
+                               panel->enable_gpio, err);
+                       return err;
+               }
+
+               value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0;
+
+               err = gpio_direction_output(panel->enable_gpio, value);
+               if (err < 0) {
+                       dev_err(dev, "failed to setup GPIO%u: %d\n",
+                               panel->enable_gpio, err);
+                       goto free_gpio;
+               }
+       }
+
+       backlight = of_parse_phandle(dev->of_node, "backlight", 0);
+       if (backlight) {
+               panel->backlight = of_find_backlight_by_node(backlight);
+               of_node_put(backlight);
+
+               if (!panel->backlight) {
+                       err = -EPROBE_DEFER;
+                       goto free_gpio;
+               }
+       }
+
+       ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+       if (ddc) {
+               panel->ddc = of_find_i2c_adapter_by_node(ddc);
+               of_node_put(ddc);
+
+               if (!panel->ddc) {
+                       err = -EPROBE_DEFER;
+                       goto free_backlight;
+               }
+       }
+
+       drm_panel_init(&panel->base);
+       panel->base.dev = dev;
+       panel->base.funcs = &panel_simple_funcs;
+
+       err = drm_panel_add(&panel->base);
+       if (err < 0)
+               goto free_ddc;
+
+       dev_set_drvdata(dev, panel);
+
+       return 0;
+
+free_ddc:
+       if (panel->ddc)
+               put_device(&panel->ddc->dev);
+free_backlight:
+       if (panel->backlight)
+               put_device(&panel->backlight->dev);
+free_gpio:
+       if (gpio_is_valid(panel->enable_gpio))
+               gpio_free(panel->enable_gpio);
+
+       return err;
+}
+
+static int panel_simple_remove(struct device *dev)
+{
+       struct panel_simple *panel = dev_get_drvdata(dev);
+
+       drm_panel_detach(&panel->base);
+       drm_panel_remove(&panel->base);
+
+       panel_simple_disable(&panel->base);
+
+       if (panel->ddc)
+               put_device(&panel->ddc->dev);
+
+       if (panel->backlight)
+               put_device(&panel->backlight->dev);
+
+       if (gpio_is_valid(panel->enable_gpio))
+               gpio_free(panel->enable_gpio);
+
+       regulator_disable(panel->supply);
+
+       return 0;
+}
+
+static const struct drm_display_mode auo_b101aw03_mode = {
+       .clock = 51450,
+       .hdisplay = 1024,
+       .hsync_start = 1024 + 156,
+       .hsync_end = 1024 + 156 + 8,
+       .htotal = 1024 + 156 + 8 + 156,
+       .vdisplay = 600,
+       .vsync_start = 600 + 16,
+       .vsync_end = 600 + 16 + 6,
+       .vtotal = 600 + 16 + 6 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101aw03 = {
+       .modes = &auo_b101aw03_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 223,
+               .height = 125,
+       },
+};
+
+static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
+       .clock = 72070,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 58,
+       .hsync_end = 1366 + 58 + 58,
+       .htotal = 1366 + 58 + 58 + 58,
+       .vdisplay = 768,
+       .vsync_start = 768 + 4,
+       .vsync_end = 768 + 4 + 4,
+       .vtotal = 768 + 4 + 4 + 4,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wa01a = {
+       .modes = &chunghwa_claa101wa01a_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 220,
+               .height = 120,
+       },
+};
+
+static const struct drm_display_mode chunghwa_claa101wb01_mode = {
+       .clock = 69300,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 48,
+       .hsync_end = 1366 + 48 + 32,
+       .htotal = 1366 + 48 + 32 + 20,
+       .vdisplay = 768,
+       .vsync_start = 768 + 16,
+       .vsync_end = 768 + 16 + 8,
+       .vtotal = 768 + 16 + 8 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wb01 = {
+       .modes = &chunghwa_claa101wb01_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 223,
+               .height = 125,
+       },
+};
+
+static const struct drm_display_mode samsung_ltn101nt05_mode = {
+       .clock = 54030,
+       .hdisplay = 1024,
+       .hsync_start = 1024 + 24,
+       .hsync_end = 1024 + 24 + 136,
+       .htotal = 1024 + 24 + 136 + 160,
+       .vdisplay = 600,
+       .vsync_start = 600 + 3,
+       .vsync_end = 600 + 3 + 6,
+       .vtotal = 600 + 3 + 6 + 61,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_ltn101nt05 = {
+       .modes = &samsung_ltn101nt05_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 1024,
+               .height = 600,
+       },
+};
+
+static const struct of_device_id platform_of_match[] = {
+       {
+               .compatible = "auo,b101aw03",
+               .data = &auo_b101aw03,
+       }, {
+               .compatible = "chunghwa,claa101wa01a",
+               .data = &chunghwa_claa101wa01a
+       }, {
+               .compatible = "chunghwa,claa101wb01",
+               .data = &chunghwa_claa101wb01
+       }, {
+               .compatible = "samsung,ltn101nt05",
+               .data = &samsung_ltn101nt05,
+       }, {
+               .compatible = "simple-panel",
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static int panel_simple_platform_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *id;
+
+       id = of_match_node(platform_of_match, pdev->dev.of_node);
+       if (!id)
+               return -ENODEV;
+
+       return panel_simple_probe(&pdev->dev, id->data);
+}
+
+static int panel_simple_platform_remove(struct platform_device *pdev)
+{
+       return panel_simple_remove(&pdev->dev);
+}
+
+static struct platform_driver panel_simple_platform_driver = {
+       .driver = {
+               .name = "panel-simple",
+               .owner = THIS_MODULE,
+               .of_match_table = platform_of_match,
+       },
+       .probe = panel_simple_platform_probe,
+       .remove = panel_simple_platform_remove,
+};
+
+struct panel_desc_dsi {
+       struct panel_desc desc;
+
+       enum mipi_dsi_pixel_format format;
+       unsigned int lanes;
+};
+
+static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
+       .clock = 157200,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 154,
+       .hsync_end = 1920 + 154 + 16,
+       .htotal = 1920 + 154 + 16 + 32,
+       .vdisplay = 1200,
+       .vsync_start = 1200 + 17,
+       .vsync_end = 1200 + 17 + 2,
+       .vtotal = 1200 + 17 + 2 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
+       .desc = {
+               .modes = &panasonic_vvx10f004b00_mode,
+               .num_modes = 1,
+               .size = {
+                       .width = 217,
+                       .height = 136,
+               },
+       },
+       .format = MIPI_DSI_FMT_RGB888,
+       .lanes = 4,
+};
+
+static const struct of_device_id dsi_of_match[] = {
+       {
+               .compatible = "panasonic,vvx10f004b00",
+               .data = &panasonic_vvx10f004b00
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
+{
+       const struct panel_desc_dsi *desc;
+       const struct of_device_id *id;
+       int err;
+
+       id = of_match_node(dsi_of_match, dsi->dev.of_node);
+       if (!id)
+               return -ENODEV;
+
+       desc = id->data;
+
+       err = panel_simple_probe(&dsi->dev, &desc->desc);
+       if (err < 0)
+               return err;
+
+       dsi->format = desc->format;
+       dsi->lanes = desc->lanes;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+{
+       int err;
+
+       err = mipi_dsi_detach(dsi);
+       if (err < 0)
+               dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+       return panel_simple_remove(&dsi->dev);
+}
+
+static struct mipi_dsi_driver panel_simple_dsi_driver = {
+       .driver = {
+               .name = "panel-simple-dsi",
+               .owner = THIS_MODULE,
+               .of_match_table = dsi_of_match,
+       },
+       .probe = panel_simple_dsi_probe,
+       .remove = panel_simple_dsi_remove,
+};
+
+static int __init panel_simple_init(void)
+{
+       int err;
+
+       err = platform_driver_register(&panel_simple_platform_driver);
+       if (err < 0)
+               return err;
+
+       if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+               err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+module_init(panel_simple_init);
+
+static void __exit panel_simple_exit(void)
+{
+       if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+               mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
+
+       platform_driver_unregister(&panel_simple_platform_driver);
+}
+module_exit(panel_simple_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple Panels");
+MODULE_LICENSE("GPL and additional rights");
index d70aafb83307d2a085afdbba6a2d4762309667ea..798bde2e5881db484fd3797013aa11a04f11c902 100644 (file)
@@ -399,10 +399,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        struct qxl_bo *qobj;
        int inc = 1;
 
+       drm_modeset_lock_all(fb->dev);
+
        qobj = gem_to_qxl_bo(qxl_fb->obj);
        /* if we aren't primary surface ignore this */
-       if (!qobj->is_primary)
+       if (!qobj->is_primary) {
+               drm_modeset_unlock_all(fb->dev);
                return 0;
+       }
 
        if (!num_clips) {
                num_clips = 1;
@@ -417,6 +421,9 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
 
        qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
                          clips, num_clips, inc);
+
+       drm_modeset_unlock_all(fb->dev);
+
        return 0;
 }
 
index 7bda32f68d3b50e528c21c3bba29a3b2018d1df1..36ed40ba773f8149d88eacef4c0633614e09eb83 100644 (file)
@@ -534,7 +534,7 @@ void qxl_debugfs_takedown(struct drm_minor *minor);
 
 /* qxl_irq.c */
 int qxl_irq_init(struct qxl_device *qdev);
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+irqreturn_t qxl_irq_handler(int irq, void *arg);
 
 /* qxl_fb.c */
 int qxl_fb_init(struct qxl_device *qdev);
index 7b95c75e9626ae1324dea48b310d3052f0c00d2d..0bb86e6d41b44cc4c1641e72da6e67e754994798 100644 (file)
@@ -200,7 +200,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
        for (i = 0; i < cmd->relocs_num; ++i) {
                struct drm_qxl_reloc reloc;
 
-               if (DRM_COPY_FROM_USER(&reloc,
+               if (copy_from_user(&reloc,
                                       &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
                                       sizeof(reloc))) {
                        ret = -EFAULT;
@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
                struct drm_qxl_command *commands =
                        (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
 
-               if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+               if (copy_from_user(&user_cmd, &commands[cmd_num],
                                       sizeof(user_cmd)))
                        return -EFAULT;
 
index 21393dc4700a09697b7551d274b18a96ff5938ab..28f84b4fce32fab576d4bd71d324bceaf93c0682 100644 (file)
@@ -25,7 +25,7 @@
 
 #include "qxl_drv.h"
 
-irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t qxl_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
index e5ca498be920a86be9507c94858637aeb0b3c4c6..fd88eb4a3f79d1be5b5478ef04a3c77dde2dbb71 100644 (file)
@@ -115,7 +115,7 @@ static void qxl_gc_work(struct work_struct *work)
        qxl_garbage_collect(qdev);
 }
 
-int qxl_device_init(struct qxl_device *qdev,
+static int qxl_device_init(struct qxl_device *qdev,
                    struct drm_device *ddev,
                    struct pci_dev *pdev,
                    unsigned long flags)
index c451257f08fb51ea4b2c5346339237032705f955..59459fe4e8c57b9c367887daa945324ac7f4eb34 100644 (file)
@@ -892,10 +892,10 @@ static int r128_cce_get_buffers(struct drm_device *dev,
 
                buf->file_priv = file_priv;
 
-               if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+               if (copy_to_user(&d->request_indices[i], &buf->idx,
                                     sizeof(buf->idx)))
                        return -EFAULT;
-               if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+               if (copy_to_user(&d->request_sizes[i], &buf->total,
                                     sizeof(buf->total)))
                        return -EFAULT;
 
index 56eb5e3f54399ae9197e62cd5cfadcf094b7fb1b..5bf3f5ff805d941b74e05ab26f5119b8f154c057 100644 (file)
@@ -154,7 +154,7 @@ extern int r128_do_cleanup_cce(struct drm_device *dev);
 extern int r128_enable_vblank(struct drm_device *dev, int crtc);
 extern void r128_disable_vblank(struct drm_device *dev, int crtc);
 extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
-extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
 extern void r128_driver_irq_preinstall(struct drm_device *dev);
 extern int r128_driver_irq_postinstall(struct drm_device *dev);
 extern void r128_driver_irq_uninstall(struct drm_device *dev);
@@ -514,7 +514,7 @@ do {                                                                        \
        if (R128_VERBOSE)                                               \
                DRM_INFO("COMMIT_RING() tail=0x%06x\n",                 \
                         dev_priv->ring.tail);                          \
-       DRM_MEMORYBARRIER();                                            \
+       mb();                                           \
        R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail);       \
        R128_READ(R128_PM4_BUFFER_DL_WPTR);                             \
 } while (0)
index a954c548201ece8f733a933885c3830e32de1788..b0d0fd3e437676cb306a55df7fa82cba49bcdda1 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <drm/drmP.h>
 #include <drm/r128_drm.h>
+#include "r128_drv.h"
 
 typedef struct drm_r128_init32 {
        int func;
index 2ea4f09d2691d7f69f0a084677c9821f1e0afd69..c2ae496babb7374da8381c688f15883ebc533bc0 100644 (file)
@@ -44,7 +44,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
        return atomic_read(&dev_priv->vbl_received);
 }
 
-irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t r128_driver_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
index 01dd9aef9f0e9046116d570d52de96b3bf4a121b..e806dacd452f7b9cc8c93d20308db93ea0ef0da6 100644 (file)
@@ -895,31 +895,22 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
        if (count > 4096 || count <= 0)
                return -EMSGSIZE;
 
-       if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+       if (copy_from_user(&x, depth->x, sizeof(x)))
                return -EFAULT;
-       if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+       if (copy_from_user(&y, depth->y, sizeof(y)))
                return -EFAULT;
 
        buffer_size = depth->n * sizeof(u32);
-       buffer = kmalloc(buffer_size, GFP_KERNEL);
-       if (buffer == NULL)
-               return -ENOMEM;
-       if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
-               kfree(buffer);
-               return -EFAULT;
-       }
+       buffer = memdup_user(depth->buffer, buffer_size);
+       if (IS_ERR(buffer))
+               return PTR_ERR(buffer);
 
        mask_size = depth->n * sizeof(u8);
        if (depth->mask) {
-               mask = kmalloc(mask_size, GFP_KERNEL);
-               if (mask == NULL) {
+               mask = memdup_user(depth->mask, mask_size);
+               if (IS_ERR(mask)) {
                        kfree(buffer);
-                       return -ENOMEM;
-               }
-               if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
-                       kfree(buffer);
-                       kfree(mask);
-                       return -EFAULT;
+                       return PTR_ERR(mask);
                }
 
                for (i = 0; i < count; i++, x++) {
@@ -999,46 +990,33 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
                kfree(x);
                return -ENOMEM;
        }
-       if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+       if (copy_from_user(x, depth->x, xbuf_size)) {
                kfree(x);
                kfree(y);
                return -EFAULT;
        }
-       if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
+       if (copy_from_user(y, depth->y, xbuf_size)) {
                kfree(x);
                kfree(y);
                return -EFAULT;
        }
 
        buffer_size = depth->n * sizeof(u32);
-       buffer = kmalloc(buffer_size, GFP_KERNEL);
-       if (buffer == NULL) {
-               kfree(x);
-               kfree(y);
-               return -ENOMEM;
-       }
-       if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+       buffer = memdup_user(depth->buffer, buffer_size);
+       if (IS_ERR(buffer)) {
                kfree(x);
                kfree(y);
-               kfree(buffer);
-               return -EFAULT;
+               return PTR_ERR(buffer);
        }
 
        if (depth->mask) {
                mask_size = depth->n * sizeof(u8);
-               mask = kmalloc(mask_size, GFP_KERNEL);
-               if (mask == NULL) {
-                       kfree(x);
-                       kfree(y);
-                       kfree(buffer);
-                       return -ENOMEM;
-               }
-               if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+               mask = memdup_user(depth->mask, mask_size);
+               if (IS_ERR(mask)) {
                        kfree(x);
                        kfree(y);
                        kfree(buffer);
-                       kfree(mask);
-                       return -EFAULT;
+                       return PTR_ERR(mask);
                }
 
                for (i = 0; i < count; i++) {
@@ -1107,9 +1085,9 @@ static int r128_cce_dispatch_read_span(struct drm_device *dev,
        if (count > 4096 || count <= 0)
                return -EMSGSIZE;
 
-       if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+       if (copy_from_user(&x, depth->x, sizeof(x)))
                return -EFAULT;
-       if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+       if (copy_from_user(&y, depth->y, sizeof(y)))
                return -EFAULT;
 
        BEGIN_RING(7);
@@ -1162,12 +1140,12 @@ static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
                kfree(x);
                return -ENOMEM;
        }
-       if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+       if (copy_from_user(x, depth->x, xbuf_size)) {
                kfree(x);
                kfree(y);
                return -EFAULT;
        }
-       if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+       if (copy_from_user(y, depth->y, ybuf_size)) {
                kfree(x);
                kfree(y);
                return -EFAULT;
@@ -1524,7 +1502,7 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file
 
        DEV_INIT_TEST_WITH_RETURN(dev_priv);
 
-       if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+       if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
                return -EFAULT;
 
        RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1622,7 +1600,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
                return -EINVAL;
        }
 
-       if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+       if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }
index 0b9621c9aeea3b25da6bad8b121bfc3c3236ddcc..a9338c85630fe0548336e8c8c492361ce797dd5a 100644 (file)
@@ -209,6 +209,16 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+static const u32 vga_control_regs[6] =
+{
+       AVIVO_D1VGA_CONTROL,
+       AVIVO_D2VGA_CONTROL,
+       EVERGREEN_D3VGA_CONTROL,
+       EVERGREEN_D4VGA_CONTROL,
+       EVERGREEN_D5VGA_CONTROL,
+       EVERGREEN_D6VGA_CONTROL,
+};
+
 static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -216,13 +226,23 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
        struct radeon_device *rdev = dev->dev_private;
        int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
        BLANK_CRTC_PS_ALLOCATION args;
+       u32 vga_control = 0;
 
        memset(&args, 0, sizeof(args));
 
+       if (ASIC_IS_DCE8(rdev)) {
+               vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
+               WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
+       }
+
        args.ucCRTC = radeon_crtc->crtc_id;
        args.ucBlanking = state;
 
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+       if (ASIC_IS_DCE8(rdev)) {
+               WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
+       }
 }
 
 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
@@ -423,7 +443,17 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
        int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
        union atom_enable_ss args;
 
-       if (!enable) {
+       if (enable) {
+               /* Don't mess with SS if percentage is 0 or external ss.
+                * SS is already disabled previously, and disabling it
+                * again can cause display problems if the pll is already
+                * programmed.
+                */
+               if (ss->percentage == 0)
+                       return;
+               if (ss->type & ATOM_EXTERNAL_SS_MASK)
+                       return;
+       } else {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->mode_info.crtcs[i] &&
                            rdev->mode_info.crtcs[i]->enabled &&
@@ -459,8 +489,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
                args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
                args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
                args.v3.ucEnable = enable;
-               if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
-                       args.v3.ucEnable = ATOM_DISABLE;
        } else if (ASIC_IS_DCE4(rdev)) {
                args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
                args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -480,8 +508,6 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
                args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
                args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
                args.v2.ucEnable = enable;
-               if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
-                       args.v2.ucEnable = ATOM_DISABLE;
        } else if (ASIC_IS_DCE3(rdev)) {
                args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
                args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -503,8 +529,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
                args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
                args.lvds_ss_2.ucEnable = enable;
        } else {
-               if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
-                   (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+               if (enable == ATOM_DISABLE) {
                        atombios_disable_ss(rdev, pll_id);
                        return;
                }
@@ -938,11 +963,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
                                                        radeon_atombios_get_ppll_ss_info(rdev,
                                                                                         &radeon_crtc->ss,
                                                                                         ATOM_DP_SS_ID1);
-                               } else
+                               } else {
                                        radeon_crtc->ss_enabled =
                                                radeon_atombios_get_ppll_ss_info(rdev,
                                                                                 &radeon_crtc->ss,
                                                                                 ATOM_DP_SS_ID1);
+                               }
+                               /* disable spread spectrum on DCE3 DP */
+                               radeon_crtc->ss_enabled = false;
                        }
                        break;
                case ATOM_ENCODER_MODE_LVDS:
@@ -1039,15 +1067,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                /* calculate ss amount and step size */
                if (ASIC_IS_DCE4(rdev)) {
                        u32 step_size;
-                       u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+                       u32 amount = (((fb_div * 10) + frac_fb_div) *
+                                     (u32)radeon_crtc->ss.percentage) /
+                               (100 * (u32)radeon_crtc->ss.percentage_divider);
                        radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
                        radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
                                ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
                        if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
-                               step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+                               step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
                                        (125 * 25 * pll->reference_freq / 100);
                        else
-                               step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+                               step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
                                        (125 * 25 * pll->reference_freq / 100);
                        radeon_crtc->ss.step = step_size;
                }
index fb3ae07a14692601a912b1cbea37ae7ff13bcc62..4ad7643fce5fe9bdb8a479727400ae173475bb37 100644 (file)
@@ -157,21 +157,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
 
        msg[0] = address;
        msg[1] = address >> 8;
-       msg[2] = AUX_NATIVE_WRITE << 4;
+       msg[2] = DP_AUX_NATIVE_WRITE << 4;
        msg[3] = (msg_bytes << 4) | (send_bytes - 1);
        memcpy(&msg[4], send, send_bytes);
 
-       for (retry = 0; retry < 4; retry++) {
+       for (retry = 0; retry < 7; retry++) {
                ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                            msg, msg_bytes, NULL, 0, delay, &ack);
                if (ret == -EBUSY)
                        continue;
                else if (ret < 0)
                        return ret;
-               if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+               ack >>= 4;
+               if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
                        return send_bytes;
-               else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
-                       udelay(400);
+               else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+                       usleep_range(400, 500);
                else
                        return -EIO;
        }
@@ -191,20 +192,21 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
 
        msg[0] = address;
        msg[1] = address >> 8;
-       msg[2] = AUX_NATIVE_READ << 4;
+       msg[2] = DP_AUX_NATIVE_READ << 4;
        msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
-       for (retry = 0; retry < 4; retry++) {
+       for (retry = 0; retry < 7; retry++) {
                ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                                            msg, msg_bytes, recv, recv_bytes, delay, &ack);
                if (ret == -EBUSY)
                        continue;
                else if (ret < 0)
                        return ret;
-               if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+               ack >>= 4;
+               if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
                        return ret;
-               else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
-                       udelay(400);
+               else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
+                       usleep_range(400, 500);
                else if (ret == 0)
                        return -EPROTO;
                else
@@ -246,12 +248,12 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
-               msg[2] = AUX_I2C_READ << 4;
+               msg[2] = DP_AUX_I2C_READ << 4;
        else
-               msg[2] = AUX_I2C_WRITE << 4;
+               msg[2] = DP_AUX_I2C_WRITE << 4;
 
        if (!(mode & MODE_I2C_STOP))
-               msg[2] |= AUX_I2C_MOT << 4;
+               msg[2] |= DP_AUX_I2C_MOT << 4;
 
        msg[0] = address;
        msg[1] = address >> 8;
@@ -272,7 +274,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                break;
        }
 
-       for (retry = 0; retry < 4; retry++) {
+       for (retry = 0; retry < 7; retry++) {
                ret = radeon_process_aux_ch(auxch,
                                            msg, msg_bytes, reply, reply_bytes, 0, &ack);
                if (ret == -EBUSY)
@@ -282,35 +284,35 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        return ret;
                }
 
-               switch (ack & AUX_NATIVE_REPLY_MASK) {
-               case AUX_NATIVE_REPLY_ACK:
+               switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
+               case DP_AUX_NATIVE_REPLY_ACK:
                        /* I2C-over-AUX Reply field is only valid
                         * when paired with AUX ACK.
                         */
                        break;
-               case AUX_NATIVE_REPLY_NACK:
+               case DP_AUX_NATIVE_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_ch native nack\n");
                        return -EREMOTEIO;
-               case AUX_NATIVE_REPLY_DEFER:
+               case DP_AUX_NATIVE_REPLY_DEFER:
                        DRM_DEBUG_KMS("aux_ch native defer\n");
-                       udelay(400);
+                       usleep_range(500, 600);
                        continue;
                default:
                        DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
                        return -EREMOTEIO;
                }
 
-               switch (ack & AUX_I2C_REPLY_MASK) {
-               case AUX_I2C_REPLY_ACK:
+               switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) {
+               case DP_AUX_I2C_REPLY_ACK:
                        if (mode == MODE_I2C_READ)
                                *read_byte = reply[0];
                        return ret;
-               case AUX_I2C_REPLY_NACK:
+               case DP_AUX_I2C_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_i2c nack\n");
                        return -EREMOTEIO;
-               case AUX_I2C_REPLY_DEFER:
+               case DP_AUX_I2C_REPLY_DEFER:
                        DRM_DEBUG_KMS("aux_i2c defer\n");
-                       udelay(400);
+                       usleep_range(400, 500);
                        break;
                default:
                        DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
@@ -671,9 +673,11 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
        u8 tmp;
 
        /* power up the sink */
-       if (dp_info->dpcd[0] >= 0x11)
+       if (dp_info->dpcd[0] >= 0x11) {
                radeon_write_dpcd_reg(dp_info->radeon_connector,
                                      DP_SET_POWER, DP_SET_POWER_D0);
+               usleep_range(1000, 2000);
+       }
 
        /* possibly enable downspread on the sink */
        if (dp_info->dpcd[3] & 0x1)
index f685035dbe39a7d7ad48e63a9b4ca2ea717701fe..b5162c3b6111a1845834fe1b63d3e1f17642d501 100644 (file)
@@ -27,8 +27,6 @@
 #include "radeon.h"
 #include "atom.h"
 
-extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
 #define TARGET_HW_I2C_CLOCK 50
 
 /* these are a limitation of ProcessI2cChannelTransaction not the hw */
index 9b6950d9b3c09cc193010a50bdd521939464d539..0fbd36f3d4e9da34b65bc5f8ed302dc143b2d3c7 100644 (file)
@@ -49,6 +49,7 @@ struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
 
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
 //********* BARTS **************//
 static const u32 barts_cgcg_cgls_default[] =
@@ -2510,21 +2511,6 @@ int btc_dpm_enable(struct radeon_device *rdev)
        if (eg_pi->ls_clock_gating)
                btc_ls_clock_gating_enable(rdev, true);
 
-       if (rdev->irq.installed &&
-           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-               PPSMC_Result result;
-
-               ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-               if (ret)
-                       return ret;
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-               result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
-               if (result != PPSMC_Result_OK)
-                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-       }
-
        rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
        btc_init_stutter_mode(rdev);
@@ -2576,7 +2562,11 @@ void btc_dpm_disable(struct radeon_device *rdev)
 void btc_dpm_setup_asic(struct radeon_device *rdev)
 {
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+       int r;
 
+       r = ni_mc_load_microcode(rdev);
+       if (r)
+               DRM_ERROR("Failed to load MC firmware!\n");
        rv770_get_memory_type(rdev);
        rv740_read_clock_registers(rdev);
        btc_read_arb_registers(rdev);
index 1ed47997635803d9e69bba8b3bd0202ecbc04b0e..8d49104ca6c254efa86f4287059323be28ba3f29 100644 (file)
@@ -171,8 +171,7 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
                                                     struct atom_voltage_table *voltage_table);
 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
-extern void cik_update_cg(struct radeon_device *rdev,
-                         u32 block, bool enable);
+extern int ci_mc_load_microcode(struct radeon_device *rdev);
 
 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
                                         struct atom_voltage_table_entry *voltage_table,
@@ -4503,8 +4502,8 @@ static void ci_get_memory_type(struct radeon_device *rdev)
 
 }
 
-void ci_update_current_ps(struct radeon_device *rdev,
-                         struct radeon_ps *rps)
+static void ci_update_current_ps(struct radeon_device *rdev,
+                                struct radeon_ps *rps)
 {
        struct ci_ps *new_ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4514,8 +4513,8 @@ void ci_update_current_ps(struct radeon_device *rdev,
        pi->current_rps.ps_priv = &pi->current_ps;
 }
 
-void ci_update_requested_ps(struct radeon_device *rdev,
-                           struct radeon_ps *rps)
+static void ci_update_requested_ps(struct radeon_device *rdev,
+                                  struct radeon_ps *rps)
 {
        struct ci_ps *new_ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4549,6 +4548,11 @@ void ci_dpm_post_set_power_state(struct radeon_device *rdev)
 
 void ci_dpm_setup_asic(struct radeon_device *rdev)
 {
+       int r;
+
+       r = ci_mc_load_microcode(rdev);
+       if (r)
+               DRM_ERROR("Failed to load MC firmware!\n");
        ci_read_clock_registers(rdev);
        ci_get_memory_type(rdev);
        ci_enable_acpi_power_management(rdev);
@@ -4561,13 +4565,6 @@ int ci_dpm_enable(struct radeon_device *rdev)
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
        int ret;
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_MC |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_UVD |
-                            RADEON_CG_BLOCK_HDP), false);
-
        if (ci_is_smc_running(rdev))
                return -EINVAL;
        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
@@ -4665,6 +4662,18 @@ int ci_dpm_enable(struct radeon_device *rdev)
                DRM_ERROR("ci_enable_power_containment failed\n");
                return ret;
        }
+
+       ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+       ci_update_current_ps(rdev, boot_ps);
+
+       return 0;
+}
+
+int ci_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
 #if 0
@@ -4685,19 +4694,8 @@ int ci_dpm_enable(struct radeon_device *rdev)
 #endif
        }
 
-       ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
        ci_dpm_powergate_uvd(rdev, true);
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_MC |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_UVD |
-                            RADEON_CG_BLOCK_HDP), true);
-
-       ci_update_current_ps(rdev, boot_ps);
-
        return 0;
 }
 
@@ -4706,12 +4704,6 @@ void ci_dpm_disable(struct radeon_device *rdev)
        struct ci_power_info *pi = ci_get_pi(rdev);
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_MC |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_UVD |
-                            RADEON_CG_BLOCK_HDP), false);
-
        ci_dpm_powergate_uvd(rdev, false);
 
        if (!ci_is_smc_running(rdev))
@@ -4742,13 +4734,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
        struct radeon_ps *old_ps = &pi->current_rps;
        int ret;
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_MC |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_UVD |
-                            RADEON_CG_BLOCK_HDP), false);
-
        ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
        if (pi->pcie_performance_request)
                ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
@@ -4804,13 +4789,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
        if (pi->pcie_performance_request)
                ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_MC |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_UVD |
-                            RADEON_CG_BLOCK_HDP), true);
-
        return 0;
 }
 
@@ -5023,8 +5001,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
        return 0;
 }
 
-int ci_get_vbios_boot_values(struct radeon_device *rdev,
-                            struct ci_vbios_boot_state *boot_state)
+static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+                                   struct ci_vbios_boot_state *boot_state)
 {
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
index 9c745dd22438953106715feceb974cb620c7db02..8debc9d473625d642243b5a1923b6725eb534387 100644 (file)
@@ -28,6 +28,7 @@
 #include "cikd.h"
 #include "ppsmc.h"
 #include "radeon_ucode.h"
+#include "ci_dpm.h"
 
 static int ci_set_smc_sram_address(struct radeon_device *rdev,
                                   u32 smc_address, u32 limit)
index e950fabd7f5e474ab3e60371f669e31805077785..e6419ca7cd375d6958ebcd339fb68a004c56cd1a 100644 (file)
@@ -1697,7 +1697,7 @@ static void cik_srbm_select(struct radeon_device *rdev,
  * Load the GDDR MC ucode into the hw (CIK).
  * Returns 0 on success, error on failure.
  */
-static int ci_mc_load_microcode(struct radeon_device *rdev)
+int ci_mc_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
        u32 running, blackout = 0;
@@ -3486,6 +3486,51 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        return r;
 }
 
+/**
+ * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
+                                      int ridx)
+{
+       struct radeon_ring *ring = &rdev->ring[ridx];
+       u32 ref_and_mask;
+
+       switch (ring->idx) {
+       case CAYMAN_RING_TYPE_CP1_INDEX:
+       case CAYMAN_RING_TYPE_CP2_INDEX:
+       default:
+               switch (ring->me) {
+               case 0:
+                       ref_and_mask = CP2 << ring->pipe;
+                       break;
+               case 1:
+                       ref_and_mask = CP6 << ring->pipe;
+                       break;
+               default:
+                       return;
+               }
+               break;
+       case RADEON_RING_TYPE_GFX_INDEX:
+               ref_and_mask = CP0;
+               break;
+       }
+
+       radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+                                WAIT_REG_MEM_FUNCTION(3) |  /* == */
+                                WAIT_REG_MEM_ENGINE(1)));   /* pfp */
+       radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+       radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+       radeon_ring_write(ring, ref_and_mask);
+       radeon_ring_write(ring, ref_and_mask);
+       radeon_ring_write(ring, 0x20); /* poll interval */
+}
+
 /**
  * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  *
@@ -3512,15 +3557,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, fence->seq);
        radeon_ring_write(ring, 0);
        /* HDP flush */
-       /* We should be using the new WAIT_REG_MEM special op packet here
-        * but it causes the CP to hang
-        */
-       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(0)));
-       radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-       radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, 0);
+       cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -3550,15 +3587,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, fence->seq);
        radeon_ring_write(ring, 0);
        /* HDP flush */
-       /* We should be using the new WAIT_REG_MEM special op packet here
-        * but it causes the CP to hang
-        */
-       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(0)));
-       radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-       radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, 0);
+       cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
 }
 
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3566,8 +3595,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
                             struct radeon_semaphore *semaphore,
                             bool emit_wait)
 {
-/* TODO: figure out why semaphore cause lockups */
-#if 0
        uint64_t addr = semaphore->gpu_addr;
        unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
@@ -3576,9 +3603,6 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
        return true;
-#else
-       return false;
-#endif
 }
 
 /**
@@ -3816,6 +3840,8 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
        if (enable)
                WREG32(CP_ME_CNTL, 0);
        else {
+               if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+                       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
                rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        }
@@ -4014,18 +4040,50 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
                rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
                return r;
        }
+
+       if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
        return 0;
 }
 
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
-                             struct radeon_ring *ring)
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+                    struct radeon_ring *ring)
 {
        u32 rptr;
 
+       if (rdev->wb.enabled)
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
+       else
+               rptr = RREG32(CP_RB0_RPTR);
+
+       return rptr;
+}
+
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+                    struct radeon_ring *ring)
+{
+       u32 wptr;
+
+       wptr = RREG32(CP_RB0_WPTR);
+
+       return wptr;
+}
 
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
+{
+       WREG32(CP_RB0_WPTR, ring->wptr);
+       (void)RREG32(CP_RB0_WPTR);
+}
+
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring)
+{
+       u32 rptr;
 
        if (rdev->wb.enabled) {
-               rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
        } else {
                mutex_lock(&rdev->srbm_mutex);
                cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4037,13 +4095,14 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
        return rptr;
 }
 
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
-                             struct radeon_ring *ring)
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring)
 {
        u32 wptr;
 
        if (rdev->wb.enabled) {
-               wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
+               /* XXX check if swapping is necessary on BE */
+               wptr = rdev->wb.wb[ring->wptr_offs/4];
        } else {
                mutex_lock(&rdev->srbm_mutex);
                cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
@@ -4055,10 +4114,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
        return wptr;
 }
 
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
-                              struct radeon_ring *ring)
+void cik_compute_set_wptr(struct radeon_device *rdev,
+                         struct radeon_ring *ring)
 {
-       rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+       /* XXX check if swapping is necessary on BE */
+       rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
        WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
@@ -4852,6 +4912,160 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        cik_print_gpu_status_regs(rdev);
 }
 
+struct kv_reset_save_regs {
+       u32 gmcon_reng_execute;
+       u32 gmcon_misc;
+       u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct radeon_device *rdev,
+                                  struct kv_reset_save_regs *save)
+{
+       save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
+       save->gmcon_misc = RREG32(GMCON_MISC);
+       save->gmcon_misc3 = RREG32(GMCON_MISC3);
+
+       WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
+       WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
+                                               STCTRL_STUTTER_EN));
+}
+
+static void kv_restore_regs_for_reset(struct radeon_device *rdev,
+                                     struct kv_reset_save_regs *save)
+{
+       int i;
+
+       WREG32(GMCON_PGFSM_WRITE, 0);
+       WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0);
+       WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x210000);
+       WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x21003);
+       WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x2b00);
+       WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0);
+       WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x420000);
+       WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x120202);
+       WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
+       WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
+       WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
+
+       for (i = 0; i < 5; i++)
+               WREG32(GMCON_PGFSM_WRITE, 0);
+
+       WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
+       WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
+
+       WREG32(GMCON_MISC3, save->gmcon_misc3);
+       WREG32(GMCON_MISC, save->gmcon_misc);
+       WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+       struct evergreen_mc_save save;
+       struct kv_reset_save_regs kv_save = { 0 };
+       u32 tmp, i;
+
+       dev_info(rdev->dev, "GPU pci config reset\n");
+
+       /* disable dpm? */
+
+       /* disable cg/pg */
+       cik_fini_pg(rdev);
+       cik_fini_cg(rdev);
+
+       /* Disable GFX parsing/prefetching */
+       WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+       /* Disable MEC parsing/prefetching */
+       WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
+
+       /* sdma0 */
+       tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
+       tmp |= SDMA_HALT;
+       WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+       /* sdma1 */
+       tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
+       tmp |= SDMA_HALT;
+       WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+       /* XXX other engines? */
+
+       /* halt the rlc, disable cp internal ints */
+       cik_rlc_stop(rdev);
+
+       udelay(50);
+
+       /* disable mem access */
+       evergreen_mc_stop(rdev, &save);
+       if (evergreen_mc_wait_for_idle(rdev)) {
+               dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+       }
+
+       if (rdev->flags & RADEON_IS_IGP)
+               kv_save_regs_for_reset(rdev, &kv_save);
+
+       /* disable BM */
+       pci_clear_master(rdev->pdev);
+       /* reset */
+       radeon_pci_config_reset(rdev);
+
+       udelay(100);
+
+       /* wait for asic to come out of reset */
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+
+       /* does asic init need to be run first??? */
+       if (rdev->flags & RADEON_IS_IGP)
+               kv_restore_regs_for_reset(rdev, &kv_save);
+}
+
 /**
  * cik_asic_reset - soft reset GPU
  *
@@ -4870,10 +5084,17 @@ int cik_asic_reset(struct radeon_device *rdev)
        if (reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, true);
 
+       /* try soft reset */
        cik_gpu_soft_reset(rdev, reset_mask);
 
        reset_mask = cik_gpu_check_soft_reset(rdev);
 
+       /* try pci config reset */
+       if (reset_mask && radeon_hard_reset)
+               cik_gpu_pci_config_reset(rdev);
+
+       reset_mask = cik_gpu_check_soft_reset(rdev);
+
        if (!reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -5138,20 +5359,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
                                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
-       /* TC cache setup ??? */
-       WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
-       WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
-       WREG32(TC_CFG_L1_STORE_POLICY, 0);
-
-       WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
-       WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
-       WREG32(TC_CFG_L2_STORE_POLICY0, 0);
-       WREG32(TC_CFG_L2_STORE_POLICY1, 0);
-       WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
-
-       WREG32(TC_CFG_L1_VOLATILE, 0);
-       WREG32(TC_CFG_L2_VOLATILE, 0);
-
        if (rdev->family == CHIP_KAVERI) {
                u32 tmp = RREG32(CHUB_CONTROL);
                tmp &= ~BYPASS_VM;
@@ -5367,16 +5574,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, VMID(0));
 
        /* HDP flush */
-       /* We should be using the WAIT_REG_MEM packet here like in
-        * cik_fence_ring_emit(), but it causes the CP to hang in this
-        * context...
-        */
-       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(0)));
-       radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-       radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, 0);
+       cik_hdp_flush_cp_ring_emit(rdev, ridx);
 
        /* bits 0-15 are the VM contexts0-15 */
        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -7503,26 +7701,7 @@ static int cik_startup(struct radeon_device *rdev)
 
        cik_mc_program(rdev);
 
-       if (rdev->flags & RADEON_IS_IGP) {
-               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-                   !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
-                       r = cik_init_microcode(rdev);
-                       if (r) {
-                               DRM_ERROR("Failed to load firmware!\n");
-                               return r;
-                       }
-               }
-       } else {
-               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-                   !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
-                   !rdev->mc_fw) {
-                       r = cik_init_microcode(rdev);
-                       if (r) {
-                               DRM_ERROR("Failed to load firmware!\n");
-                               return r;
-                       }
-               }
-
+       if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
                r = ci_mc_load_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
@@ -7627,7 +7806,6 @@ static int cik_startup(struct radeon_device *rdev)
 
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            CP_RB0_RPTR, CP_RB0_WPTR,
                             PACKET3(PACKET3_NOP, 0x3FFF));
        if (r)
                return r;
@@ -7636,7 +7814,6 @@ static int cik_startup(struct radeon_device *rdev)
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
-                            CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
                             PACKET3(PACKET3_NOP, 0x3FFF));
        if (r)
                return r;
@@ -7648,7 +7825,6 @@ static int cik_startup(struct radeon_device *rdev)
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
-                            CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
                             PACKET3(PACKET3_NOP, 0x3FFF));
        if (r)
                return r;
@@ -7660,16 +7836,12 @@ static int cik_startup(struct radeon_device *rdev)
 
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                            SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
-                            SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
                             SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
        if (r)
                return r;
 
        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-                            SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
-                            SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
                             SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
        if (r)
                return r;
@@ -7685,7 +7857,6 @@ static int cik_startup(struct radeon_device *rdev)
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
        if (ring->ring_size) {
                r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
                                     RADEON_CP_PACKET2);
                if (!r)
                        r = uvd_v1_0_init(rdev);
@@ -7731,6 +7902,8 @@ int cik_resume(struct radeon_device *rdev)
        /* init golden registers */
        cik_init_golden_registers(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = cik_startup(rdev);
        if (r) {
@@ -7754,6 +7927,7 @@ int cik_resume(struct radeon_device *rdev)
  */
 int cik_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        dce6_audio_fini(rdev);
        radeon_vm_manager_fini(rdev);
        cik_cp_enable(rdev, false);
@@ -7835,6 +8009,30 @@ int cik_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       if (rdev->flags & RADEON_IS_IGP) {
+               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+                   !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
+                       r = cik_init_microcode(rdev);
+                       if (r) {
+                               DRM_ERROR("Failed to load firmware!\n");
+                               return r;
+                       }
+               }
+       } else {
+               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+                   !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
+                   !rdev->mc_fw) {
+                       r = cik_init_microcode(rdev);
+                       if (r) {
+                               DRM_ERROR("Failed to load firmware!\n");
+                               return r;
+                       }
+               }
+       }
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
@@ -7915,6 +8113,7 @@ int cik_init(struct radeon_device *rdev)
  */
 void cik_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        cik_cp_fini(rdev);
        cik_sdma_fini(rdev);
        cik_fini_pg(rdev);
index d08b83c6267b4cde4ff5ce0ad71754c1e5e446a1..1ecb3f1070e35c6ed3516e5f325eeeb61bc795b2 100644 (file)
@@ -51,6 +51,75 @@ u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
  * buffers.
  */
 
+/**
+ * cik_sdma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
+                          struct radeon_ring *ring)
+{
+       u32 rptr, reg;
+
+       if (rdev->wb.enabled) {
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
+       } else {
+               if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+                       reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
+               else
+                       reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;
+
+               rptr = RREG32(reg);
+       }
+
+       return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
+                          struct radeon_ring *ring)
+{
+       u32 reg;
+
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+       else
+               reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+       return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring)
+{
+       u32 reg;
+
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
+       else
+               reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
+
+       WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
 /**
  * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
  *
@@ -87,6 +156,35 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
 
 }
 
+/**
+ * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
+                                        int ridx)
+{
+       struct radeon_ring *ring = &rdev->ring[ridx];
+       u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+                         SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+       u32 ref_and_mask;
+
+       if (ridx == R600_RING_TYPE_DMA_INDEX)
+               ref_and_mask = SDMA0;
+       else
+               ref_and_mask = SDMA1;
+
+       radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+       radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+       radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+       radeon_ring_write(ring, ref_and_mask); /* reference */
+       radeon_ring_write(ring, ref_and_mask); /* mask */
+       radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
 /**
  * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
  *
@@ -111,12 +209,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
        /* generate an interrupt */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
        /* flush HDP */
-       /* We should be using the new POLL_REG_MEM special op packet here
-        * but it causes sDMA to hang sometimes
-        */
-       radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-       radeon_ring_write(ring, 0);
+       cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
 }
 
 /**
@@ -157,7 +250,9 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
        u32 rb_cntl, reg_offset;
        int i;
 
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+       if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+           (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
        for (i = 0; i < 2; i++) {
                if (i == 0)
@@ -288,7 +383,9 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
                }
        }
 
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+       if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+           (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
        return 0;
 }
@@ -747,12 +844,7 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
        radeon_ring_write(ring, VMID(0));
 
        /* flush HDP */
-       /* We should be using the new POLL_REG_MEM special op packet here
-        * but it causes sDMA to hang sometimes
-        */
-       radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-       radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-       radeon_ring_write(ring, 0);
+       cik_sdma_hdp_flush_ring_emit(rdev, ridx);
 
        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
index 5964af5e5b2d8823c4626cad40bfd2abf385fb40..98bae9d7b74d1b3fe82ab41bc5f631d7e4acfc86 100644 (file)
 
 #define ATC_MISC_CG                                    0x3350
 
+#define GMCON_RENG_EXECUTE                             0x3508
+#define        RENG_EXECUTE_ON_PWR_UP                  (1 << 0)
+#define GMCON_MISC                                     0x350c
+#define        RENG_EXECUTE_ON_REG_UPDATE              (1 << 11)
+#define        STCTRL_STUTTER_EN                       (1 << 16)
+
+#define GMCON_PGFSM_CONFIG                             0x3538
+#define GMCON_PGFSM_WRITE                              0x353c
+#define GMCON_PGFSM_READ                               0x3540
+#define GMCON_MISC3                                    0x3544
+
 #define MC_SEQ_CNTL_3                                     0x3600
 #       define CAC_EN                                     (1 << 31)
 #define MC_SEQ_G5PDX_CTRL                                 0x3604
index 920e1e4a52c52c1edefd84e727a0fc13c2dde39b..cf783fc0ef21920f7d0e50156bddbd9c24299150 100644 (file)
@@ -1905,21 +1905,6 @@ int cypress_dpm_enable(struct radeon_device *rdev)
        if (pi->mg_clock_gating)
                cypress_mg_clock_gating_enable(rdev, true);
 
-       if (rdev->irq.installed &&
-           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-               PPSMC_Result result;
-
-               ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-               if (ret)
-                       return ret;
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-               result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
-               if (result != PPSMC_Result_OK)
-                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-       }
-
        rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
        return 0;
index 9702e55e924e8327740869eb34ed455dcd908db3..f2b9e21ce4da063a03004c4705a3662b2e78c437 100644 (file)
@@ -146,6 +146,7 @@ extern u32 si_get_csb_size(struct radeon_device *rdev);
 extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
 extern u32 cik_get_csb_size(struct radeon_device *rdev);
 extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
 static const u32 evergreen_golden_registers[] =
 {
@@ -3867,6 +3868,48 @@ static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        evergreen_print_gpu_status_regs(rdev);
 }
 
+void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+       struct evergreen_mc_save save;
+       u32 tmp, i;
+
+       dev_info(rdev->dev, "GPU pci config reset\n");
+
+       /* disable dpm? */
+
+       /* Disable CP parsing/prefetching */
+       WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+       udelay(50);
+       /* Disable DMA */
+       tmp = RREG32(DMA_RB_CNTL);
+       tmp &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL, tmp);
+       /* XXX other engines? */
+
+       /* halt the rlc */
+       r600_rlc_stop(rdev);
+
+       udelay(50);
+
+       /* set mclk/sclk to bypass */
+       rv770_set_clk_bypass_mode(rdev);
+       /* disable BM */
+       pci_clear_master(rdev->pdev);
+       /* disable mem access */
+       evergreen_mc_stop(rdev, &save);
+       if (evergreen_mc_wait_for_idle(rdev)) {
+               dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+       }
+       /* reset */
+       radeon_pci_config_reset(rdev);
+       /* wait for asic to come out of reset */
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+}
+
 int evergreen_asic_reset(struct radeon_device *rdev)
 {
        u32 reset_mask;
@@ -3876,10 +3919,17 @@ int evergreen_asic_reset(struct radeon_device *rdev)
        if (reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, true);
 
+       /* try soft reset */
        evergreen_gpu_soft_reset(rdev, reset_mask);
 
        reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
+       /* try pci config reset */
+       if (reset_mask && radeon_hard_reset)
+               evergreen_gpu_pci_config_reset(rdev);
+
+       reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
        if (!reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -4298,8 +4348,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
-       /* only one DAC on DCE6 */
-       if (!ASIC_IS_DCE6(rdev))
+       /* only one DAC on DCE5 */
+       if (!ASIC_IS_DCE5(rdev))
                WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
        WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
@@ -5109,27 +5159,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 
        evergreen_mc_program(rdev);
 
-       if (ASIC_IS_DCE5(rdev)) {
-               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
-                       r = ni_init_microcode(rdev);
-                       if (r) {
-                               DRM_ERROR("Failed to load firmware!\n");
-                               return r;
-                       }
-               }
+       if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
                r = ni_mc_load_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
                        return r;
                }
-       } else {
-               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-                       r = r600_init_microcode(rdev);
-                       if (r) {
-                               DRM_ERROR("Failed to load firmware!\n");
-                               return r;
-                       }
-               }
        }
 
        if (rdev->flags & RADEON_IS_AGP) {
@@ -5199,14 +5234,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                            DMA_RB_RPTR, DMA_RB_WPTR,
                             DMA_PACKET(DMA_PACKET_NOP, 0, 0));
        if (r)
                return r;
@@ -5224,7 +5257,6 @@ static int evergreen_startup(struct radeon_device *rdev)
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
        if (ring->ring_size) {
                r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
                                     RADEON_CP_PACKET2);
                if (!r)
                        r = uvd_v1_0_init(rdev);
@@ -5267,6 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
        /* init golden registers */
        evergreen_init_golden_registers(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
@@ -5281,6 +5315,7 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r600_audio_fini(rdev);
        uvd_v1_0_fini(rdev);
        radeon_uvd_suspend(rdev);
@@ -5357,6 +5392,27 @@ int evergreen_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       if (ASIC_IS_DCE5(rdev)) {
+               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+                       r = ni_init_microcode(rdev);
+                       if (r) {
+                               DRM_ERROR("Failed to load firmware!\n");
+                               return r;
+                       }
+               }
+       } else {
+               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+                       r = r600_init_microcode(rdev);
+                       if (r) {
+                               DRM_ERROR("Failed to load firmware!\n");
+                               return r;
+                       }
+               }
+       }
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -5409,6 +5465,7 @@ int evergreen_init(struct radeon_device *rdev)
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r600_audio_fini(rdev);
        r700_cp_fini(rdev);
        r600_dma_fini(rdev);
index eb8ac315f92faa3cbd58468b6bf9ba360a8c0986..c7cac07f139b2106041208179d124d8ff5ba20b0 100644 (file)
@@ -967,7 +967,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
        if (track->cb_dirty) {
                tmp = track->cb_target_mask;
                for (i = 0; i < 8; i++) {
-                       if ((tmp >> (i * 4)) & 0xF) {
+                       u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+                       if (format != V_028C70_COLOR_INVALID &&
+                           (tmp >> (i * 4)) & 0xF) {
                                /* at least one component is enabled */
                                if (track->cb_color_bo[i] == NULL) {
                                        dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
index 8a4e641f0e3c3712cfbcb543372fde5707e6cce5..a0f63ff5a5e97cb47100abce14bc249083c61479 100644 (file)
@@ -33,6 +33,7 @@
 #define EVERGREEN_PIF_PHY0_DATA                         0xc
 #define EVERGREEN_PIF_PHY1_INDEX                        0x10
 #define EVERGREEN_PIF_PHY1_DATA                         0x14
+#define EVERGREEN_MM_INDEX_HI                           0x18
 
 #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0x310
 #define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0x324
index 17f990798992dae5d6335726a8f3b4c259659f89..f9c7963b3ee6b38730cdf6d422c2b612f0d1c904 100644 (file)
 #define        CG_SPLL_FUNC_CNTL_2                             0x604
 #define                SCLK_MUX_SEL(x)                         ((x) << 0)
 #define                SCLK_MUX_SEL_MASK                       (0x1ff << 0)
+#define                SCLK_MUX_UPDATE                         (1 << 26)
 #define        CG_SPLL_FUNC_CNTL_3                             0x608
 #define                SPLL_FB_DIV(x)                          ((x) << 0)
 #define                SPLL_FB_DIV_MASK                        (0x3ffffff << 0)
 #define                SPLL_DITHEN                             (1 << 28)
+#define        CG_SPLL_STATUS                                  0x60c
+#define                SPLL_CHG_STATUS                         (1 << 1)
 
 #define MPLL_CNTL_MODE                                  0x61c
+#       define MPLL_MCLK_SEL                            (1 << 11)
 #       define SS_SSEN                                  (1 << 24)
 #       define SS_DSMODE_EN                             (1 << 25)
 
index b41905573cd2a431862b3684f4d43ad445ecd15d..b6e01d5d2cced24edc67e773c84cba33a6d5fef8 100644 (file)
@@ -1126,11 +1126,6 @@ int kv_dpm_enable(struct radeon_device *rdev)
        struct kv_power_info *pi = kv_get_pi(rdev);
        int ret;
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_HDP), false);
-
        ret = kv_process_firmware_header(rdev);
        if (ret) {
                DRM_ERROR("kv_process_firmware_header failed\n");
@@ -1215,6 +1210,21 @@ int kv_dpm_enable(struct radeon_device *rdev)
 
        kv_reset_acp_boot_level(rdev);
 
+       ret = kv_smc_bapm_enable(rdev, false);
+       if (ret) {
+               DRM_ERROR("kv_smc_bapm_enable failed\n");
+               return ret;
+       }
+
+       kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+       return ret;
+}
+
+int kv_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1226,35 +1236,17 @@ int kv_dpm_enable(struct radeon_device *rdev)
                radeon_irq_set(rdev);
        }
 
-       ret = kv_smc_bapm_enable(rdev, false);
-       if (ret) {
-               DRM_ERROR("kv_smc_bapm_enable failed\n");
-               return ret;
-       }
-
        /* powerdown unused blocks for now */
        kv_dpm_powergate_acp(rdev, true);
        kv_dpm_powergate_samu(rdev, true);
        kv_dpm_powergate_vce(rdev, true);
        kv_dpm_powergate_uvd(rdev, true);
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_HDP), true);
-
-       kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
        return ret;
 }
 
 void kv_dpm_disable(struct radeon_device *rdev)
 {
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_HDP), false);
-
        kv_smc_bapm_enable(rdev, false);
 
        /* powerup blocks */
@@ -1779,11 +1771,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
        /*struct radeon_ps *old_ps = &pi->current_rps;*/
        int ret;
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_HDP), false);
-
        if (pi->bapm_enable) {
                ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
                if (ret) {
@@ -1849,11 +1836,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                }
        }
 
-       cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                            RADEON_CG_BLOCK_SDMA |
-                            RADEON_CG_BLOCK_BIF |
-                            RADEON_CG_BLOCK_HDP), true);
-
        return 0;
 }
 
index f59a9e9fccf8a803ce6097e4e185d8824ad5f714..ea932ac66fc6647da43a8cf775b60ade25d478de 100644 (file)
@@ -174,6 +174,7 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -1330,13 +1331,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 {
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+       u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+               PACKET3_SH_ACTION_ENA;
 
        /* flush read cache over gart for this vmid */
-       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-       radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-       radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+       radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 10); /* poll interval */
@@ -1352,6 +1352,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
        struct radeon_ring *ring = &rdev->ring[ib->ring];
+       u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+               PACKET3_SH_ACTION_ENA;
 
        /* set to DX10/11 mode */
        radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1376,14 +1378,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
                          (ib->vm ? (ib->vm->id << 24) : 0));
 
        /* flush read cache over gart for this vmid */
-       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-       radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-       radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+       radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
-       radeon_ring_write(ring, 10); /* poll interval */
+       radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
 }
 
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -1391,13 +1390,63 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
        if (enable)
                WREG32(CP_ME_CNTL, 0);
        else {
-               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+               if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+                       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
                WREG32(SCRATCH_UMSK, 0);
                rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        }
 }
 
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+                       struct radeon_ring *ring)
+{
+       u32 rptr;
+
+       if (rdev->wb.enabled)
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
+       else {
+               if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+                       rptr = RREG32(CP_RB0_RPTR);
+               else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+                       rptr = RREG32(CP_RB1_RPTR);
+               else
+                       rptr = RREG32(CP_RB2_RPTR);
+       }
+
+       return rptr;
+}
+
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+                       struct radeon_ring *ring)
+{
+       u32 wptr;
+
+       if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
+               wptr = RREG32(CP_RB0_WPTR);
+       else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
+               wptr = RREG32(CP_RB1_WPTR);
+       else
+               wptr = RREG32(CP_RB2_WPTR);
+
+       return wptr;
+}
+
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring)
+{
+       if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+               WREG32(CP_RB0_WPTR, ring->wptr);
+               (void)RREG32(CP_RB0_WPTR);
+       } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
+               WREG32(CP_RB1_WPTR, ring->wptr);
+               (void)RREG32(CP_RB1_WPTR);
+       } else {
+               WREG32(CP_RB2_WPTR, ring->wptr);
+               (void)RREG32(CP_RB2_WPTR);
+       }
+}
+
 static int cayman_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
@@ -1526,6 +1575,16 @@ static int cayman_cp_resume(struct radeon_device *rdev)
                CP_RB1_BASE,
                CP_RB2_BASE
        };
+       static const unsigned cp_rb_rptr[] = {
+               CP_RB0_RPTR,
+               CP_RB1_RPTR,
+               CP_RB2_RPTR
+       };
+       static const unsigned cp_rb_wptr[] = {
+               CP_RB0_WPTR,
+               CP_RB1_WPTR,
+               CP_RB2_WPTR
+       };
        struct radeon_ring *ring;
        int i, r;
 
@@ -1584,8 +1643,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
                WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
 
                ring->rptr = ring->wptr = 0;
-               WREG32(ring->rptr_reg, ring->rptr);
-               WREG32(ring->wptr_reg, ring->wptr);
+               WREG32(cp_rb_rptr[i], ring->rptr);
+               WREG32(cp_rb_wptr[i], ring->wptr);
 
                mdelay(1);
                WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
@@ -1605,6 +1664,9 @@ static int cayman_cp_resume(struct radeon_device *rdev)
                return r;
        }
 
+       if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
        return 0;
 }
 
@@ -1831,8 +1893,10 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
        reset_mask = cayman_gpu_check_soft_reset(rdev);
 
-       if (!reset_mask)
-               r600_set_bios_scratch_engine_hung(rdev, false);
+       if (reset_mask)
+               evergreen_gpu_pci_config_reset(rdev);
+
+       r600_set_bios_scratch_engine_hung(rdev, false);
 
        return 0;
 }
@@ -1878,23 +1942,7 @@ static int cayman_startup(struct radeon_device *rdev)
 
        evergreen_mc_program(rdev);
 
-       if (rdev->flags & RADEON_IS_IGP) {
-               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-                       r = ni_init_microcode(rdev);
-                       if (r) {
-                               DRM_ERROR("Failed to load firmware!\n");
-                               return r;
-                       }
-               }
-       } else {
-               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
-                       r = ni_init_microcode(rdev);
-                       if (r) {
-                               DRM_ERROR("Failed to load firmware!\n");
-                               return r;
-                       }
-               }
-
+       if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
                r = ni_mc_load_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
@@ -1981,23 +2029,18 @@ static int cayman_startup(struct radeon_device *rdev)
        evergreen_irq_set(rdev);
 
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            CP_RB0_RPTR, CP_RB0_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                            DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
-                            DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
                             DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        if (r)
                return r;
 
        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-                            DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
-                            DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
                             DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        if (r)
                return r;
@@ -2016,7 +2059,6 @@ static int cayman_startup(struct radeon_device *rdev)
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
        if (ring->ring_size) {
                r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
                                     RADEON_CP_PACKET2);
                if (!r)
                        r = uvd_v1_0_init(rdev);
@@ -2063,6 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
        /* init golden registers */
        ni_init_golden_registers(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = cayman_startup(rdev);
        if (r) {
@@ -2075,6 +2119,7 @@ int cayman_resume(struct radeon_device *rdev)
 
 int cayman_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        if (ASIC_IS_DCE6(rdev))
                dce6_audio_fini(rdev);
        else
@@ -2145,6 +2190,27 @@ int cayman_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       if (rdev->flags & RADEON_IS_IGP) {
+               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+                       r = ni_init_microcode(rdev);
+                       if (r) {
+                               DRM_ERROR("Failed to load firmware!\n");
+                               return r;
+                       }
+               }
+       } else {
+               if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+                       r = ni_init_microcode(rdev);
+                       if (r) {
+                               DRM_ERROR("Failed to load firmware!\n");
+                               return r;
+                       }
+               }
+       }
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
@@ -2204,6 +2270,7 @@ int cayman_init(struct radeon_device *rdev)
 
 void cayman_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        cayman_cp_fini(rdev);
        cayman_dma_fini(rdev);
        r600_irq_fini(rdev);
index bdeb65ed365831db35a94a0ffae523f3c8e419dd..7cf96b15377fa8f66bfbd048b08a6f1959ea68b5 100644 (file)
@@ -42,6 +42,75 @@ u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
  * Cayman and newer support two asynchronous DMA engines.
  */
 
+/**
+ * cayman_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+                            struct radeon_ring *ring)
+{
+       u32 rptr, reg;
+
+       if (rdev->wb.enabled) {
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
+       } else {
+               if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+                       reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
+               else
+                       reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;
+
+               rptr = RREG32(reg);
+       }
+
+       return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (cayman+).
+ */
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+                          struct radeon_ring *ring)
+{
+       u32 reg;
+
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+       else
+               reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+       return (RREG32(reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * cayman_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (cayman+).
+ */
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring)
+{
+       u32 reg;
+
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
+       else
+               reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;
+
+       WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+}
+
 /**
  * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
  *
@@ -88,7 +157,9 @@ void cayman_dma_stop(struct radeon_device *rdev)
 {
        u32 rb_cntl;
 
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+       if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+           (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
        /* dma0 */
        rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
@@ -190,7 +261,9 @@ int cayman_dma_resume(struct radeon_device *rdev)
                }
        }
 
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+       if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
+           (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
        return 0;
 }
index 49c4d48f54d616b49b7af261d8fa2d71586ab8fd..c351226ecb31b0d9fa2d94a32ac23d504d61952b 100644 (file)
@@ -720,6 +720,8 @@ static const u32 cayman_sysls_enable[] =
 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
 
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
 struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
 {
         struct ni_power_info *pi = rdev->pm.dpm.priv;
@@ -3565,7 +3567,11 @@ void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
 void ni_dpm_setup_asic(struct radeon_device *rdev)
 {
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+       int r;
 
+       r = ni_mc_load_microcode(rdev);
+       if (r)
+               DRM_ERROR("Failed to load MC firmware!\n");
        ni_read_clock_registers(rdev);
        btc_read_arb_registers(rdev);
        rv770_get_memory_type(rdev);
@@ -3710,21 +3716,6 @@ int ni_dpm_enable(struct radeon_device *rdev)
        if (eg_pi->ls_clock_gating)
                ni_ls_clockgating_enable(rdev, true);
 
-       if (rdev->irq.installed &&
-           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-               PPSMC_Result result;
-
-               ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
-               if (ret)
-                       return ret;
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-               result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
-
-               if (result != PPSMC_Result_OK)
-                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-       }
-
        rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
        ni_update_current_ps(rdev, boot_ps);
index 22421bc80c0d0d0d33c8b87ef9922c3cf982fc15..d996033c243ee14f4509220a707edf50f9dbe489 100644 (file)
 #              define PACKET3_DB_ACTION_ENA        (1 << 26)
 #              define PACKET3_SH_ACTION_ENA        (1 << 27)
 #              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#              define PACKET3_ENGINE_ME            (1 << 31)
 #define        PACKET3_ME_INITIALIZE                           0x44
 #define                PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define        PACKET3_COND_WRITE                              0x45
index da43ab3288332faf705a3e673663c5fe7fbc30b3..2d532996c69795cc129cb4ac6c60a4dfc4b70808 100644 (file)
@@ -23,7 +23,7 @@
 #ifndef _PPTABLE_H
 #define _PPTABLE_H
 
-#pragma pack(push, 1)
+#pragma pack(1)
 
 typedef struct _ATOM_PPLIB_THERMALCONTROLLER
 
@@ -677,6 +677,6 @@ typedef struct _ATOM_PPLIB_PPM_Table
       ULONG  ulTjmax;
 } ATOM_PPLIB_PPM_Table;
 
-#pragma pack(pop)
+#pragma pack()
 
 #endif
index 10abc4d5a6cc396a85bb32d4b7d094b748757f99..ef024ce3f7ccfd39b1be47be9840a401b588b1dc 100644 (file)
@@ -1050,6 +1050,36 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
        return err;
 }
 
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
+{
+       u32 rptr;
+
+       if (rdev->wb.enabled)
+               rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+       else
+               rptr = RREG32(RADEON_CP_RB_RPTR);
+
+       return rptr;
+}
+
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
+{
+       u32 wptr;
+
+       wptr = RREG32(RADEON_CP_RB_WPTR);
+
+       return wptr;
+}
+
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring)
+{
+       WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+       (void)RREG32(RADEON_CP_RB_WPTR);
+}
+
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
@@ -1102,7 +1132,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        r100_cp_load_microcode(rdev);
        r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
                             RADEON_CP_PACKET2);
        if (r) {
                return r;
@@ -3913,6 +3942,8 @@ int r100_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = r100_startup(rdev);
        if (r) {
@@ -3923,6 +3954,7 @@ int r100_resume(struct radeon_device *rdev)
 
 int r100_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -3933,6 +3965,7 @@ int r100_suspend(struct radeon_device *rdev)
 
 void r100_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -4039,6 +4072,9 @@ int r100_init(struct radeon_device *rdev)
        }
        r100_set_safe_registers(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = r100_startup(rdev);
        if (r) {
index d8dd269b9159fcc0dfbed98cf8c3c2037742e671..7c63ef840e86abaf04f216201b32ee2f40323b9b 100644 (file)
@@ -1430,6 +1430,8 @@ int r300_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
@@ -1440,6 +1442,7 @@ int r300_resume(struct radeon_device *rdev)
 
 int r300_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -1452,6 +1455,7 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -1538,6 +1542,9 @@ int r300_init(struct radeon_device *rdev)
        }
        r300_set_reg_safe(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
index 60170ea5e3a228c0483c18f0fd1652b30ec7760b..84b1d5367a11f6950026337f76117bbb8edda0bb 100644 (file)
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
                OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
                for (i = 0; i < nr; ++i) {
-                       if (DRM_COPY_FROM_USER
+                       if (copy_from_user
                            (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
                                DRM_ERROR("copy cliprect faulted\n");
                                return -EFAULT;
@@ -928,12 +928,12 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
                buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
                *buf_idx *= 2; /* 8 bytes per buf */
 
-               if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+               if (copy_to_user(ref_age_base + *buf_idx,
                                &dev_priv->scratch_ages[header.scratch.reg],
                                sizeof(u32)))
                        return -EINVAL;
 
-               if (DRM_COPY_FROM_USER(&h_pending,
+               if (copy_from_user(&h_pending,
                                ref_age_base + *buf_idx + 1,
                                sizeof(u32)))
                        return -EINVAL;
@@ -943,7 +943,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
 
                h_pending--;
 
-               if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+               if (copy_to_user(ref_age_base + *buf_idx + 1,
                                        &h_pending,
                                        sizeof(u32)))
                        return -EINVAL;
index 6edf2b3a52b4d7e4ba82cc063048ffb73733a984..3768aab2710b3943261312c4fcc5184b4f18a6fb 100644 (file)
@@ -325,6 +325,8 @@ int r420_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
@@ -335,6 +337,7 @@ int r420_resume(struct radeon_device *rdev)
 
 int r420_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -348,6 +351,7 @@ int r420_suspend(struct radeon_device *rdev)
 
 void r420_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -444,6 +448,9 @@ int r420_init(struct radeon_device *rdev)
        }
        r420_set_reg_safe(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
index e1aece73b370c1ec9d6d9f86273caad0cbb14d36..e209eb75024f9dc1c441518cbe8f8d70a41844f5 100644 (file)
@@ -240,6 +240,8 @@ int r520_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = r520_startup(rdev);
        if (r) {
@@ -312,6 +314,9 @@ int r520_init(struct radeon_device *rdev)
                return r;
        rv515_set_safe_registers(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = r520_startup(rdev);
        if (r) {
index 9ad06732a78bc6d079914a6ce15e92b7e273b265..56140b4e5bb2e9fa7fc72339cc29f88b398aff1c 100644 (file)
@@ -105,6 +105,7 @@ void r600_fini(struct radeon_device *rdev);
 void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
 /**
  * r600_get_xclk - get the xclk
@@ -1644,6 +1645,67 @@ static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        r600_print_gpu_status_regs(rdev);
 }
 
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+       struct rv515_mc_save save;
+       u32 tmp, i;
+
+       dev_info(rdev->dev, "GPU pci config reset\n");
+
+       /* disable dpm? */
+
+       /* Disable CP parsing/prefetching */
+       if (rdev->family >= CHIP_RV770)
+               WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+       else
+               WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+       /* disable the RLC */
+       WREG32(RLC_CNTL, 0);
+
+       /* Disable DMA */
+       tmp = RREG32(DMA_RB_CNTL);
+       tmp &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL, tmp);
+
+       mdelay(50);
+
+       /* set mclk/sclk to bypass */
+       if (rdev->family >= CHIP_RV770)
+               rv770_set_clk_bypass_mode(rdev);
+       /* disable BM */
+       pci_clear_master(rdev->pdev);
+       /* disable mem access */
+       rv515_mc_stop(rdev, &save);
+       if (r600_mc_wait_for_idle(rdev)) {
+               dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+       }
+
+       /* BIF reset workaround.  Not sure if this is needed on 6xx */
+       tmp = RREG32(BUS_CNTL);
+       tmp |= VGA_COHE_SPEC_TIMER_DIS;
+       WREG32(BUS_CNTL, tmp);
+
+       tmp = RREG32(BIF_SCRATCH0);
+
+       /* reset */
+       radeon_pci_config_reset(rdev);
+       mdelay(1);
+
+       /* BIF reset workaround.  Not sure if this is needed on 6xx */
+       tmp = SOFT_RESET_BIF;
+       WREG32(SRBM_SOFT_RESET, tmp);
+       mdelay(1);
+       WREG32(SRBM_SOFT_RESET, 0);
+
+       /* wait for asic to come out of reset */
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+}
+
 int r600_asic_reset(struct radeon_device *rdev)
 {
        u32 reset_mask;
@@ -1653,10 +1715,17 @@ int r600_asic_reset(struct radeon_device *rdev)
        if (reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, true);
 
+       /* try soft reset */
        r600_gpu_soft_reset(rdev, reset_mask);
 
        reset_mask = r600_gpu_check_soft_reset(rdev);
 
+       /* try pci config reset */
+       if (reset_mask && radeon_hard_reset)
+               r600_gpu_pci_config_reset(rdev);
+
+       reset_mask = r600_gpu_check_soft_reset(rdev);
+
        if (!reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -2185,7 +2254,8 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+       if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        WREG32(SCRATCH_UMSK, 0);
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -2382,6 +2452,36 @@ out:
        return err;
 }
 
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
+{
+       u32 rptr;
+
+       if (rdev->wb.enabled)
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
+       else
+               rptr = RREG32(R600_CP_RB_RPTR);
+
+       return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring)
+{
+       u32 wptr;
+
+       wptr = RREG32(R600_CP_RB_WPTR);
+
+       return wptr;
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring)
+{
+       WREG32(R600_CP_RB_WPTR, ring->wptr);
+       (void)RREG32(R600_CP_RB_WPTR);
+}
+
 static int r600_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
@@ -2513,6 +2613,10 @@ int r600_cp_resume(struct radeon_device *rdev)
                ring->ready = false;
                return r;
        }
+
+       if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
        return 0;
 }
 
@@ -2607,14 +2711,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
        struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+               PACKET3_SH_ACTION_ENA;
+
+       if (rdev->family >= CHIP_RV770)
+               cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
        if (rdev->wb.use_event) {
                u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
                /* flush read cache over gart */
                radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-               radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-                                       PACKET3_VC_ACTION_ENA |
-                                       PACKET3_SH_ACTION_ENA);
+               radeon_ring_write(ring, cp_coher_cntl);
                radeon_ring_write(ring, 0xFFFFFFFF);
                radeon_ring_write(ring, 0);
                radeon_ring_write(ring, 10); /* poll interval */
@@ -2628,9 +2735,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
        } else {
                /* flush read cache over gart */
                radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
-               radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
-                                       PACKET3_VC_ACTION_ENA |
-                                       PACKET3_SH_ACTION_ENA);
+               radeon_ring_write(ring, cp_coher_cntl);
                radeon_ring_write(ring, 0xFFFFFFFF);
                radeon_ring_write(ring, 0);
                radeon_ring_write(ring, 10); /* poll interval */
@@ -2775,14 +2880,6 @@ static int r600_startup(struct radeon_device *rdev)
 
        r600_mc_program(rdev);
 
-       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-               r = r600_init_microcode(rdev);
-               if (r) {
-                       DRM_ERROR("Failed to load firmware!\n");
-                       return r;
-               }
-       }
-
        if (rdev->flags & RADEON_IS_AGP) {
                r600_agp_enable(rdev);
        } else {
@@ -2803,12 +2900,6 @@ static int r600_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
-               return r;
-       }
-
        /* Enable IRQ */
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
@@ -2826,18 +2917,10 @@ static int r600_startup(struct radeon_device *rdev)
 
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
-       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
-       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                            DMA_RB_RPTR, DMA_RB_WPTR,
-                            DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
-       if (r)
-               return r;
-
        r = r600_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -2845,10 +2928,6 @@ static int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       r = r600_dma_resume(rdev);
-       if (r)
-               return r;
-
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2889,6 +2968,8 @@ int r600_resume(struct radeon_device *rdev)
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
@@ -2902,9 +2983,9 @@ int r600_resume(struct radeon_device *rdev)
 
 int r600_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r600_audio_fini(rdev);
        r600_cp_stop(rdev);
-       r600_dma_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
@@ -2970,12 +3051,20 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+               r = r600_init_microcode(rdev);
+               if (r) {
+                       DRM_ERROR("Failed to load firmware!\n");
+                       return r;
+               }
+       }
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
-       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
-       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
-
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -2988,7 +3077,6 @@ int r600_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
-               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -3002,9 +3090,9 @@ int r600_init(struct radeon_device *rdev)
 
 void r600_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r600_audio_fini(rdev);
        r600_cp_fini(rdev);
-       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
index d8eb48bff0ed204e9c52538342a11dcd1bd5b8ff..8c9b7e26533c6c81635a9dafa20e7cecb164b167 100644 (file)
@@ -2515,7 +2515,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
                buf = radeon_freelist_get(dev);
                if (!buf) {
                        DRM_DEBUG("EAGAIN\n");
-                       if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+                       if (copy_to_user(tex->image, image, sizeof(*image)))
                                return -EFAULT;
                        return -EAGAIN;
                }
@@ -2528,7 +2528,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
                buffer =
                    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
 
-               if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+               if (copy_from_user(buffer, data, pass_size)) {
                        DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
                        return -EFAULT;
                }
index 5dceea6f71ae450cf925b0d7e119783c801d6ebd..7b399dc5fd5492d1e53dcaa5b5be728164dc85b2 100644 (file)
@@ -749,7 +749,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
                }
 
                for (i = 0; i < 8; i++) {
-                       if ((tmp >> (i * 4)) & 0xF) {
+                       u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+                       if (format != V_0280A0_COLOR_INVALID &&
+                           (tmp >> (i * 4)) & 0xF) {
                                /* at least one component is enabled */
                                if (track->cb_color_bo[i] == NULL) {
                                        dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
@@ -2386,7 +2389,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
        ib_chunk = &parser.chunks[parser.chunk_ib_idx];
        parser.ib.length_dw = ib_chunk->length_dw;
        *l = parser.ib.length_dw;
-       if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+       if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
                r = -EFAULT;
                r600_cs_parser_fini(&parser, r);
                return r;
index 7844d15c139fcb97247880d596ab4354d4bea8d5..b2d4c91e6272e4fb9dfbfbf669a55705c920eee2 100644 (file)
@@ -51,7 +51,14 @@ u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
 uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
 {
-       return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+       u32 rptr;
+
+       if (rdev->wb.enabled)
+               rptr = rdev->wb.wb[ring->rptr_offs/4];
+       else
+               rptr = RREG32(DMA_RB_RPTR);
+
+       return (rptr & 0x3fffc) >> 2;
 }
 
 /**
@@ -65,7 +72,7 @@ uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
 uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
 {
-       return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+       return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
 }
 
 /**
@@ -79,7 +86,7 @@ uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
 void r600_dma_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring)
 {
-       WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+       WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
 }
 
 /**
@@ -93,7 +100,8 @@ void r600_dma_stop(struct radeon_device *rdev)
 {
        u32 rb_cntl = RREG32(DMA_RB_CNTL);
 
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+       if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL, rb_cntl);
@@ -180,7 +188,8 @@ int r600_dma_resume(struct radeon_device *rdev)
                return r;
        }
 
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+       if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
        return 0;
 }
index 5513d8f06252e13e11e27e9d21d5aecae775b5a8..e4cc9b314ce974fe5caacf91a000cb9a76a584d6 100644 (file)
@@ -729,8 +729,8 @@ bool r600_is_uvd_state(u32 class, u32 class2)
        return false;
 }
 
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
-                                      int min_temp, int max_temp)
+static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
+                                             int min_temp, int max_temp)
 {
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
@@ -777,6 +777,22 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
        }
 }
 
+int r600_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
+       if (rdev->irq.installed &&
+           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+               ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+               if (ret)
+                       return ret;
+               rdev->irq.dpm_thermal = true;
+               radeon_irq_set(rdev);
+       }
+
+       return 0;
+}
+
 union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
index 1000bf9719f2449af5d15a153b147f19c6af25e4..07eab2b04e81b5db420a6ee96152c8b8e94e4092 100644 (file)
@@ -213,8 +213,6 @@ void r600_wait_for_power_level(struct radeon_device *rdev,
 void r600_start_dpm(struct radeon_device *rdev);
 void r600_stop_dpm(struct radeon_device *rdev);
 
-int r600_set_thermal_temperature_range(struct radeon_device *rdev,
-                                      int min_temp, int max_temp);
 bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor);
 
 int r600_parse_extended_power_table(struct radeon_device *rdev);
index b7d3ecba43e34d3b559ee072800101532150c631..3016fc14f502c49a61c2343c3b2ca877d2590109 100644 (file)
@@ -250,7 +250,7 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
                 value, ~HDMI0_AUDIO_TEST_EN);
 }
 
-void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
index ebe38724a9765765041eaa245fc7994c4fec01fb..37455f65107f7fc8c0d2ff8a6a14d5abe43c13af 100644 (file)
 #define RLC_UCODE_DATA                                    0x3f30
 
 #define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_BIF                             (1 << 1)
 #       define SOFT_RESET_DMA                             (1 << 12)
 #       define SOFT_RESET_RLC                             (1 << 13)
 #       define SOFT_RESET_UVD                             (1 << 18)
 #       define RV770_SOFT_RESET_DMA                       (1 << 20)
 
+#define BIF_SCRATCH0                                      0x5438
+
+#define BUS_CNTL                                          0x5420
+#       define BIOS_ROM_DIS                               (1 << 1)
+#       define VGA_COHE_SPEC_TIMER_DIS                    (1 << 9)
+
 #define CP_INT_CNTL                                       0xc124
 #       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
 #       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
 #              define PACKET3_VC_ACTION_ENA        (1 << 24)
 #              define PACKET3_CB_ACTION_ENA        (1 << 25)
index 45e1f447bc794c677a8e83830fd318585c2d7f7e..4a8ac1cd6b4c65582690e035e026aa6f1b125e00 100644 (file)
@@ -99,6 +99,7 @@ extern int radeon_fastfb;
 extern int radeon_dpm;
 extern int radeon_aspm;
 extern int radeon_runtime_pm;
+extern int radeon_hard_reset;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -139,6 +140,9 @@ extern int radeon_runtime_pm;
 #define RADEON_VA_RESERVED_SIZE                        (8 << 20)
 #define RADEON_IB_VM_MAX_SIZE                  (64 << 10)
 
+/* hard reset data */
+#define RADEON_ASIC_RESET_DATA                  0x39d5e86b
+
 /* reset flags */
 #define RADEON_RESET_GFX                       (1 << 0)
 #define RADEON_RESET_COMPUTE                   (1 << 1)
@@ -252,6 +256,7 @@ struct radeon_clock {
  * Power management
  */
 int radeon_pm_init(struct radeon_device *rdev);
+int radeon_pm_late_init(struct radeon_device *rdev);
 void radeon_pm_fini(struct radeon_device *rdev);
 void radeon_pm_compute_clocks(struct radeon_device *rdev);
 void radeon_pm_suspend(struct radeon_device *rdev);
@@ -413,6 +418,11 @@ struct radeon_mman {
        struct ttm_bo_device            bdev;
        bool                            mem_global_referenced;
        bool                            initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+       struct dentry                   *vram;
+       struct dentry                   *gtt;
+#endif
 };
 
 /* bo virtual address in a specific vm */
@@ -779,13 +789,11 @@ struct radeon_ring {
        volatile uint32_t       *ring;
        unsigned                rptr;
        unsigned                rptr_offs;
-       unsigned                rptr_reg;
        unsigned                rptr_save_reg;
        u64                     next_rptr_gpu_addr;
        volatile u32            *next_rptr_cpu_addr;
        unsigned                wptr;
        unsigned                wptr_old;
-       unsigned                wptr_reg;
        unsigned                ring_size;
        unsigned                ring_free_dw;
        int                     count_dw;
@@ -859,6 +867,8 @@ struct radeon_vm {
        struct radeon_fence             *fence;
        /* last flush or NULL if we still need to flush */
        struct radeon_fence             *last_flush;
+       /* last use of vmid */
+       struct radeon_fence             *last_id_use;
 };
 
 struct radeon_vm_manager {
@@ -949,7 +959,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
 int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
                        unsigned size, uint32_t *data);
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
-                    unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
+                    unsigned rptr_offs, u32 nop);
 void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
@@ -1775,6 +1785,7 @@ struct radeon_asic {
                int (*init)(struct radeon_device *rdev);
                void (*setup_asic)(struct radeon_device *rdev);
                int (*enable)(struct radeon_device *rdev);
+               int (*late_enable)(struct radeon_device *rdev);
                void (*disable)(struct radeon_device *rdev);
                int (*pre_set_power_state)(struct radeon_device *rdev);
                int (*set_power_state)(struct radeon_device *rdev);
@@ -2650,6 +2661,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
 #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
 #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
+#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
 #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
 #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
 #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
@@ -2668,6 +2680,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 /* Common functions */
 /* AGP */
 extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void radeon_pci_config_reset(struct radeon_device *rdev);
 extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
index c0425bb6223a99fae5eab07a069ab73ace19eec7..f74db43346fd86f4658e11cc62e6e08134127936 100644 (file)
@@ -182,9 +182,9 @@ static struct radeon_asic_ring r100_gfx_ring = {
        .ring_test = &r100_ring_test,
        .ib_test = &r100_ib_test,
        .is_lockup = &r100_gpu_is_lockup,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &r100_gfx_get_rptr,
+       .get_wptr = &r100_gfx_get_wptr,
+       .set_wptr = &r100_gfx_set_wptr,
 };
 
 static struct radeon_asic r100_asic = {
@@ -330,9 +330,9 @@ static struct radeon_asic_ring r300_gfx_ring = {
        .ring_test = &r100_ring_test,
        .ib_test = &r100_ib_test,
        .is_lockup = &r100_gpu_is_lockup,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &r100_gfx_get_rptr,
+       .get_wptr = &r100_gfx_get_wptr,
+       .set_wptr = &r100_gfx_set_wptr,
 };
 
 static struct radeon_asic r300_asic = {
@@ -883,9 +883,9 @@ static struct radeon_asic_ring r600_gfx_ring = {
        .ring_test = &r600_ring_test,
        .ib_test = &r600_ib_test,
        .is_lockup = &r600_gfx_is_lockup,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &r600_gfx_get_rptr,
+       .get_wptr = &r600_gfx_get_wptr,
+       .set_wptr = &r600_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring r600_dma_ring = {
@@ -1045,6 +1045,7 @@ static struct radeon_asic rv6xx_asic = {
                .init = &rv6xx_dpm_init,
                .setup_asic = &rv6xx_setup_asic,
                .enable = &rv6xx_dpm_enable,
+               .late_enable = &r600_dpm_late_enable,
                .disable = &rv6xx_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &rv6xx_dpm_set_power_state,
@@ -1135,6 +1136,7 @@ static struct radeon_asic rs780_asic = {
                .init = &rs780_dpm_init,
                .setup_asic = &rs780_dpm_setup_asic,
                .enable = &rs780_dpm_enable,
+               .late_enable = &r600_dpm_late_enable,
                .disable = &rs780_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &rs780_dpm_set_power_state,
@@ -1239,6 +1241,7 @@ static struct radeon_asic rv770_asic = {
                .init = &rv770_dpm_init,
                .setup_asic = &rv770_dpm_setup_asic,
                .enable = &rv770_dpm_enable,
+               .late_enable = &rv770_dpm_late_enable,
                .disable = &rv770_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &rv770_dpm_set_power_state,
@@ -1267,9 +1270,9 @@ static struct radeon_asic_ring evergreen_gfx_ring = {
        .ring_test = &r600_ring_test,
        .ib_test = &r600_ib_test,
        .is_lockup = &evergreen_gfx_is_lockup,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &r600_gfx_get_rptr,
+       .get_wptr = &r600_gfx_get_wptr,
+       .set_wptr = &r600_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring evergreen_dma_ring = {
@@ -1357,6 +1360,7 @@ static struct radeon_asic evergreen_asic = {
                .init = &cypress_dpm_init,
                .setup_asic = &cypress_dpm_setup_asic,
                .enable = &cypress_dpm_enable,
+               .late_enable = &rv770_dpm_late_enable,
                .disable = &cypress_dpm_disable,
                .pre_set_power_state = &r600_dpm_pre_set_power_state,
                .set_power_state = &cypress_dpm_set_power_state,
@@ -1449,6 +1453,7 @@ static struct radeon_asic sumo_asic = {
                .init = &sumo_dpm_init,
                .setup_asic = &sumo_dpm_setup_asic,
                .enable = &sumo_dpm_enable,
+               .late_enable = &sumo_dpm_late_enable,
                .disable = &sumo_dpm_disable,
                .pre_set_power_state = &sumo_dpm_pre_set_power_state,
                .set_power_state = &sumo_dpm_set_power_state,
@@ -1540,6 +1545,7 @@ static struct radeon_asic btc_asic = {
                .init = &btc_dpm_init,
                .setup_asic = &btc_dpm_setup_asic,
                .enable = &btc_dpm_enable,
+               .late_enable = &rv770_dpm_late_enable,
                .disable = &btc_dpm_disable,
                .pre_set_power_state = &btc_dpm_pre_set_power_state,
                .set_power_state = &btc_dpm_set_power_state,
@@ -1570,9 +1576,9 @@ static struct radeon_asic_ring cayman_gfx_ring = {
        .ib_test = &r600_ib_test,
        .is_lockup = &cayman_gfx_is_lockup,
        .vm_flush = &cayman_vm_flush,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &cayman_gfx_get_rptr,
+       .get_wptr = &cayman_gfx_get_wptr,
+       .set_wptr = &cayman_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring cayman_dma_ring = {
@@ -1585,9 +1591,9 @@ static struct radeon_asic_ring cayman_dma_ring = {
        .ib_test = &r600_dma_ib_test,
        .is_lockup = &cayman_dma_is_lockup,
        .vm_flush = &cayman_dma_vm_flush,
-       .get_rptr = &r600_dma_get_rptr,
-       .get_wptr = &r600_dma_get_wptr,
-       .set_wptr = &r600_dma_set_wptr
+       .get_rptr = &cayman_dma_get_rptr,
+       .get_wptr = &cayman_dma_get_wptr,
+       .set_wptr = &cayman_dma_set_wptr
 };
 
 static struct radeon_asic_ring cayman_uvd_ring = {
@@ -1683,6 +1689,7 @@ static struct radeon_asic cayman_asic = {
                .init = &ni_dpm_init,
                .setup_asic = &ni_dpm_setup_asic,
                .enable = &ni_dpm_enable,
+               .late_enable = &rv770_dpm_late_enable,
                .disable = &ni_dpm_disable,
                .pre_set_power_state = &ni_dpm_pre_set_power_state,
                .set_power_state = &ni_dpm_set_power_state,
@@ -1783,6 +1790,7 @@ static struct radeon_asic trinity_asic = {
                .init = &trinity_dpm_init,
                .setup_asic = &trinity_dpm_setup_asic,
                .enable = &trinity_dpm_enable,
+               .late_enable = &trinity_dpm_late_enable,
                .disable = &trinity_dpm_disable,
                .pre_set_power_state = &trinity_dpm_pre_set_power_state,
                .set_power_state = &trinity_dpm_set_power_state,
@@ -1813,9 +1821,9 @@ static struct radeon_asic_ring si_gfx_ring = {
        .ib_test = &r600_ib_test,
        .is_lockup = &si_gfx_is_lockup,
        .vm_flush = &si_vm_flush,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &cayman_gfx_get_rptr,
+       .get_wptr = &cayman_gfx_get_wptr,
+       .set_wptr = &cayman_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring si_dma_ring = {
@@ -1828,9 +1836,9 @@ static struct radeon_asic_ring si_dma_ring = {
        .ib_test = &r600_dma_ib_test,
        .is_lockup = &si_dma_is_lockup,
        .vm_flush = &si_dma_vm_flush,
-       .get_rptr = &r600_dma_get_rptr,
-       .get_wptr = &r600_dma_get_wptr,
-       .set_wptr = &r600_dma_set_wptr,
+       .get_rptr = &cayman_dma_get_rptr,
+       .get_wptr = &cayman_dma_get_wptr,
+       .set_wptr = &cayman_dma_set_wptr,
 };
 
 static struct radeon_asic si_asic = {
@@ -1913,6 +1921,7 @@ static struct radeon_asic si_asic = {
                .init = &si_dpm_init,
                .setup_asic = &si_dpm_setup_asic,
                .enable = &si_dpm_enable,
+               .late_enable = &si_dpm_late_enable,
                .disable = &si_dpm_disable,
                .pre_set_power_state = &si_dpm_pre_set_power_state,
                .set_power_state = &si_dpm_set_power_state,
@@ -1943,9 +1952,9 @@ static struct radeon_asic_ring ci_gfx_ring = {
        .ib_test = &cik_ib_test,
        .is_lockup = &cik_gfx_is_lockup,
        .vm_flush = &cik_vm_flush,
-       .get_rptr = &radeon_ring_generic_get_rptr,
-       .get_wptr = &radeon_ring_generic_get_wptr,
-       .set_wptr = &radeon_ring_generic_set_wptr,
+       .get_rptr = &cik_gfx_get_rptr,
+       .get_wptr = &cik_gfx_get_wptr,
+       .set_wptr = &cik_gfx_set_wptr,
 };
 
 static struct radeon_asic_ring ci_cp_ring = {
@@ -1958,9 +1967,9 @@ static struct radeon_asic_ring ci_cp_ring = {
        .ib_test = &cik_ib_test,
        .is_lockup = &cik_gfx_is_lockup,
        .vm_flush = &cik_vm_flush,
-       .get_rptr = &cik_compute_ring_get_rptr,
-       .get_wptr = &cik_compute_ring_get_wptr,
-       .set_wptr = &cik_compute_ring_set_wptr,
+       .get_rptr = &cik_compute_get_rptr,
+       .get_wptr = &cik_compute_get_wptr,
+       .set_wptr = &cik_compute_set_wptr,
 };
 
 static struct radeon_asic_ring ci_dma_ring = {
@@ -1973,9 +1982,9 @@ static struct radeon_asic_ring ci_dma_ring = {
        .ib_test = &cik_sdma_ib_test,
        .is_lockup = &cik_sdma_is_lockup,
        .vm_flush = &cik_dma_vm_flush,
-       .get_rptr = &r600_dma_get_rptr,
-       .get_wptr = &r600_dma_get_wptr,
-       .set_wptr = &r600_dma_set_wptr,
+       .get_rptr = &cik_sdma_get_rptr,
+       .get_wptr = &cik_sdma_get_wptr,
+       .set_wptr = &cik_sdma_set_wptr,
 };
 
 static struct radeon_asic ci_asic = {
@@ -2058,6 +2067,7 @@ static struct radeon_asic ci_asic = {
                .init = &ci_dpm_init,
                .setup_asic = &ci_dpm_setup_asic,
                .enable = &ci_dpm_enable,
+               .late_enable = &ci_dpm_late_enable,
                .disable = &ci_dpm_disable,
                .pre_set_power_state = &ci_dpm_pre_set_power_state,
                .set_power_state = &ci_dpm_set_power_state,
@@ -2159,6 +2169,7 @@ static struct radeon_asic kv_asic = {
                .init = &kv_dpm_init,
                .setup_asic = &kv_dpm_setup_asic,
                .enable = &kv_dpm_enable,
+               .late_enable = &kv_dpm_late_enable,
                .disable = &kv_dpm_disable,
                .pre_set_power_state = &kv_dpm_pre_set_power_state,
                .set_power_state = &kv_dpm_set_power_state,
@@ -2449,7 +2460,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                        rdev->cg_flags =
                                RADEON_CG_SUPPORT_GFX_MGCG |
                                RADEON_CG_SUPPORT_GFX_MGLS |
-                               /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+                               RADEON_CG_SUPPORT_GFX_CGCG |
                                RADEON_CG_SUPPORT_GFX_CGLS |
                                RADEON_CG_SUPPORT_GFX_CGTS |
                                RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2468,7 +2479,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                        rdev->cg_flags =
                                RADEON_CG_SUPPORT_GFX_MGCG |
                                RADEON_CG_SUPPORT_GFX_MGLS |
-                               /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+                               RADEON_CG_SUPPORT_GFX_CGCG |
                                RADEON_CG_SUPPORT_GFX_CGLS |
                                RADEON_CG_SUPPORT_GFX_CGTS |
                                RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2493,7 +2504,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                        rdev->cg_flags =
                                RADEON_CG_SUPPORT_GFX_MGCG |
                                RADEON_CG_SUPPORT_GFX_MGLS |
-                               /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+                               RADEON_CG_SUPPORT_GFX_CGCG |
                                RADEON_CG_SUPPORT_GFX_CGLS |
                                RADEON_CG_SUPPORT_GFX_CGTS |
                                RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2521,7 +2532,7 @@ int radeon_asic_init(struct radeon_device *rdev)
                        rdev->cg_flags =
                                RADEON_CG_SUPPORT_GFX_MGCG |
                                RADEON_CG_SUPPORT_GFX_MGLS |
-                               /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+                               RADEON_CG_SUPPORT_GFX_CGCG |
                                RADEON_CG_SUPPORT_GFX_CGLS |
                                RADEON_CG_SUPPORT_GFX_CGTS |
                                RADEON_CG_SUPPORT_GFX_CGTS_LS |
index c9fd97b58076bd366e57e7916f93de5435c58c52..b3bc433eed4c3bd832424051306a845461c90aa8 100644 (file)
@@ -47,13 +47,6 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
 void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
 u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
 
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
-                                struct radeon_ring *ring);
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
-                                struct radeon_ring *ring);
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
-                                 struct radeon_ring *ring);
-
 /*
  * r100,rv100,rs100,rv200,rs200
  */
@@ -148,6 +141,13 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
 extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
 
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
@@ -368,6 +368,12 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
 int r600_pcie_gart_init(struct radeon_device *rdev);
 void r600_scratch_init(struct radeon_device *rdev);
 int r600_init_microcode(struct radeon_device *rdev);
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring);
 /* r600 irq */
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_init(struct radeon_device *rdev);
@@ -392,6 +398,7 @@ int rv6xx_get_temp(struct radeon_device *rdev);
 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
 void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+int r600_dpm_late_enable(struct radeon_device *rdev);
 /* r600 dma */
 uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
                           struct radeon_ring *ring);
@@ -454,6 +461,7 @@ int rv770_get_temp(struct radeon_device *rdev);
 /* rv7xx pm */
 int rv770_dpm_init(struct radeon_device *rdev);
 int rv770_dpm_enable(struct radeon_device *rdev);
+int rv770_dpm_late_enable(struct radeon_device *rdev);
 void rv770_dpm_disable(struct radeon_device *rdev);
 int rv770_dpm_set_power_state(struct radeon_device *rdev);
 void rv770_dpm_setup_asic(struct radeon_device *rdev);
@@ -545,6 +553,7 @@ u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
 bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
 int sumo_dpm_init(struct radeon_device *rdev);
 int sumo_dpm_enable(struct radeon_device *rdev);
+int sumo_dpm_late_enable(struct radeon_device *rdev);
 void sumo_dpm_disable(struct radeon_device *rdev);
 int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
 int sumo_dpm_set_power_state(struct radeon_device *rdev);
@@ -591,6 +600,19 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
+u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
+                       struct radeon_ring *ring);
+u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
+                       struct radeon_ring *ring);
+void cayman_gfx_set_wptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring);
+uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
+                            struct radeon_ring *ring);
+uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
+                            struct radeon_ring *ring);
+void cayman_dma_set_wptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring);
+
 int ni_dpm_init(struct radeon_device *rdev);
 void ni_dpm_setup_asic(struct radeon_device *rdev);
 int ni_dpm_enable(struct radeon_device *rdev);
@@ -610,6 +632,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
 bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
 int trinity_dpm_init(struct radeon_device *rdev);
 int trinity_dpm_enable(struct radeon_device *rdev);
+int trinity_dpm_late_enable(struct radeon_device *rdev);
 void trinity_dpm_disable(struct radeon_device *rdev);
 int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
 int trinity_dpm_set_power_state(struct radeon_device *rdev);
@@ -669,6 +692,7 @@ int si_get_temp(struct radeon_device *rdev);
 int si_dpm_init(struct radeon_device *rdev);
 void si_dpm_setup_asic(struct radeon_device *rdev);
 int si_dpm_enable(struct radeon_device *rdev);
+int si_dpm_late_enable(struct radeon_device *rdev);
 void si_dpm_disable(struct radeon_device *rdev);
 int si_dpm_pre_set_power_state(struct radeon_device *rdev);
 int si_dpm_set_power_state(struct radeon_device *rdev);
@@ -739,17 +763,30 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
                          uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
-                             struct radeon_ring *ring);
-u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
-                             struct radeon_ring *ring);
-void cik_compute_ring_set_wptr(struct radeon_device *rdev,
-                              struct radeon_ring *ring);
+u32 cik_gfx_get_rptr(struct radeon_device *rdev,
+                    struct radeon_ring *ring);
+u32 cik_gfx_get_wptr(struct radeon_device *rdev,
+                    struct radeon_ring *ring);
+void cik_gfx_set_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+u32 cik_compute_get_rptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring);
+u32 cik_compute_get_wptr(struct radeon_device *rdev,
+                        struct radeon_ring *ring);
+void cik_compute_set_wptr(struct radeon_device *rdev,
+                         struct radeon_ring *ring);
+u32 cik_sdma_get_rptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+u32 cik_sdma_get_wptr(struct radeon_device *rdev,
+                     struct radeon_ring *ring);
+void cik_sdma_set_wptr(struct radeon_device *rdev,
+                      struct radeon_ring *ring);
 int ci_get_temp(struct radeon_device *rdev);
 int kv_get_temp(struct radeon_device *rdev);
 
 int ci_dpm_init(struct radeon_device *rdev);
 int ci_dpm_enable(struct radeon_device *rdev);
+int ci_dpm_late_enable(struct radeon_device *rdev);
 void ci_dpm_disable(struct radeon_device *rdev);
 int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
 int ci_dpm_set_power_state(struct radeon_device *rdev);
@@ -770,6 +807,7 @@ void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
 
 int kv_dpm_init(struct radeon_device *rdev);
 int kv_dpm_enable(struct radeon_device *rdev);
+int kv_dpm_late_enable(struct radeon_device *rdev);
 void kv_dpm_disable(struct radeon_device *rdev);
 int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
 int kv_dpm_set_power_state(struct radeon_device *rdev);
index 5c39bf7c3d88668bad65ef9667de82a0a145f196..30844814c25a3c931a286b6823b54c88a0bbf348 100644 (file)
 #include "atom.h"
 #include "atom-bits.h"
 
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
-                       uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
                        uint32_t supported_device, u16 caps);
 
-/* from radeon_connector.c */
-extern void
-radeon_add_atom_connector(struct drm_device *dev,
-                         uint32_t connector_id,
-                         uint32_t supported_device,
-                         int connector_type,
-                         struct radeon_i2c_bus_rec *i2c_bus,
-                         uint32_t igp_lane_info,
-                         uint16_t connector_object_id,
-                         struct radeon_hpd *hpd,
-                         struct radeon_router *router);
-
 /* from radeon_legacy_encoder.c */
 extern void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
@@ -1528,6 +1511,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                                le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
                                        ss->type = ss_assign->v1.ucSpreadSpectrumMode;
                                        ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+                                       ss->percentage_divider = 100;
                                        return true;
                                }
                                ss_assign = (union asic_ss_assignment *)
@@ -1545,6 +1529,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                                le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
                                        ss->type = ss_assign->v2.ucSpreadSpectrumMode;
                                        ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+                                       ss->percentage_divider = 100;
                                        if ((crev == 2) &&
                                            ((id == ASIC_INTERNAL_ENGINE_SS) ||
                                             (id == ASIC_INTERNAL_MEMORY_SS)))
@@ -1566,6 +1551,11 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                                le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
                                        ss->type = ss_assign->v3.ucSpreadSpectrumMode;
                                        ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+                                       if (ss_assign->v3.ucSpreadSpectrumMode &
+                                           SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+                                               ss->percentage_divider = 1000;
+                                       else
+                                               ss->percentage_divider = 100;
                                        if ((id == ASIC_INTERNAL_ENGINE_SS) ||
                                            (id == ASIC_INTERNAL_MEMORY_SS))
                                                ss->rate /= 100;
@@ -1809,7 +1799,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
                if (misc & ATOM_DOUBLE_CLOCK_MODE)
                        mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 
-               mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+               mode->crtc_clock = mode->clock =
+                       le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
 
                if (index == 1) {
                        /* PAL timings appear to have wrong values for totals */
@@ -1852,7 +1843,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
                if (misc & ATOM_DOUBLE_CLOCK_MODE)
                        mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 
-               mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+               mode->crtc_clock = mode->clock =
+                       le16_to_cpu(dtd_timings->usPixClk) * 10;
                break;
        }
        return true;
@@ -3884,16 +3876,18 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
                                                        ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
                                        }
                                        reg_table->last = i;
-                                       while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) &&
+                                       while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
                                               (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
-                                               t_mem_id = (u8)((*(u32 *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
+                                               t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+                                                               >> MEM_ID_SHIFT);
                                                if (module_index == t_mem_id) {
                                                        reg_table->mc_reg_table_entry[num_ranges].mclk_max =
-                                                               (u32)((*(u32 *)reg_data & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT);
+                                                               (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+                                                                     >> CLOCK_RANGE_SHIFT);
                                                        for (i = 0, j = 1; i < reg_table->last; i++) {
                                                                if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
                                                                        reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
-                                                                               (u32)*((u32 *)reg_data + j);
+                                                                               (u32)le32_to_cpu(*((u32 *)reg_data + j));
                                                                        j++;
                                                                } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
                                                                        reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
@@ -3905,7 +3899,7 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
                                                reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
                                                        ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
                                        }
-                                       if (*(u32 *)reg_data != END_OF_REG_DATA_BLOCK)
+                                       if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
                                                return -EINVAL;
                                        reg_table->num_entries = num_ranges;
                                } else
@@ -3944,6 +3938,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
        /* tell the bios not to handle mode switching */
        bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
+       /* clear the vbios dpms state */
+       if (ASIC_IS_DCE4(rdev))
+               bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
        if (rdev->family >= CHIP_R600) {
                WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
                WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
index 68ce360560190af6505856452658e450d3f6878a..6651177110f08cd5afb9088f4849ad3f987f5a1b 100644 (file)
 #include <asm/pci-bridge.h>
 #endif /* CONFIG_PPC_PMAC */
 
-/* from radeon_encoder.c */
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
-                       uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
-
-/* from radeon_connector.c */
-extern void
-radeon_add_legacy_connector(struct drm_device *dev,
-                           uint32_t connector_id,
-                           uint32_t supported_device,
-                           int connector_type,
-                           struct radeon_i2c_bus_rec *i2c_bus,
-                           uint16_t connector_object_id,
-                           struct radeon_hpd *hpd);
-
 /* from radeon_legacy_encoder.c */
 extern void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
index 20a768ac89a8e6532b33cbb3114928afc98749b7..82d4f865546ed9024b4078d67bfdd947f08a1060 100644 (file)
 
 #include <linux/pm_runtime.h>
 
-extern void
-radeon_combios_connected_scratch_regs(struct drm_connector *connector,
-                                     struct drm_encoder *encoder,
-                                     bool connected);
-extern void
-radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
-                                      struct drm_encoder *encoder,
-                                      bool connected);
-
 void radeon_connector_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
index 3cae2bbc1854d73dda35f13f0b8cfc3201dc2c25..bb0d5c3a8311bf0dc274c2b47930078d7e657852 100644 (file)
@@ -2020,10 +2020,10 @@ static int radeon_cp_get_buffers(struct drm_device *dev,
 
                buf->file_priv = file_priv;
 
-               if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+               if (copy_to_user(&d->request_indices[i], &buf->idx,
                                     sizeof(buf->idx)))
                        return -EFAULT;
-               if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+               if (copy_to_user(&d->request_sizes[i], &buf->total,
                                     sizeof(buf->total)))
                        return -EFAULT;
 
@@ -2228,7 +2228,7 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
 
        dev_priv->ring.tail &= dev_priv->ring.tail_mask;
 
-       DRM_MEMORYBARRIER();
+       mb();
        GET_RING_HEAD( dev_priv );
 
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
index 0b366169d64de55c52e4c2d9c26d1c3b9db19b2d..dfb5a1db87d4a8651fd3a9bf7494d14287af7803 100644 (file)
@@ -138,7 +138,7 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
                                p->ring = R600_RING_TYPE_DMA_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
-               } else if (p->rdev->family >= CHIP_R600) {
+               } else if (p->rdev->family >= CHIP_RV770) {
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                } else {
                        return -EINVAL;
@@ -192,7 +192,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
-       if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+       if (copy_from_user(p->chunks_array, chunk_array_ptr,
                               sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
@@ -208,7 +208,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                uint32_t __user *cdata;
 
                chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
-               if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+               if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
@@ -252,7 +252,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
                }
-               if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+               if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
@@ -472,7 +472,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
                        }
                        parser->const_ib.is_const_ib = true;
                        parser->const_ib.length_dw = ib_chunk->length_dw;
-                       if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+                       if (copy_from_user(parser->const_ib.ptr,
                                               ib_chunk->user_ptr,
                                               ib_chunk->length_dw * 4))
                                return -EFAULT;
@@ -495,7 +495,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
        parser->ib.length_dw = ib_chunk->length_dw;
        if (ib_chunk->kdata)
                memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
-       else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+       else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
                return -EFAULT;
        return 0;
 }
index 39b033b441d2a6f76e8d021df7085568538887b3..b012cbbc3ed5a9b892b433eff0ee5f3134a130de 100644 (file)
@@ -144,6 +144,11 @@ void radeon_program_register_sequence(struct radeon_device *rdev,
        }
 }
 
+void radeon_pci_config_reset(struct radeon_device *rdev)
+{
+       pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
+}
+
 /**
  * radeon_surface_init - Clear GPU surface registers.
  *
@@ -249,7 +254,7 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  * Init doorbell driver information (CIK)
  * Returns 0 on success, error on failure.
  */
-int radeon_doorbell_init(struct radeon_device *rdev)
+static int radeon_doorbell_init(struct radeon_device *rdev)
 {
        /* doorbell bar mapping */
        rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
@@ -278,7 +283,7 @@ int radeon_doorbell_init(struct radeon_device *rdev)
  *
  * Tear down doorbell driver information (CIK)
  */
-void radeon_doorbell_fini(struct radeon_device *rdev)
+static void radeon_doorbell_fini(struct radeon_device *rdev)
 {
        iounmap(rdev->doorbell.ptr);
        rdev->doorbell.ptr = NULL;
@@ -1330,6 +1335,7 @@ int radeon_device_init(struct radeon_device *rdev,
                if (r)
                        return r;
        }
+
        if ((radeon_testing & 1)) {
                if (rdev->accel_working)
                        radeon_test_moves(rdev);
@@ -1455,7 +1461,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 
        radeon_save_bios_scratch_regs(rdev);
 
-       radeon_pm_suspend(rdev);
        radeon_suspend(rdev);
        radeon_hpd_fini(rdev);
        /* evict remaining vram memory */
@@ -1516,14 +1521,22 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
-       radeon_pm_resume(rdev);
+       if (rdev->pm.dpm_enabled) {
+               /* do dpm late init */
+               r = radeon_pm_late_init(rdev);
+               if (r) {
+                       rdev->pm.dpm_enabled = false;
+                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+               }
+       }
+
        radeon_restore_bios_scratch_regs(rdev);
 
        if (fbcon) {
                radeon_fbdev_set_suspend(rdev, 0);
                console_unlock();
        }
-       
+
        /* init dig PHYs, disp eng pll */
        if (rdev->is_atom_bios) {
                radeon_atom_encoder_init(rdev);
index 7b253815a3237153b668d660e9ef7d56642597ec..d680608f6f5bc9a80e879b4f24769f8a9ffb1e8c 100644 (file)
@@ -306,7 +306,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
         * to complete in this vblank?
         */
        if (update_pending &&
-           (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+           (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
                                                               &vpos, &hpos, NULL, NULL)) &&
            ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
             (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
@@ -1464,12 +1464,22 @@ int radeon_modeset_init(struct radeon_device *rdev)
        /* setup afmt */
        radeon_afmt_init(rdev);
 
-       /* Initialize power management */
-       radeon_pm_init(rdev);
-
        radeon_fbdev_init(rdev);
        drm_kms_helper_poll_init(rdev->ddev);
 
+       if (rdev->pm.dpm_enabled) {
+               /* do dpm late init */
+               ret = radeon_pm_late_init(rdev);
+               if (ret) {
+                       rdev->pm.dpm_enabled = false;
+                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+               }
+               /* set the dpm state for PX since there won't be
+                * a modeset to call this.
+                */
+               radeon_pm_compute_clocks(rdev);
+       }
+
        return 0;
 }
 
@@ -1477,7 +1487,6 @@ void radeon_modeset_fini(struct radeon_device *rdev)
 {
        radeon_fbdev_fini(rdev);
        kfree(rdev->mode_info.bios_hardcoded_edid);
-       radeon_pm_fini(rdev);
 
        if (rdev->mode_info.mode_config_initialized) {
                radeon_afmt_fini(rdev);
@@ -1601,6 +1610,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  *
  * \param dev Device to query.
  * \param crtc Crtc to query.
+ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
  * \param *stime Target location for timestamp taken immediately before
@@ -1622,8 +1632,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
-                              ktime_t *stime, ktime_t *etime)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
+                              int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
 {
        u32 stat_crtc = 0, vbl = 0, position = 0;
        int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1765,5 +1775,27 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;
 
+       /* Is vpos outside nominal vblank area, but less than
+        * 1/100 of a frame height away from start of vblank?
+        * If so, assume this isn't a massively delayed vblank
+        * interrupt, but a vblank interrupt that fired a few
+        * microseconds before true start of vblank. Compensate
+        * by adding a full frame duration to the final timestamp.
+        * Happens, e.g., on ATI R500, R600.
+        *
+        * We only do this if DRM_CALLED_FROM_VBLIRQ.
+        */
+       if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
+               vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+               vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+
+               if (vbl_start - *vpos < vtotal / 100) {
+                       *vpos -= vtotal;
+
+                       /* Signal this correction as "applied". */
+                       ret |= 0x8;
+               }
+       }
+
        return ret;
 }
index db39ea36bf22fc396da63e5a157adf42e752e712..ec8c388eec176e8c36e343ff4182634d3708bc84 100644 (file)
@@ -102,13 +102,14 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
                                struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
                                struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+                                     unsigned int flags,
                                      int *vpos, int *hpos, ktime_t *stime,
                                      ktime_t *etime);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -168,6 +169,7 @@ int radeon_fastfb = 0;
 int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
+int radeon_hard_reset = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -232,6 +234,9 @@ module_param_named(aspm, radeon_aspm, int, 0444);
 MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
 module_param_named(runpm, radeon_runtime_pm, int, 0444);
 
+MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
+module_param_named(hard_reset, radeon_hard_reset, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
@@ -400,6 +405,9 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
        if (radeon_runtime_pm == 0)
                return -EINVAL;
 
+       if (radeon_runtime_pm == -1 && !radeon_is_px())
+               return -EINVAL;
+
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        drm_kms_helper_poll_disable(drm_dev);
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
@@ -422,6 +430,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
        if (radeon_runtime_pm == 0)
                return -EINVAL;
 
+       if (radeon_runtime_pm == -1 && !radeon_is_px())
+               return -EINVAL;
+
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
        pci_set_power_state(pdev, PCI_D0);
index 00e0d449021c343015540ea3baf1028b85a10090..dafd812e45710b0ffed36c33f3e37de20ec64733 100644 (file)
@@ -405,7 +405,7 @@ extern void radeon_do_release(struct drm_device * dev);
 extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
 extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
-extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
 extern void radeon_driver_irq_preinstall(struct drm_device * dev);
 extern int radeon_driver_irq_postinstall(struct drm_device *dev);
 extern void radeon_driver_irq_uninstall(struct drm_device * dev);
index d3a86e43c0123715e0cf765a738e6664f42ab6e0..c37cb79a9489aadd38a84b2f59a6aed7e56333a5 100644 (file)
@@ -121,7 +121,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
        (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        radeon_fence_ring_emit(rdev, ring, *fence);
-       trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+       trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
        return 0;
 }
 
@@ -313,7 +313,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                                continue;
 
                        last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
-                       trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+                       trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
                        radeon_irq_kms_sw_irq_get(rdev, i);
                }
 
@@ -332,7 +332,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
                                continue;
 
                        radeon_irq_kms_sw_irq_put(rdev, i);
-                       trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+                       trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
                }
 
                if (unlikely(r < 0))
@@ -841,6 +841,8 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
                if (!rdev->fence_drv[i].initialized)
                        continue;
 
+               radeon_fence_process(rdev, i);
+
                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
index 96e440061bdbf5b65a6213047f62bbeb89aea77f..a8f9b463bf2a4767d9ed35d150bafffbfa1dfe07 100644 (file)
@@ -713,7 +713,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
        unsigned i;
 
        /* check if the id is still valid */
-       if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+       if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
                return NULL;
 
        /* we definately need to flush */
@@ -726,6 +726,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                if (fence == NULL) {
                        /* found a free one */
                        vm->id = i;
+                       trace_radeon_vm_grab_id(vm->id, ring);
                        return NULL;
                }
 
@@ -769,6 +770,9 @@ void radeon_vm_fence(struct radeon_device *rdev,
 
        radeon_fence_unref(&vm->fence);
        vm->fence = radeon_fence_ref(fence);
+
+       radeon_fence_unref(&vm->last_id_use);
+       vm->last_id_use = radeon_fence_ref(fence);
 }
 
 /**
@@ -1303,6 +1307,8 @@ void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
        vm->id = 0;
        vm->fence = NULL;
+       vm->last_flush = NULL;
+       vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
@@ -1341,5 +1347,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
        }
        radeon_fence_unref(&vm->fence);
        radeon_fence_unref(&vm->last_flush);
+       radeon_fence_unref(&vm->last_id_use);
        mutex_unlock(&vm->mutex);
 }
index 805c5e566b9a1f29539a4cf1148183056dc90579..b96c819024b3cdb7b2ea8f387c1e267a563ed8df 100644 (file)
@@ -86,7 +86,7 @@ retry:
        return 0;
 }
 
-int radeon_gem_set_domain(struct drm_gem_object *gobj,
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
 {
        struct radeon_bo *robj;
index fc60b74ee304dd779d98db03b3ec60c58767ed9a..e24ca6ab96decdf94c3e86a097d949f2705a1f7c 100644 (file)
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 /* Add the default buses */
 void radeon_i2c_init(struct radeon_device *rdev)
 {
+       if (radeon_hw_i2c)
+               DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
        if (rdev->is_atom_bios)
                radeon_atombios_i2c_init(rdev);
        else
index 8d68e972789a343f08186c3a4560d7c18db24999..244b19bab2e72406648ef1b2eae8ef7965bfcf01 100644 (file)
@@ -181,7 +181,7 @@ static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_dis
  * tied to dma at all, this is just a hangover from dri prehistory.
  */
 
-irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_radeon_private_t *dev_priv =
@@ -203,7 +203,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
 
        /* SW interrupt */
        if (stat & RADEON_SW_INT_TEST)
-               DRM_WAKEUP(&dev_priv->swi_queue);
+               wake_up(&dev_priv->swi_queue);
 
        /* VBLANK interrupt */
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
@@ -249,7 +249,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
 
        dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
 
-       DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+       DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
                    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
 
        return ret;
@@ -302,7 +302,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
 
        result = radeon_emit_irq(dev);
 
-       if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+       if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }
@@ -354,7 +354,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
            (drm_radeon_private_t *) dev->dev_private;
 
        atomic_set(&dev_priv->swi_emitted, 0);
-       DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+       init_waitqueue_head(&dev_priv->swi_queue);
 
        dev->max_vblank_count = 0x001fffff;
 
index ec6240b00469a18c471e181d59c4670b2e05069d..089c9ffb0aa95e8e47c964f190b07e0e8fe93c32 100644 (file)
 /**
  * radeon_driver_irq_handler_kms - irq handler for KMS
  *
- * @DRM_IRQ_ARGS: args
+ * @irq: interrupt number, @arg: pointer to the DRM device
  *
  * This is the irq handler for the radeon KMS driver (all asics).
  * radeon_irq_process is a macro that points to the per-asic
  * irq handler callback.
  */
-irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        struct radeon_device *rdev = dev->dev_private;
index 21d593c0ecaf4e7e0ec85ab9b7bf53fafd0f2a87..114d1672d616d0b5d7b85ab5d739da66f2db823e 100644 (file)
@@ -191,7 +191,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
  * etc. (all asics).
  * Returns 0 on success, -EINVAL on failure.
  */
-int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_info *info = data;
@@ -223,7 +223,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        *value = rdev->accel_working;
                break;
        case RADEON_INFO_CRTC_FROM_ID:
-               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+               if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
                        DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
                        return -EFAULT;
                }
@@ -269,7 +269,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 *
                 * When returning, the value is 1 if filp owns hyper-z access,
                 * 0 otherwise. */
-               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+               if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
                        DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
                        return -EFAULT;
                }
@@ -281,7 +281,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                break;
        case RADEON_INFO_WANT_CMASK:
                /* The same logic as Hyper-Z. */
-               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+               if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
                        DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
                        return -EFAULT;
                }
@@ -417,7 +417,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                *value = rdev->fastfb_working;
                break;
        case RADEON_INFO_RING_WORKING:
-               if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+               if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
                        DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
                        return -EFAULT;
                }
@@ -470,11 +470,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
                }
                break;
+       case RADEON_INFO_MAX_SCLK:
+               if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+                   rdev->pm.dpm_enabled)
+                       *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+               else
+                       *value = rdev->pm.default_sclk * 10;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
        }
-       if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
+       if (copy_to_user(value_ptr, (char*)value, value_size)) {
                DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
                return -EFAULT;
        }
@@ -712,11 +719,12 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
                                                     vblank_time, flags,
-                                                    drmcrtc);
+                                                    drmcrtc, &drmcrtc->hwmode);
 }
 
 #define KMS_INVALID_IOCTL(name)                                                \
-int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+static int name(struct drm_device *dev, void *data, struct drm_file    \
+               *file_priv)                                             \
 {                                                                      \
        DRM_ERROR("invalid ioctl with kms %s\n", __func__);             \
        return -EINVAL;                                                 \
index d54d2d7c9031a7ff957fec87b1d58c0ed927e6a5..146d253f1131a4780e889893b3e566336548b3f7 100644 (file)
@@ -243,7 +243,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p
        if (!block)
                return -ENOMEM;
 
-       if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+       if (copy_to_user(alloc->region_offset, &block->start,
                             sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
index 3f0dd664af90d6815b2edea895662936cc1982e4..402dbe32c23483afb3524d3397e3ce1b7c3cc281 100644 (file)
@@ -291,6 +291,7 @@ struct radeon_tv_regs {
 
 struct radeon_atom_ss {
        uint16_t percentage;
+       uint16_t percentage_divider;
        uint8_t type;
        uint16_t step;
        uint8_t delay;
@@ -624,6 +625,30 @@ struct atom_voltage_table
        struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
 };
 
+
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+                         uint32_t connector_id,
+                         uint32_t supported_device,
+                         int connector_type,
+                         struct radeon_i2c_bus_rec *i2c_bus,
+                         uint32_t igp_lane_info,
+                         uint16_t connector_object_id,
+                         struct radeon_hpd *hpd,
+                         struct radeon_router *router);
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+                           uint32_t connector_id,
+                           uint32_t supported_device,
+                           int connector_type,
+                           struct radeon_i2c_bus_rec *i2c_bus,
+                           uint16_t connector_object_id,
+                           struct radeon_hpd *hpd);
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+                       uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
 extern enum radeon_tv_std
@@ -631,6 +656,15 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
 extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
                                                 u16 *vddc, u16 *vddci, u16 *mvdd);
 
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+                                     struct drm_encoder *encoder,
+                                     bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+                                      struct drm_encoder *encoder,
+                                      bool connected);
+
 extern struct drm_connector *
 radeon_get_connector_for_encoder(struct drm_encoder *encoder);
 extern struct drm_connector *
@@ -666,6 +700,7 @@ extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
 extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
 extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                                u8 write_byte, u8 *read_byte);
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 
 extern void radeon_i2c_init(struct radeon_device *rdev);
 extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -766,6 +801,7 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);
 
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+                                     unsigned int flags,
                                      int *vpos, int *hpos, ktime_t *stime,
                                      ktime_t *etime);
 
index c0fa4aa9ceea8ad8d22c7485b28f4bf045a3a7dd..08595cf90b0139ee0da24f4ac3725e30dcbf9d68 100644 (file)
@@ -46,7 +46,7 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  * function are calling it.
  */
 
-void radeon_bo_clear_va(struct radeon_bo *bo)
+static void radeon_bo_clear_va(struct radeon_bo *bo)
 {
        struct radeon_bo_va *bo_va, *tmp;
 
index 984097b907ef5ee67c8e59faf4198ccf8add5f1b..8e8153e471c20a6b0dba28b38ce36bcd78f41a7b 100644 (file)
@@ -924,6 +924,10 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
 
        if (rdev->asic->dpm.powergate_uvd) {
                mutex_lock(&rdev->pm.mutex);
+               /* don't powergate anything if we
+                  have active but paused streams */
+               enable |= rdev->pm.dpm.sd > 0;
+               enable |= rdev->pm.dpm.hd > 0;
                /* enable/disable UVD */
                radeon_dpm_powergate_uvd(rdev, !enable);
                mutex_unlock(&rdev->pm.mutex);
@@ -1010,8 +1014,10 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
        rdev->pm.current_clock_mode_index = 0;
        rdev->pm.current_sclk = rdev->pm.default_sclk;
        rdev->pm.current_mclk = rdev->pm.default_mclk;
-       rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
-       rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+       if (rdev->pm.power_state) {
+               rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+               rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+       }
        if (rdev->pm.pm_method == PM_METHOD_DYNPM
            && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
                rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -1032,25 +1038,27 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
        radeon_dpm_setup_asic(rdev);
        ret = radeon_dpm_enable(rdev);
        mutex_unlock(&rdev->pm.mutex);
-       if (ret) {
-               DRM_ERROR("radeon: dpm resume failed\n");
-               if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_CAYMAN) &&
-                   rdev->mc_fw) {
-                       if (rdev->pm.default_vddc)
-                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
-                                                       SET_VOLTAGE_TYPE_ASIC_VDDC);
-                       if (rdev->pm.default_vddci)
-                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
-                                                       SET_VOLTAGE_TYPE_ASIC_VDDCI);
-                       if (rdev->pm.default_sclk)
-                               radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
-                       if (rdev->pm.default_mclk)
-                               radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
-               }
-       } else {
-               rdev->pm.dpm_enabled = true;
-               radeon_pm_compute_clocks(rdev);
+       if (ret)
+               goto dpm_resume_fail;
+       rdev->pm.dpm_enabled = true;
+       radeon_pm_compute_clocks(rdev);
+       return;
+
+dpm_resume_fail:
+       DRM_ERROR("radeon: dpm resume failed\n");
+       if ((rdev->family >= CHIP_BARTS) &&
+           (rdev->family <= CHIP_CAYMAN) &&
+           rdev->mc_fw) {
+               if (rdev->pm.default_vddc)
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDC);
+               if (rdev->pm.default_vddci)
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDCI);
+               if (rdev->pm.default_sclk)
+                       radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+               if (rdev->pm.default_mclk)
+                       radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
        }
 }
 
@@ -1170,51 +1178,50 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
        radeon_dpm_setup_asic(rdev);
        ret = radeon_dpm_enable(rdev);
        mutex_unlock(&rdev->pm.mutex);
-       if (ret) {
-               rdev->pm.dpm_enabled = false;
-               if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_CAYMAN) &&
-                   rdev->mc_fw) {
-                       if (rdev->pm.default_vddc)
-                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
-                                                       SET_VOLTAGE_TYPE_ASIC_VDDC);
-                       if (rdev->pm.default_vddci)
-                               radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
-                                                       SET_VOLTAGE_TYPE_ASIC_VDDCI);
-                       if (rdev->pm.default_sclk)
-                               radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
-                       if (rdev->pm.default_mclk)
-                               radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
-               }
-               DRM_ERROR("radeon: dpm initialization failed\n");
-               return ret;
-       }
+       if (ret)
+               goto dpm_failed;
        rdev->pm.dpm_enabled = true;
-       radeon_pm_compute_clocks(rdev);
 
-       if (rdev->pm.num_power_states > 1) {
-               ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
-               if (ret)
-                       DRM_ERROR("failed to create device file for dpm state\n");
-               ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
-               if (ret)
-                       DRM_ERROR("failed to create device file for dpm state\n");
-               /* XXX: these are noops for dpm but are here for backwards compat */
-               ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power profile\n");
-               ret = device_create_file(rdev->dev, &dev_attr_power_method);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power method\n");
-
-               if (radeon_debugfs_pm_init(rdev)) {
-                       DRM_ERROR("Failed to register debugfs file for dpm!\n");
-               }
+       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+       if (ret)
+               DRM_ERROR("failed to create device file for dpm state\n");
+       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+       if (ret)
+               DRM_ERROR("failed to create device file for dpm state\n");
+       /* XXX: these are noops for dpm but are here for backwards compat */
+       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+       if (ret)
+               DRM_ERROR("failed to create device file for power profile\n");
+       ret = device_create_file(rdev->dev, &dev_attr_power_method);
+       if (ret)
+               DRM_ERROR("failed to create device file for power method\n");
 
-               DRM_INFO("radeon: dpm initialized\n");
+       if (radeon_debugfs_pm_init(rdev)) {
+               DRM_ERROR("Failed to register debugfs file for dpm!\n");
        }
 
+       DRM_INFO("radeon: dpm initialized\n");
+
        return 0;
+
+dpm_failed:
+       rdev->pm.dpm_enabled = false;
+       if ((rdev->family >= CHIP_BARTS) &&
+           (rdev->family <= CHIP_CAYMAN) &&
+           rdev->mc_fw) {
+               if (rdev->pm.default_vddc)
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDC);
+               if (rdev->pm.default_vddci)
+                       radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+                                               SET_VOLTAGE_TYPE_ASIC_VDDCI);
+               if (rdev->pm.default_sclk)
+                       radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+               if (rdev->pm.default_mclk)
+                       radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+       }
+       DRM_ERROR("radeon: dpm initialization failed\n");
+       return ret;
 }
 
 int radeon_pm_init(struct radeon_device *rdev)
@@ -1228,11 +1235,10 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_RV670:
        case CHIP_RS780:
        case CHIP_RS880:
+       case CHIP_BARTS:
+       case CHIP_TURKS:
+       case CHIP_CAICOS:
        case CHIP_CAYMAN:
-       case CHIP_BONAIRE:
-       case CHIP_KABINI:
-       case CHIP_KAVERI:
-       case CHIP_HAWAII:
                /* DPM requires the RLC, RV770+ dGPU requires SMC */
                if (!rdev->rlc_fw)
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1257,15 +1263,16 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_PALM:
        case CHIP_SUMO:
        case CHIP_SUMO2:
-       case CHIP_BARTS:
-       case CHIP_TURKS:
-       case CHIP_CAICOS:
        case CHIP_ARUBA:
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
        case CHIP_HAINAN:
+       case CHIP_BONAIRE:
+       case CHIP_KABINI:
+       case CHIP_KAVERI:
+       case CHIP_HAWAII:
                /* DPM requires the RLC, RV770+ dGPU requires SMC */
                if (!rdev->rlc_fw)
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1290,6 +1297,18 @@ int radeon_pm_init(struct radeon_device *rdev)
                return radeon_pm_init_old(rdev);
 }
 
+int radeon_pm_late_init(struct radeon_device *rdev)
+{
+       int ret = 0;
+
+       if (rdev->pm.pm_method == PM_METHOD_DPM) {
+               mutex_lock(&rdev->pm.mutex);
+               ret = radeon_dpm_late_enable(rdev);
+               mutex_unlock(&rdev->pm.mutex);
+       }
+       return ret;
+}
+
 static void radeon_pm_fini_old(struct radeon_device *rdev)
 {
        if (rdev->pm.num_power_states > 1) {
@@ -1420,6 +1439,9 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
 
+       if (!rdev->pm.dpm_enabled)
+               return;
+
        mutex_lock(&rdev->pm.mutex);
 
        /* update active crtc counts */
@@ -1464,7 +1486,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
         */
        for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
                if (rdev->pm.active_crtcs & (1 << crtc)) {
-                       vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
+                       vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
                        if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
                            !(vbl_status & DRM_SCANOUTPOS_INVBL))
                                in_vbl = false;
index 9214403ae173c146573cf8c4441a6a71cf052665..1b783f0e6d3ad084b0a62629a4b5d67c9e92453f 100644 (file)
@@ -332,36 +332,6 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
        }
 }
 
-u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
-                                struct radeon_ring *ring)
-{
-       u32 rptr;
-
-       if (rdev->wb.enabled)
-               rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
-       else
-               rptr = RREG32(ring->rptr_reg);
-
-       return rptr;
-}
-
-u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
-                                struct radeon_ring *ring)
-{
-       u32 wptr;
-
-       wptr = RREG32(ring->wptr_reg);
-
-       return wptr;
-}
-
-void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
-                                 struct radeon_ring *ring)
-{
-       WREG32(ring->wptr_reg, ring->wptr);
-       (void)RREG32(ring->wptr_reg);
-}
-
 /**
  * radeon_ring_free_size - update the free size
  *
@@ -463,7 +433,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
        while (ring->wptr & ring->align_mask) {
                radeon_ring_write(ring, ring->nop);
        }
-       DRM_MEMORYBARRIER();
+       mb();
        radeon_ring_set_wptr(rdev, ring);
 }
 
@@ -689,22 +659,18 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
  * @ring: radeon_ring structure holding ring information
  * @ring_size: size of the ring
  * @rptr_offs: offset of the rptr writeback location in the WB buffer
- * @rptr_reg: MMIO offset of the rptr register
- * @wptr_reg: MMIO offset of the wptr register
  * @nop: nop packet for this ring
  *
  * Initialize the driver information for the selected ring (all asics).
  * Returns 0 on success, error on failure.
  */
 int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
-                    unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
+                    unsigned rptr_offs, u32 nop)
 {
        int r;
 
        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
-       ring->rptr_reg = rptr_reg;
-       ring->wptr_reg = wptr_reg;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
@@ -790,34 +756,54 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];
+
+       uint32_t rptr, wptr, rptr_next;
        unsigned count, i, j;
-       u32 tmp;
 
        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;
-       tmp = radeon_ring_get_wptr(rdev, ring);
-       seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
-       tmp = radeon_ring_get_rptr(rdev, ring);
-       seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+
+       wptr = radeon_ring_get_wptr(rdev, ring);
+       seq_printf(m, "wptr: 0x%08x [%5d]\n",
+                  wptr, wptr);
+
+       rptr = radeon_ring_get_rptr(rdev, ring);
+       seq_printf(m, "rptr: 0x%08x [%5d]\n",
+                  rptr, rptr);
+
        if (ring->rptr_save_reg) {
-               seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
-                          RREG32(ring->rptr_save_reg));
-       }
-       seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
-       seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
-       seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
-       seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
+               rptr_next = RREG32(ring->rptr_save_reg);
+               seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
+                          ring->rptr_save_reg, rptr_next, rptr_next);
+       } else
+               rptr_next = ~0;
+
+       seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
+                  ring->wptr, ring->wptr);
+       seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n",
+                  ring->rptr, ring->rptr);
+       seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
+                  ring->last_semaphore_signal_addr);
+       seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
+                  ring->last_semaphore_wait_addr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
+
+       if (!ring->ready)
+               return 0;
+
        /* print 8 dw before current rptr as often it's the last executed
         * packet that is the root issue
         */
-       i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-       if (ring->ready) {
-               for (j = 0; j <= (count + 32); j++) {
-                       seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
-                       i = (i + 1) & ring->ptr_mask;
-               }
+       i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+       for (j = 0; j <= (count + 32); j++) {
+               seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+               if (rptr == i)
+                       seq_puts(m, " *");
+               if (rptr_next == i)
+                       seq_puts(m, " #");
+               seq_puts(m, "\n");
+               i = (i + 1) & ring->ptr_mask;
        }
        return 0;
 }
index f0bac68254b79a5dd31cfdc14ce80a98bca91065..c0625805cdd769b826d0605141f6cc80ce80ad72 100644 (file)
@@ -402,13 +402,15 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 
        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
+               uint64_t soffset = i->soffset + sa_manager->gpu_addr;
+               uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
                } else {
                        seq_printf(m, " ");
                }
-               seq_printf(m, "[0x%08x 0x%08x] size %8d",
-                          i->soffset, i->eoffset, i->eoffset - i->soffset);
+               seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
+                          soffset, eoffset, eoffset - soffset);
                if (i->fence) {
                        seq_printf(m, " protected by 0x%016llx on ring %d",
                                   i->fence->seq, i->fence->ring);
index 4d20910899d4c7ed8ad85f29788bfd35cc359fb5..956ab7f14e1650607c8dd09f1b9c96daaee3169a 100644 (file)
@@ -1810,7 +1810,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
                }
                if (!buf) {
                        DRM_DEBUG("EAGAIN\n");
-                       if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+                       if (copy_to_user(tex->image, image, sizeof(*image)))
                                return -EFAULT;
                        return -EAGAIN;
                }
@@ -1823,7 +1823,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 
 #define RADEON_COPY_MT(_buf, _data, _width) \
        do { \
-               if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+               if (copy_from_user(_buf, _data, (_width))) {\
                        DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
                        return -EFAULT; \
                } \
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
        if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
                sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
 
-       if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+       if (copy_from_user(&depth_boxes, clear->depth_boxes,
                               sarea_priv->nbox * sizeof(depth_boxes[0])))
                return -EFAULT;
 
@@ -2436,7 +2436,7 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file
                return -EINVAL;
        }
 
-       if (DRM_COPY_FROM_USER(&image,
+       if (copy_from_user(&image,
                               (drm_radeon_tex_image_t __user *) tex->image,
                               sizeof(image)))
                return -EFAULT;
@@ -2460,7 +2460,7 @@ static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file
 
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-       if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+       if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
                return -EFAULT;
 
        RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2585,13 +2585,13 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
                drm_radeon_prim_t prim;
                drm_radeon_tcl_prim_t tclprim;
 
-               if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+               if (copy_from_user(&prim, &vertex->prim[i], sizeof(prim)))
                        return -EFAULT;
 
                if (prim.stateidx != laststate) {
                        drm_radeon_state_t state;
 
-                       if (DRM_COPY_FROM_USER(&state,
+                       if (copy_from_user(&state,
                                               &vertex->state[prim.stateidx],
                                               sizeof(state)))
                                return -EFAULT;
@@ -2799,7 +2799,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
 
        do {
                if (i < cmdbuf->nbox) {
-                       if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+                       if (copy_from_user(&box, &boxes[i], sizeof(box)))
                                return -EFAULT;
                        /* FIXME The second and subsequent times round
                         * this loop, send a WAIT_UNTIL_3D_IDLE before
@@ -3116,7 +3116,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
                return -EINVAL;
        }
 
-       if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+       if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }
index 0473257d407886e175f77347078b2de54c61cfc3..f749f2c3bbdb838a63bdcd6598409387de3c790b 100644 (file)
@@ -106,42 +106,45 @@ TRACE_EVENT(radeon_vm_set_page,
 
 DECLARE_EVENT_CLASS(radeon_fence_request,
 
-           TP_PROTO(struct drm_device *dev, u32 seqno),
+           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-           TP_ARGS(dev, seqno),
+           TP_ARGS(dev, ring, seqno),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
+                            __field(int, ring)
                             __field(u32, seqno)
                             ),
 
            TP_fast_assign(
                           __entry->dev = dev->primary->index;
+                          __entry->ring = ring;
                           __entry->seqno = seqno;
                           ),
 
-           TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+           TP_printk("dev=%u, ring=%d, seqno=%u",
+                     __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
 
-           TP_PROTO(struct drm_device *dev, u32 seqno),
+           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-           TP_ARGS(dev, seqno)
+           TP_ARGS(dev, ring, seqno)
 );
 
 DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
 
-           TP_PROTO(struct drm_device *dev, u32 seqno),
+           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-           TP_ARGS(dev, seqno)
+           TP_ARGS(dev, ring, seqno)
 );
 
 DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
 
-           TP_PROTO(struct drm_device *dev, u32 seqno),
+           TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
 
-           TP_ARGS(dev, seqno)
+           TP_ARGS(dev, ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(radeon_semaphore_request,
index 71245d6f34a20c0f64a6eff3057eb6d7ae37f388..77f5b0c3edb8d8b1f4835620d626c3981171c7a6 100644 (file)
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
+#include <linux/debugfs.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
 
 static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 {
@@ -142,7 +144,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
 #if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
-                       if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+                       if (!rdev->ddev->agp) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
@@ -753,6 +755,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
 
        if (!rdev->mman.initialized)
                return;
+       radeon_ttm_debugfs_fini(rdev);
        if (rdev->stollen_vga_memory) {
                r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
                if (r == 0) {
@@ -832,16 +835,15 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
        return 0;
 }
 
-
-#define RADEON_DEBUGFS_MEM_TYPES 2
-
 #if defined(CONFIG_DEBUG_FS)
+
 static int radeon_mm_dump_table(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+       unsigned ttm_pl = *(int *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
        int ret;
        struct ttm_bo_global *glob = rdev->mman.bdev.glob;
 
@@ -850,46 +852,169 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
        spin_unlock(&glob->lru_lock);
        return ret;
 }
+
+static int ttm_pl_vram = TTM_PL_VRAM;
+static int ttm_pl_tt = TTM_PL_TT;
+
+static struct drm_info_list radeon_ttm_debugfs_list[] = {
+       {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
+       {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
+       {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+#ifdef CONFIG_SWIOTLB
+       {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
 #endif
+};
 
-static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
 {
-#if defined(CONFIG_DEBUG_FS)
-       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
-       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
-       unsigned i;
+       struct radeon_device *rdev = inode->i_private;
+       i_size_write(inode, rdev->mc.mc_vram_size);
+       filep->private_data = inode->i_private;
+       return 0;
+}
 
-       for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
-               if (i == 0)
-                       sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
-               else
-                       sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
-               radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-               radeon_mem_types_list[i].show = &radeon_mm_dump_table;
-               radeon_mem_types_list[i].driver_features = 0;
-               if (i == 0)
-                       radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
-               else
-                       radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+{
+       struct radeon_device *rdev = f->private_data;
+       ssize_t result = 0;
+       int r;
 
+       if (size & 0x3 || *pos & 0x3)
+               return -EINVAL;
+
+       while (size) {
+               unsigned long flags;
+               uint32_t value;
+
+               if (*pos >= rdev->mc.mc_vram_size)
+                       return result;
+
+               spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+               WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
+               if (rdev->family >= CHIP_CEDAR)
+                       WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
+               value = RREG32(RADEON_MM_DATA);
+               spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+               r = put_user(value, (uint32_t *)buf);
+               if (r)
+                       return r;
+
+               result += 4;
+               buf += 4;
+               *pos += 4;
+               size -= 4;
        }
-       /* Add ttm page pool to debugfs */
-       sprintf(radeon_mem_types_names[i], "ttm_page_pool");
-       radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-       radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
-       radeon_mem_types_list[i].driver_features = 0;
-       radeon_mem_types_list[i++].data = NULL;
-#ifdef CONFIG_SWIOTLB
-       if (swiotlb_nr_tbl()) {
-               sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
-               radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-               radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
-               radeon_mem_types_list[i].driver_features = 0;
-               radeon_mem_types_list[i++].data = NULL;
+
+       return result;
+}
+
+static const struct file_operations radeon_ttm_vram_fops = {
+       .owner = THIS_MODULE,
+       .open = radeon_ttm_vram_open,
+       .read = radeon_ttm_vram_read,
+       .llseek = default_llseek
+};
+
+static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
+{
+       struct radeon_device *rdev = inode->i_private;
+       i_size_write(inode, rdev->mc.gtt_size);
+       filep->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
+                                  size_t size, loff_t *pos)
+{
+       struct radeon_device *rdev = f->private_data;
+       ssize_t result = 0;
+       int r;
+
+       while (size) {
+               loff_t p = *pos / PAGE_SIZE;
+               unsigned off = *pos & ~PAGE_MASK;
+               ssize_t cur_size = min(size, PAGE_SIZE - off);
+               struct page *page;
+               void *ptr;
+
+               if (p >= rdev->gart.num_cpu_pages)
+                       return result;
+
+               page = rdev->gart.pages[p];
+               if (page) {
+                       ptr = kmap(page);
+                       ptr += off;
+
+                       r = copy_to_user(buf, ptr, cur_size);
+                       kunmap(rdev->gart.pages[p]);
+               } else
+                       r = clear_user(buf, cur_size);
+
+               if (r)
+                       return -EFAULT;
+
+               result += cur_size;
+               buf += cur_size;
+               *pos += cur_size;
+               size -= cur_size;
        }
+
+       return result;
+}
+
+static const struct file_operations radeon_ttm_gtt_fops = {
+       .owner = THIS_MODULE,
+       .open = radeon_ttm_gtt_open,
+       .read = radeon_ttm_gtt_read,
+       .llseek = default_llseek
+};
+
 #endif
-       return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       unsigned count;
+
+       struct drm_minor *minor = rdev->ddev->primary;
+       struct dentry *ent, *root = minor->debugfs_root;
+
+       ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
+                                 rdev, &radeon_ttm_vram_fops);
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
+       rdev->mman.vram = ent;
+
+       ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
+                                 rdev, &radeon_ttm_gtt_fops);
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
+       rdev->mman.gtt = ent;
+
+       count = ARRAY_SIZE(radeon_ttm_debugfs_list);
+
+#ifdef CONFIG_SWIOTLB
+       if (!swiotlb_nr_tbl())
+               --count;
 #endif
+
+       return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
+#else
+
        return 0;
+#endif
+}
+
+static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+       debugfs_remove(rdev->mman.vram);
+       rdev->mman.vram = NULL;
+
+       debugfs_remove(rdev->mman.gtt);
+       rdev->mman.gtt = NULL;
+#endif
 }
index b9c0529b4a2e1e9d8f69e51f6742d023492a0040..6781fee1eaadc21a68e50de696dd353be0a3e9e5 100644 (file)
@@ -91,6 +91,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
        case CHIP_VERDE:
        case CHIP_PITCAIRN:
        case CHIP_ARUBA:
+       case CHIP_OLAND:
                fw_name = FIRMWARE_TAHITI;
                break;
 
@@ -778,6 +779,8 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
 
        if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+                       radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
+                                                &rdev->pm.dpm.hd);
                        radeon_dpm_enable_uvd(rdev, false);
                } else {
                        radeon_set_uvd_clocks(rdev, 0, 0);
index 9566b5940a5ae723f90dfa11427e3f09bac233b3..b5c2369cda2fe28ca043fe91f4fcfe12fc227fc6 100644 (file)
@@ -474,6 +474,8 @@ int rs400_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
@@ -484,6 +486,7 @@ int rs400_resume(struct radeon_device *rdev)
 
 int rs400_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
@@ -493,6 +496,7 @@ int rs400_suspend(struct radeon_device *rdev)
 
 void rs400_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -560,6 +564,9 @@ int rs400_init(struct radeon_device *rdev)
                return r;
        r300_set_reg_safe(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = rs400_startup(rdev);
        if (r) {
index 76cc8d3aafec461d0b41668f25500f2a4753dbc3..fdcde7693032c28cc30008b494fd573fd82e58f3 100644 (file)
@@ -1048,6 +1048,8 @@ int rs600_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = rs600_startup(rdev);
        if (r) {
@@ -1058,6 +1060,7 @@ int rs600_resume(struct radeon_device *rdev)
 
 int rs600_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -1068,6 +1071,7 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r600_audio_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
@@ -1136,6 +1140,9 @@ int rs600_init(struct radeon_device *rdev)
                return r;
        rs600_set_safe_registers(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = rs600_startup(rdev);
        if (r) {
index e7dab069cccf48a05e19cfd7caf27bd65d38c53d..35950738bd5e449465e0e5062d096e3e8ae9ff0e 100644 (file)
@@ -756,6 +756,8 @@ int rs690_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = rs690_startup(rdev);
        if (r) {
@@ -766,6 +768,7 @@ int rs690_resume(struct radeon_device *rdev)
 
 int rs690_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
@@ -776,6 +779,7 @@ int rs690_suspend(struct radeon_device *rdev)
 
 void rs690_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r600_audio_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
@@ -845,6 +849,9 @@ int rs690_init(struct radeon_device *rdev)
                return r;
        rs600_set_safe_registers(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = rs690_startup(rdev);
        if (r) {
index 6af8505cf4d2db624ee64811ba4575158d90e974..8512085b0aefe0139c6ffabdc69114510ae3e2f3 100644 (file)
@@ -623,14 +623,6 @@ int rs780_dpm_enable(struct radeon_device *rdev)
        if (pi->gfx_clock_gating)
                r600_gfx_clockgating_enable(rdev, true);
 
-       if (rdev->irq.installed && (rdev->pm.int_thermal_type == THERMAL_TYPE_RV6XX)) {
-               ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-               if (ret)
-                       return ret;
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-       }
-
        return 0;
 }
 
index 5d1c316115efa31ab7b029de44db66e399322e71..98e8138ff77945ecdba447bdcb94310c30dcb1c7 100644 (file)
@@ -586,6 +586,8 @@ int rv515_resume(struct radeon_device *rdev)
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r =  rv515_startup(rdev);
        if (r) {
@@ -596,6 +598,7 @@ int rv515_resume(struct radeon_device *rdev)
 
 int rv515_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        rs600_irq_disable(rdev);
@@ -612,6 +615,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
 
 void rv515_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -685,6 +689,9 @@ int rv515_init(struct radeon_device *rdev)
                return r;
        rv515_set_safe_registers(rdev);
 
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->accel_working = true;
        r = rv515_startup(rdev);
        if (r) {
index 26633a0252522051bc5a504357d93bd9b6257c67..bebf31c4d841ccaa07b86d9bae96c7abb5c223be 100644 (file)
@@ -1546,7 +1546,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev)
 {
        struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
-       int ret;
 
        if (r600_dynamicpm_enabled(rdev))
                return -EINVAL;
@@ -1594,15 +1593,6 @@ int rv6xx_dpm_enable(struct radeon_device *rdev)
        r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, true);
        r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, true);
 
-       if (rdev->irq.installed &&
-           r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
-               ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-               if (ret)
-                       return ret;
-               rdev->irq.dpm_thermal = true;
-               radeon_irq_set(rdev);
-       }
-
        rv6xx_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
        r600_start_dpm(rdev);
index 9f5846743c9e0a26dbcf76662250bfb957f6ced1..6c772e58c7845e7d4170287d3e583afd64c5a3e8 100644 (file)
@@ -1071,7 +1071,8 @@ static void rv770_mc_program(struct radeon_device *rdev)
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
-       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+       if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
        WREG32(SCRATCH_UMSK, 0);
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -1123,6 +1124,35 @@ void r700_cp_fini(struct radeon_device *rdev)
        radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
+void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+       u32 tmp, i;
+
+       if (rdev->flags & RADEON_IS_IGP)
+               return;
+
+       tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+       tmp &= SCLK_MUX_SEL_MASK;
+       tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
+       WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
+                       break;
+               udelay(1);
+       }
+
+       tmp &= ~SCLK_MUX_UPDATE;
+       WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+       tmp = RREG32(MPLL_CNTL_MODE);
+       if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+               tmp &= ~RV730_MPLL_MCLK_SEL;
+       else
+               tmp &= ~MPLL_MCLK_SEL;
+       WREG32(MPLL_CNTL_MODE, tmp);
+}
+
 /*
  * Core functions
  */
@@ -1665,14 +1695,6 @@ static int rv770_startup(struct radeon_device *rdev)
 
        rv770_mc_program(rdev);
 
-       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-               r = r600_init_microcode(rdev);
-               if (r) {
-                       DRM_ERROR("Failed to load firmware!\n");
-                       return r;
-               }
-       }
-
        if (rdev->flags & RADEON_IS_AGP) {
                rv770_agp_enable(rdev);
        } else {
@@ -1728,14 +1750,12 @@ static int rv770_startup(struct radeon_device *rdev)
 
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                            DMA_RB_RPTR, DMA_RB_WPTR,
                             DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        if (r)
                return r;
@@ -1754,7 +1774,6 @@ static int rv770_startup(struct radeon_device *rdev)
        ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
        if (ring->ring_size) {
                r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
                                     RADEON_CP_PACKET2);
                if (!r)
                        r = uvd_v1_0_init(rdev);
@@ -1792,6 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
        /* init golden registers */
        rv770_init_golden_registers(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = rv770_startup(rdev);
        if (r) {
@@ -1806,6 +1827,7 @@ int rv770_resume(struct radeon_device *rdev)
 
 int rv770_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        r600_audio_fini(rdev);
        uvd_v1_0_fini(rdev);
        radeon_uvd_suspend(rdev);
@@ -1876,6 +1898,17 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+               r = r600_init_microcode(rdev);
+               if (r) {
+                       DRM_ERROR("Failed to load firmware!\n");
+                       return r;
+               }
+       }
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
@@ -1915,6 +1948,7 @@ int rv770_init(struct radeon_device *rdev)
 
 void rv770_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        r700_cp_fini(rdev);
        r600_dma_fini(rdev);
        r600_irq_fini(rdev);
index 374499db20c7e59b55d956b066a30bdc42b4f71f..80c595aba359b53c4f748feae3aeab9d207f5db8 100644 (file)
@@ -1863,8 +1863,8 @@ void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
        }
 }
 
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
-                                       int min_temp, int max_temp)
+static int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
+                                              int min_temp, int max_temp)
 {
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
@@ -1966,6 +1966,15 @@ int rv770_dpm_enable(struct radeon_device *rdev)
        if (pi->mg_clock_gating)
                rv770_mg_clock_gating_enable(rdev, true);
 
+       rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+       return 0;
+}
+
+int rv770_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                PPSMC_Result result;
@@ -1981,8 +1990,6 @@ int rv770_dpm_enable(struct radeon_device *rdev)
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
        }
 
-       rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
        return 0;
 }
 
@@ -2244,14 +2251,12 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
                pl->vddci = vddci;
        }
 
-       if (rdev->family >= CHIP_BARTS) {
-               if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
-                   ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
-                       rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
-                       rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
-                       rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
-                       rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
-               }
+       if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+           ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+               rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+               rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+               rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+               rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
        }
 }
 
@@ -2531,6 +2536,12 @@ bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
            (rdev->pdev->subsystem_device == 0x1c42))
                switch_limit = 200;
 
+       /* RV770 */
+       /* mclk switching doesn't seem to work reliably on desktop RV770s */
+       if ((rdev->family == CHIP_RV770) &&
+           !(rdev->flags & RADEON_IS_MOBILITY))
+               switch_limit = 0xffffffff; /* disable mclk switching */
+
        if (vblank_time < switch_limit)
                return true;
        else
index 9244effc6b59e2e69c6882b0d4dc35dd2517379c..f776634840c9782bde2e9228007720448aa469d1 100644 (file)
@@ -283,8 +283,4 @@ int rv770_read_smc_soft_register(struct radeon_device *rdev,
 int rv770_write_smc_soft_register(struct radeon_device *rdev,
                                  u16 reg_offset, u32 value);
 
-/* thermal */
-int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
-                                       int min_temp, int max_temp);
-
 #endif
index 1ae277152cc7f0c66f7babbcb0cdfcec5ab04f71..3cf1e2921545f9a980569925088d720e347505d1 100644 (file)
 #define        CG_SPLL_FUNC_CNTL_2                             0x604
 #define                SCLK_MUX_SEL(x)                         ((x) << 0)
 #define                SCLK_MUX_SEL_MASK                       (0x1ff << 0)
+#define                SCLK_MUX_UPDATE                         (1 << 26)
 #define        CG_SPLL_FUNC_CNTL_3                             0x608
 #define                SPLL_FB_DIV(x)                          ((x) << 0)
 #define                SPLL_FB_DIV_MASK                        (0x3ffffff << 0)
 #define                SPLL_DITHEN                             (1 << 28)
+#define        CG_SPLL_STATUS                                  0x60c
+#define                SPLL_CHG_STATUS                         (1 << 1)
 
 #define        SPLL_CNTL_MODE                                  0x610
 #define                SPLL_DIV_SYNC                           (1 << 5)
 
+#define MPLL_CNTL_MODE                                  0x61c
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#       define RV730_MPLL_MCLK_SEL                      (1 << 25)
+
 #define        MPLL_AD_FUNC_CNTL                               0x624
 #define                CLKF(x)                                 ((x) << 0)
 #define                CLKF_MASK                               (0x7f << 0)
index 85e1edfaa3bed0814e262378ae0a7558834936d3..09ec4f6c53bb2202dec2e9c039a3877593d2c4fe 100644 (file)
@@ -80,6 +80,8 @@ extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
                                         bool enable);
+static void si_init_pg(struct radeon_device *rdev);
+static void si_init_cg(struct radeon_device *rdev);
 static void si_fini_pg(struct radeon_device *rdev);
 static void si_fini_cg(struct radeon_device *rdev);
 static void si_rlc_stop(struct radeon_device *rdev);
@@ -1460,7 +1462,7 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
 };
 
 /* ucode loading */
-static int si_mc_load_microcode(struct radeon_device *rdev)
+int si_mc_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
        u32 running, blackout = 0;
@@ -3247,7 +3249,8 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
        if (enable)
                WREG32(CP_ME_CNTL, 0);
        else {
-               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+               if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+                       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
                WREG32(SCRATCH_UMSK, 0);
                rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -3508,6 +3511,9 @@ static int si_cp_resume(struct radeon_device *rdev)
 
        si_enable_gui_idle_interrupt(rdev, true);
 
+       if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+               radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
        return 0;
 }
 
@@ -3724,6 +3730,106 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        evergreen_print_gpu_status_regs(rdev);
 }
 
+static void si_set_clk_bypass_mode(struct radeon_device *rdev)
+{
+       u32 tmp, i;
+
+       tmp = RREG32(CG_SPLL_FUNC_CNTL);
+       tmp |= SPLL_BYPASS_EN;
+       WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+       tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+       tmp |= SPLL_CTLREQ_CHG;
+       WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+                       break;
+               udelay(1);
+       }
+
+       tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+       tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+       WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+       tmp = RREG32(MPLL_CNTL_MODE);
+       tmp &= ~MPLL_MCLK_SEL;
+       WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       tmp = RREG32(SPLL_CNTL_MODE);
+       tmp |= SPLL_SW_DIR_CONTROL;
+       WREG32(SPLL_CNTL_MODE, tmp);
+
+       tmp = RREG32(CG_SPLL_FUNC_CNTL);
+       tmp |= SPLL_RESET;
+       WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+       tmp = RREG32(CG_SPLL_FUNC_CNTL);
+       tmp |= SPLL_SLEEP;
+       WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+       tmp = RREG32(SPLL_CNTL_MODE);
+       tmp &= ~SPLL_SW_DIR_CONTROL;
+       WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static void si_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+       struct evergreen_mc_save save;
+       u32 tmp, i;
+
+       dev_info(rdev->dev, "GPU pci config reset\n");
+
+       /* disable dpm? */
+
+       /* disable cg/pg */
+       si_fini_pg(rdev);
+       si_fini_cg(rdev);
+
+       /* Disable CP parsing/prefetching */
+       WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+       /* dma0 */
+       tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+       tmp &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+       /* dma1 */
+       tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+       tmp &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+       /* XXX other engines? */
+
+       /* halt the rlc, disable cp internal ints */
+       si_rlc_stop(rdev);
+
+       udelay(50);
+
+       /* disable mem access */
+       evergreen_mc_stop(rdev, &save);
+       if (evergreen_mc_wait_for_idle(rdev)) {
+               dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+       }
+
+       /* set mclk/sclk to bypass */
+       si_set_clk_bypass_mode(rdev);
+       /* powerdown spll */
+       si_spll_powerdown(rdev);
+       /* disable BM */
+       pci_clear_master(rdev->pdev);
+       /* reset */
+       radeon_pci_config_reset(rdev);
+       /* wait for asic to come out of reset */
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+}
+
 int si_asic_reset(struct radeon_device *rdev)
 {
        u32 reset_mask;
@@ -3733,10 +3839,17 @@ int si_asic_reset(struct radeon_device *rdev)
        if (reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, true);
 
+       /* try soft reset */
        si_gpu_soft_reset(rdev, reset_mask);
 
        reset_mask = si_gpu_check_soft_reset(rdev);
 
+       /* try pci config reset */
+       if (reset_mask && radeon_hard_reset)
+               si_gpu_pci_config_reset(rdev);
+
+       reset_mask = si_gpu_check_soft_reset(rdev);
+
        if (!reset_mask)
                r600_set_bios_scratch_engine_hung(rdev, false);
 
@@ -5212,8 +5325,8 @@ static void si_enable_hdp_ls(struct radeon_device *rdev,
                WREG32(HDP_MEM_POWER_LS, data);
 }
 
-void si_update_cg(struct radeon_device *rdev,
-                 u32 block, bool enable)
+static void si_update_cg(struct radeon_device *rdev,
+                        u32 block, bool enable)
 {
        if (block & RADEON_CG_BLOCK_GFX) {
                si_enable_gui_idle_interrupt(rdev, false);
@@ -5379,6 +5492,9 @@ static void si_init_pg(struct radeon_device *rdev)
                si_init_ao_cu_mask(rdev);
                if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
                        si_init_gfx_cgpg(rdev);
+               } else {
+                       WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+                       WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
                }
                si_enable_dma_pg(rdev, true);
                si_enable_gfx_cgpg(rdev, true);
@@ -5566,7 +5682,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        }
 
        if (!ASIC_IS_NODCE(rdev)) {
-               WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+               WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
 
                tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                WREG32(DC_HPD1_INT_CONTROL, tmp);
@@ -6324,21 +6440,14 @@ static int si_startup(struct radeon_device *rdev)
 
        si_mc_program(rdev);
 
-       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
-           !rdev->rlc_fw || !rdev->mc_fw) {
-               r = si_init_microcode(rdev);
+       if (!rdev->pm.dpm_enabled) {
+               r = si_mc_load_microcode(rdev);
                if (r) {
-                       DRM_ERROR("Failed to load firmware!\n");
+                       DRM_ERROR("Failed to load MC firmware!\n");
                        return r;
                }
        }
 
-       r = si_mc_load_microcode(rdev);
-       if (r) {
-               DRM_ERROR("Failed to load MC firmware!\n");
-               return r;
-       }
-
        r = si_pcie_gart_enable(rdev);
        if (r)
                return r;
@@ -6421,37 +6530,30 @@ static int si_startup(struct radeon_device *rdev)
 
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-                            CP_RB0_RPTR, CP_RB0_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
-                            CP_RB1_RPTR, CP_RB1_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
-                            CP_RB2_RPTR, CP_RB2_WPTR,
                             RADEON_CP_PACKET2);
        if (r)
                return r;
 
        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-                            DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
-                            DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
                             DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
        if (r)
                return r;
 
        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
-                            DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
-                            DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
                             DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
        if (r)
                return r;
@@ -6471,7 +6573,6 @@ static int si_startup(struct radeon_device *rdev)
                ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
                if (ring->ring_size) {
                        r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-                                            UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
                                             RADEON_CP_PACKET2);
                        if (!r)
                                r = uvd_v1_0_init(rdev);
@@ -6513,6 +6614,8 @@ int si_resume(struct radeon_device *rdev)
        /* init golden registers */
        si_init_golden_registers(rdev);
 
+       radeon_pm_resume(rdev);
+
        rdev->accel_working = true;
        r = si_startup(rdev);
        if (r) {
@@ -6527,6 +6630,7 @@ int si_resume(struct radeon_device *rdev)
 
 int si_suspend(struct radeon_device *rdev)
 {
+       radeon_pm_suspend(rdev);
        dce6_audio_fini(rdev);
        radeon_vm_manager_fini(rdev);
        si_cp_enable(rdev, false);
@@ -6600,6 +6704,18 @@ int si_init(struct radeon_device *rdev)
        if (r)
                return r;
 
+       if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+           !rdev->rlc_fw || !rdev->mc_fw) {
+               r = si_init_microcode(rdev);
+               if (r) {
+                       DRM_ERROR("Failed to load firmware!\n");
+                       return r;
+               }
+       }
+
+       /* Initialize power management */
+       radeon_pm_init(rdev);
+
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
@@ -6666,6 +6782,7 @@ int si_init(struct radeon_device *rdev)
 
 void si_fini(struct radeon_device *rdev)
 {
+       radeon_pm_fini(rdev);
        si_cp_fini(rdev);
        cayman_dma_fini(rdev);
        si_fini_pg(rdev);
index 0b00c790fb7713d8b4cbddb91c9b319ecc41c3b9..0471501338fbbeb9e7b72ee572b2bda126ab475a 100644 (file)
@@ -1738,6 +1738,8 @@ struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
 struct ni_power_info *ni_get_pi(struct radeon_device *rdev);
 struct ni_ps *ni_get_ps(struct radeon_ps *rps);
 
+extern int si_mc_load_microcode(struct radeon_device *rdev);
+
 static int si_populate_voltage_value(struct radeon_device *rdev,
                                     const struct atom_voltage_table *table,
                                     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
@@ -1753,9 +1755,6 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
                                    u32 engine_clock,
                                    SISLANDS_SMC_SCLK_VALUE *sclk);
 
-extern void si_update_cg(struct radeon_device *rdev,
-                        u32 block, bool enable);
-
 static struct si_power_info *si_get_pi(struct radeon_device *rdev)
 {
         struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -2396,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
        if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
                enable_sq_ramping = false;
 
-       if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+       if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
                enable_sq_ramping = false;
 
        for (i = 0; i < state->performance_level_count; i++) {
@@ -3591,10 +3590,9 @@ static void si_program_display_gap(struct radeon_device *rdev)
 
        /* Setting this to false forces the performance state to low if the crtcs are disabled.
         * This can be a problem on PowerXpress systems or if you want to use the card
-        * for offscreen rendering or compute if there are no crtcs enabled.  Set it to
-        * true for now so that performance scales even if the displays are off.
+        * for offscreen rendering or compute if there are no crtcs enabled.
         */
-       si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
+       si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
 }
 
 static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -5414,7 +5412,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
 
        for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
                if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
-                       if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                break;
                        mc_reg_table->address[i].s0 =
                                cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
@@ -5754,6 +5752,11 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
 
 void si_dpm_setup_asic(struct radeon_device *rdev)
 {
+       int r;
+
+       r = si_mc_load_microcode(rdev);
+       if (r)
+               DRM_ERROR("Failed to load MC firmware!\n");
        rv770_get_memory_type(rdev);
        si_read_clock_registers(rdev);
        si_enable_acpi_power_management(rdev);
@@ -5791,13 +5794,6 @@ int si_dpm_enable(struct radeon_device *rdev)
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
        int ret;
 
-       si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                           RADEON_CG_BLOCK_MC |
-                           RADEON_CG_BLOCK_SDMA |
-                           RADEON_CG_BLOCK_BIF |
-                           RADEON_CG_BLOCK_UVD |
-                           RADEON_CG_BLOCK_HDP), false);
-
        if (si_is_smc_running(rdev))
                return -EINVAL;
        if (pi->voltage_control)
@@ -5900,6 +5896,17 @@ int si_dpm_enable(struct radeon_device *rdev)
        si_enable_sclk_control(rdev, true);
        si_start_dpm(rdev);
 
+       si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+       ni_update_current_ps(rdev, boot_ps);
+
+       return 0;
+}
+
+int si_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                PPSMC_Result result;
@@ -5915,17 +5922,6 @@ int si_dpm_enable(struct radeon_device *rdev)
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
        }
 
-       si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
-       si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                           RADEON_CG_BLOCK_MC |
-                           RADEON_CG_BLOCK_SDMA |
-                           RADEON_CG_BLOCK_BIF |
-                           RADEON_CG_BLOCK_UVD |
-                           RADEON_CG_BLOCK_HDP), true);
-
-       ni_update_current_ps(rdev, boot_ps);
-
        return 0;
 }
 
@@ -5934,13 +5930,6 @@ void si_dpm_disable(struct radeon_device *rdev)
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 
-       si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                           RADEON_CG_BLOCK_MC |
-                           RADEON_CG_BLOCK_SDMA |
-                           RADEON_CG_BLOCK_BIF |
-                           RADEON_CG_BLOCK_UVD |
-                           RADEON_CG_BLOCK_HDP), false);
-
        if (!si_is_smc_running(rdev))
                return;
        si_disable_ulv(rdev);
@@ -6005,13 +5994,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
        struct radeon_ps *old_ps = &eg_pi->current_rps;
        int ret;
 
-       si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                           RADEON_CG_BLOCK_MC |
-                           RADEON_CG_BLOCK_SDMA |
-                           RADEON_CG_BLOCK_BIF |
-                           RADEON_CG_BLOCK_UVD |
-                           RADEON_CG_BLOCK_HDP), false);
-
        ret = si_disable_ulv(rdev);
        if (ret) {
                DRM_ERROR("si_disable_ulv failed\n");
@@ -6104,13 +6086,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
                return ret;
        }
 
-       si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
-                           RADEON_CG_BLOCK_MC |
-                           RADEON_CG_BLOCK_SDMA |
-                           RADEON_CG_BLOCK_BIF |
-                           RADEON_CG_BLOCK_UVD |
-                           RADEON_CG_BLOCK_HDP), true);
-
        return 0;
 }
 
index d422a1cbf727b375c467bcf78dc417431c7db2fd..e80efcf0c2306e812b4462ab930464bc03c8c5b5 100644 (file)
@@ -28,6 +28,7 @@
 #include "sid.h"
 #include "ppsmc.h"
 #include "radeon_ucode.h"
+#include "sislands_smc.h"
 
 static int si_set_smc_sram_address(struct radeon_device *rdev,
                                   u32 smc_address, u32 limit)
index b322acc48097f632a0fa1ac3204facc3287912f5..9239a6d291280765ef42b2e79e0c02f4719f33f9 100644 (file)
@@ -94,6 +94,8 @@
 #define        CG_SPLL_FUNC_CNTL_2                             0x604
 #define                SCLK_MUX_SEL(x)                         ((x) << 0)
 #define                SCLK_MUX_SEL_MASK                       (0x1ff << 0)
+#define                SPLL_CTLREQ_CHG                         (1 << 23)
+#define                SCLK_MUX_UPDATE                         (1 << 26)
 #define        CG_SPLL_FUNC_CNTL_3                             0x608
 #define                SPLL_FB_DIV(x)                          ((x) << 0)
 #define                SPLL_FB_DIV_MASK                        (0x3ffffff << 0)
 #define                SPLL_DITHEN                             (1 << 28)
 #define        CG_SPLL_FUNC_CNTL_4                             0x60c
 
+#define        SPLL_STATUS                                     0x614
+#define                SPLL_CHG_STATUS                         (1 << 1)
 #define        SPLL_CNTL_MODE                                  0x618
+#define                SPLL_SW_DIR_CONTROL                     (1 << 0)
 #      define SPLL_REFCLK_SEL(x)                       ((x) << 8)
 #      define SPLL_REFCLK_SEL_MASK                     0xFF00
 
 #       define MRDCK0_BYPASS                            (1 << 24)
 #       define MRDCK1_BYPASS                            (1 << 25)
 
+#define        MPLL_CNTL_MODE                                  0x2bb0
+#       define MPLL_MCLK_SEL                            (1 << 11)
 #define        MPLL_FUNC_CNTL                                  0x2bb4
 #define                BWCTRL(x)                               ((x) << 20)
 #define                BWCTRL_MASK                             (0xff << 20)
 #       define GRPH_PFLIP_INT_MASK                      (1 << 0)
 #       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
 
-#define        DACA_AUTODETECT_INT_CONTROL                     0x66c8
+#define        DAC_AUTODETECT_INT_CONTROL                      0x67c8
 
 #define DC_HPD1_INT_STATUS                              0x601c
 #define DC_HPD2_INT_STATUS                              0x6028
index 5578e9837026fec65ac023a71c5ed3400d8d0734..10e945a49479e3e9bb611ec98cb62b409046acf6 100644 (file)
@@ -374,8 +374,6 @@ typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
 
 #pragma pack(pop)
 
-int si_set_smc_sram_address(struct radeon_device *rdev,
-                           u32 smc_address, u32 limit);
 int si_copy_bytes_to_smc(struct radeon_device *rdev,
                         u32 smc_start_address,
                         const u8 *src, u32 byte_count, u32 limit);
index 96ea6db8bf575e7a45f6af501e26120f7e996a9a..f121efe12dc5c10a4fd6f1c5283ee7f87e8b0797 100644 (file)
@@ -71,7 +71,7 @@ static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
        SUMO_DTC_DFLT_14,
 };
 
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
+static struct sumo_ps *sumo_get_ps(struct radeon_ps *rps)
 {
        struct sumo_ps *ps = rps->ps_priv;
 
@@ -1202,14 +1202,10 @@ static void sumo_update_requested_ps(struct radeon_device *rdev,
 int sumo_dpm_enable(struct radeon_device *rdev)
 {
        struct sumo_power_info *pi = sumo_get_pi(rdev);
-       int ret;
 
        if (sumo_dpm_enabled(rdev))
                return -EINVAL;
 
-       ret = sumo_enable_clock_power_gating(rdev);
-       if (ret)
-               return ret;
        sumo_program_bootup_state(rdev);
        sumo_init_bsp(rdev);
        sumo_reset_am(rdev);
@@ -1233,6 +1229,19 @@ int sumo_dpm_enable(struct radeon_device *rdev)
        if (pi->enable_boost)
                sumo_enable_boost_timer(rdev);
 
+       sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+       return 0;
+}
+
+int sumo_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
+       ret = sumo_enable_clock_power_gating(rdev);
+       if (ret)
+               return ret;
+
        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1242,8 +1251,6 @@ int sumo_dpm_enable(struct radeon_device *rdev)
                radeon_irq_set(rdev);
        }
 
-       sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
-
        return 0;
 }
 
index 18abba5b5810b467f23efb012b9eee24e5e3a155..fb081d2ae37477e37b0ec28068c7d45112e19bc0 100644 (file)
@@ -31,7 +31,6 @@
 #define SUMO_SMU_SERVICE_ROUTINE_ALTVDDNB_NOTIFY  27
 #define SUMO_SMU_SERVICE_ROUTINE_GFX_SRV_ID_20  20
 
-struct sumo_ps *sumo_get_ps(struct radeon_ps *rps);
 struct sumo_power_info *sumo_get_pi(struct radeon_device *rdev);
 
 static void sumo_send_msg_to_smu(struct radeon_device *rdev, u32 id)
index d700698a1f224bea67bd585061289e56735072e4..2d447192d6f7356b1f37af690e1242932b23d7bf 100644 (file)
@@ -342,14 +342,14 @@ static void trinity_apply_state_adjust_rules(struct radeon_device *rdev,
                                             struct radeon_ps *new_rps,
                                             struct radeon_ps *old_rps);
 
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
+static struct trinity_ps *trinity_get_ps(struct radeon_ps *rps)
 {
        struct trinity_ps *ps = rps->ps_priv;
 
        return ps;
 }
 
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
+static struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev)
 {
        struct trinity_power_info *pi = rdev->pm.dpm.priv;
 
@@ -1082,7 +1082,6 @@ void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
 int trinity_dpm_enable(struct radeon_device *rdev)
 {
        struct trinity_power_info *pi = trinity_get_pi(rdev);
-       int ret;
 
        trinity_acquire_mutex(rdev);
 
@@ -1091,7 +1090,6 @@ int trinity_dpm_enable(struct radeon_device *rdev)
                return -EINVAL;
        }
 
-       trinity_enable_clock_power_gating(rdev);
        trinity_program_bootup_state(rdev);
        sumo_program_vc(rdev, 0x00C00033);
        trinity_start_am(rdev);
@@ -1105,6 +1103,18 @@ int trinity_dpm_enable(struct radeon_device *rdev)
        trinity_dpm_bapm_enable(rdev, false);
        trinity_release_mutex(rdev);
 
+       trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+       return 0;
+}
+
+int trinity_dpm_late_enable(struct radeon_device *rdev)
+{
+       int ret;
+
+       trinity_acquire_mutex(rdev);
+       trinity_enable_clock_power_gating(rdev);
+
        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                ret = trinity_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1115,8 +1125,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
                rdev->irq.dpm_thermal = true;
                radeon_irq_set(rdev);
        }
-
-       trinity_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+       trinity_release_mutex(rdev);
 
        return 0;
 }
index 9672bcbc7312218a357f07ae54393ed511284279..99dd0455334d1a433c880565ee84dd60e14989c9 100644 (file)
@@ -27,9 +27,6 @@
 #include "trinity_dpm.h"
 #include "ppsmc.h"
 
-struct trinity_ps *trinity_get_ps(struct radeon_ps *rps);
-struct trinity_power_info *trinity_get_pi(struct radeon_device *rdev);
-
 static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
 {
        int i;
index b19ef4951085b06422aa7b2f8b1f634fba086d87..824550db3fed59e2220953e9e9976b8b28443260 100644 (file)
@@ -153,6 +153,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
                chip_id = 0x01000015;
                break;
        case CHIP_PITCAIRN:
+       case CHIP_OLAND:
                chip_id = 0x01000016;
                break;
        case CHIP_ARUBA:
index a9d24e4bf79280c90991c155634c0ca8bd87c1d9..fbf4be316d0b675f89be45db4d820952af079287 100644 (file)
@@ -371,7 +371,6 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
                goto error;
 
        rcrtc->plane->format = format;
-       rcrtc->plane->pitch = crtc->fb->pitches[0];
 
        rcrtc->plane->src_x = x;
        rcrtc->plane->src_y = y;
@@ -413,7 +412,7 @@ static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        rcrtc->plane->src_x = x;
        rcrtc->plane->src_y = y;
 
-       rcar_du_crtc_update_base(to_rcar_crtc(crtc));
+       rcar_du_crtc_update_base(rcrtc);
 
        return 0;
 }
index 0023f9719cf18fda9e2f3232bae5bf178e15de7d..792fd1d20e865df1125294dcfbf626e6a7a16524 100644 (file)
@@ -224,7 +224,9 @@ static int rcar_du_probe(struct platform_device *pdev)
 
 static int rcar_du_remove(struct platform_device *pdev)
 {
-       drm_platform_exit(&rcar_du_driver, pdev);
+       struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+
+       drm_put_dev(rcdu->ddev);
 
        return 0;
 }
@@ -249,8 +251,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
 };
 
 static const struct rcar_du_device_info rcar_du_r8a7790_info = {
-       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
-                 | RCAR_DU_FEATURE_DEFR8,
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+       .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
        .num_crtcs = 3,
        .routes = {
                /* R8A7790 has one RGB output, two LVDS outputs and one
@@ -272,9 +274,29 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
        .num_lvds = 2,
 };
 
+static const struct rcar_du_device_info rcar_du_r8a7791_info = {
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_DEFR8,
+       .num_crtcs = 2,
+       .routes = {
+               /* R8A7791 has one RGB output, one LVDS output and one
+                * (currently unsupported) TCON output.
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(1),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+               [RCAR_DU_OUTPUT_LVDS0] = {
+                       .possible_crtcs = BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_LVDS,
+               },
+       },
+       .num_lvds = 1,
+};
+
 static const struct platform_device_id rcar_du_id_table[] = {
        { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
        { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+       { "rcar-du-r8a7791", (kernel_ulong_t)&rcar_du_r8a7791_info },
        { }
 };
 
index 65d2d636b002d9fbf32fb2c6391b8df4bc63ff03..e31b735d3f258b69b65ab97320ddfe7440d7d6b9 100644 (file)
@@ -28,8 +28,10 @@ struct rcar_du_device;
 struct rcar_du_lvdsenc;
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0)        /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_ALIGN_128B     (1 << 1)        /* Align pitches to 128 bytes */
-#define RCAR_DU_FEATURE_DEFR8          (1 << 2)        /* Has DEFR8 register */
+#define RCAR_DU_FEATURE_DEFR8          (1 << 1)        /* Has DEFR8 register */
+
+#define RCAR_DU_QUIRK_ALIGN_128B       (1 << 0)        /* Align pitches to 128 bytes */
+#define RCAR_DU_QUIRK_LVDS_LANES       (1 << 1)        /* LVDS lanes 1 and 3 inverted */
 
 /*
  * struct rcar_du_output_routing - Output routing specification
@@ -48,12 +50,14 @@ struct rcar_du_output_routing {
 /*
  * struct rcar_du_device_info - DU model-specific information
  * @features: device features (RCAR_DU_FEATURE_*)
+ * @quirks: device quirks (RCAR_DU_QUIRK_*)
  * @num_crtcs: total number of CRTCs
  * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
  * @num_lvds: number of internal LVDS encoders
  */
 struct rcar_du_device_info {
        unsigned int features;
+       unsigned int quirks;
        unsigned int num_crtcs;
        struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
        unsigned int num_lvds;
@@ -84,6 +88,12 @@ static inline bool rcar_du_has(struct rcar_du_device *rcdu,
        return rcdu->info->features & feature;
 }
 
+static inline bool rcar_du_needs(struct rcar_du_device *rcdu,
+                                unsigned int quirk)
+{
+       return rcdu->info->quirks & quirk;
+}
+
 static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
 {
        return ioread32(rcdu->mmio + reg);
index b31ac080c4a77ba6554ae281d0fd90754e8d633b..fbeabd9a281f9a71c2e283df478fb7e3428a7655 100644 (file)
@@ -119,7 +119,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
        /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
         * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
         */
-       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+       if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
                align = 128;
        else
                align = 16 * args->bpp / 8;
@@ -144,7 +144,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-EINVAL);
        }
 
-       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+       if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
                align = 128;
        else
                align = 16 * format->bpp / 8;
index a0f6a17819252b51c0f201861777b8c73fc30ef1..df30a075d793ce5d97c550c80a2f01ce172fbdc1 100644 (file)
@@ -44,6 +44,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
        const struct drm_display_mode *mode = &rcrtc->crtc.mode;
        unsigned int freq = mode->clock;
        u32 lvdcr0;
+       u32 lvdhcr;
        u32 pllcr;
        int ret;
 
@@ -72,15 +73,19 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
         * VSYNC -> CTRL1
         * DISP  -> CTRL2
         * 0     -> CTRL3
-        *
-        * Channels 1 and 3 are switched on ES1.
         */
        rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
                        LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
                        LVDCTRCR_CTR0SEL_HSYNC);
-       rcar_lvds_write(lvds, LVDCHCR,
-                       LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
-                       LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+       if (rcar_du_needs(lvds->dev, RCAR_DU_QUIRK_LVDS_LANES))
+               lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
+                      | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
+       else
+               lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
+                      | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
+
+       rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
 
        /* Select the input, hardcode mode 0, enable LVDS operation and turn
         * bias circuitry on.
@@ -144,18 +149,9 @@ static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
        sprintf(name, "lvds.%u", lvds->index);
 
        mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-       if (mem == NULL) {
-               dev_err(&pdev->dev, "failed to get memory resource for %s\n",
-                       name);
-               return -EINVAL;
-       }
-
        lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
-       if (lvds->mmio == NULL) {
-               dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
-                       name);
-               return -ENOMEM;
-       }
+       if (IS_ERR(lvds->mmio))
+               return PTR_ERR(lvds->mmio);
 
        lvds->clock = devm_clk_get(&pdev->dev, name);
        if (IS_ERR(lvds->clock)) {
index 53000644733f29c25a5db1edb5d24e5090bbc666..3fb69d9ae61bd15fdea9e1801a5e0c69a19d13df 100644 (file)
@@ -104,6 +104,15 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane)
 {
        struct rcar_du_group *rgrp = plane->group;
        unsigned int index = plane->hwindex;
+       u32 mwr;
+
+       /* Memory pitch (expressed in pixels) */
+       if (plane->format->planes == 2)
+               mwr = plane->pitch;
+       else
+               mwr = plane->pitch * 8 / plane->format->bpp;
+
+       rcar_du_plane_write(rgrp, index, PnMWR, mwr);
 
        /* The Y position is expressed in raster line units and must be doubled
         * for 32bpp formats, according to the R8A7790 datasheet. No mention of
@@ -133,6 +142,8 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
 {
        struct drm_gem_cma_object *gem;
 
+       plane->pitch = fb->pitches[0];
+
        gem = drm_fb_cma_get_gem_obj(fb, 0);
        plane->dma[0] = gem->paddr + fb->offsets[0];
 
@@ -209,7 +220,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
        struct rcar_du_group *rgrp = plane->group;
        u32 ddcr2 = PnDDCR2_CODE;
        u32 ddcr4;
-       u32 mwr;
 
        /* Data format
         *
@@ -240,14 +250,6 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
        rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
        rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
 
-       /* Memory pitch (expressed in pixels) */
-       if (plane->format->planes == 2)
-               mwr = plane->pitch;
-       else
-               mwr = plane->pitch * 8 / plane->format->bpp;
-
-       rcar_du_plane_write(rgrp, index, PnMWR, mwr);
-
        /* Destination position and size */
        rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
        rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
@@ -309,7 +311,6 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 
        rplane->crtc = crtc;
        rplane->format = format;
-       rplane->pitch = fb->pitches[0];
 
        rplane->src_x = src_x >> 16;
        rplane->src_y = src_y >> 16;
index b17d0710871abc77487973d837dfe212d17eb5df..d2b2df9e26f3692b51d7495b5197b66ad9d6387b 100644 (file)
@@ -49,7 +49,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
 #endif
 
        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
-               DRM_MEMORYBARRIER();
+               mb();
                status = dev_priv->status_ptr[0];
                if ((status & mask) < threshold)
                        return 0;
@@ -123,7 +123,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
        int i;
 
        for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
-               DRM_MEMORYBARRIER();
+               mb();
                status = dev_priv->status_ptr[1];
                if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
                    (status & 0xffff) == 0)
@@ -449,7 +449,7 @@ static void savage_dma_flush(drm_savage_private_t * dev_priv)
                }
        }
 
-       DRM_MEMORYBARRIER();
+       mb();
 
        /* do flush ... */
        phys_addr = dev_priv->cmd_dma->offset +
@@ -990,10 +990,10 @@ static int savage_bci_get_buffers(struct drm_device *dev,
 
                buf->file_priv = file_priv;
 
-               if (DRM_COPY_TO_USER(&d->request_indices[i],
+               if (copy_to_user(&d->request_indices[i],
                                     &buf->idx, sizeof(buf->idx)))
                        return -EFAULT;
-               if (DRM_COPY_TO_USER(&d->request_sizes[i],
+               if (copy_to_user(&d->request_sizes[i],
                                     &buf->total, sizeof(buf->total)))
                        return -EFAULT;
 
index b35e75ed890c0384c48130108858d5e2f95297c3..c01ad0aeaa5806dd034910eff873d48a149fdbc5 100644 (file)
@@ -992,7 +992,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
                if (kcmd_addr == NULL)
                        return -ENOMEM;
 
-               if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+               if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
                                       cmdbuf->size * 8))
                {
                        kfree(kcmd_addr);
@@ -1007,7 +1007,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
                        goto done;
                }
 
-               if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+               if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
                                       cmdbuf->vb_size)) {
                        ret = -EFAULT;
                        goto done;
@@ -1022,7 +1022,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
                        goto done;
                }
 
-               if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+               if (copy_from_user(kbox_addr, cmdbuf->box_addr,
                                       cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
                        ret = -EFAULT;
                        goto done;
@@ -1032,7 +1032,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 
        /* Make sure writes to DMA buffers are finished before sending
         * DMA commands to the graphics hardware. */
-       DRM_MEMORYBARRIER();
+       mb();
 
        /* Coming from user space. Don't know if the Xserver has
         * emitted wait commands. Assuming the worst. */
index 562f9a401cf65996bb5b0fa99982451271537736..0428076f1ce8723d025e64cdea77df33777288b6 100644 (file)
  * Clock management
  */
 
-static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
 {
-       if (sdev->clock)
-               clk_prepare_enable(sdev->clock);
+       int ret;
+
+       if (sdev->clock) {
+               ret = clk_prepare_enable(sdev->clock);
+               if (ret < 0)
+                       return ret;
+       }
 #if 0
        if (sdev->meram_dev && sdev->meram_dev->pdev)
                pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
 #endif
+
+       return 0;
 }
 
 static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
@@ -161,6 +168,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
        struct drm_device *dev = sdev->ddev;
        struct drm_plane *plane;
        u32 value;
+       int ret;
 
        if (scrtc->started)
                return;
@@ -170,7 +178,9 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
                return;
 
        /* Enable clocks before accessing the hardware. */
-       shmob_drm_clk_on(sdev);
+       ret = shmob_drm_clk_on(sdev);
+       if (ret < 0)
+               return;
 
        /* Reset and enable the LCDC. */
        lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
index 015551866b4a0d3c6f9912c361993f99c3b548b1..c839c9c89efbf6f4eb4a37cac2fba654435bd1f9 100644 (file)
@@ -336,7 +336,9 @@ static int shmob_drm_probe(struct platform_device *pdev)
 
 static int shmob_drm_remove(struct platform_device *pdev)
 {
-       drm_platform_exit(&shmob_drm_driver, pdev);
+       struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+       drm_put_dev(sdev->ddev);
 
        return 0;
 }
index 4383b74a3aa46f480ac3c118e3a113812fdff9de..756f787b71439ac42af2b6a723916207165b2382 100644 (file)
@@ -94,7 +94,7 @@ static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 }
 
-void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct sis_file_private *file_priv = file->driver_priv;
 
index 01857d836350db00261d96870af2571e2b3e356f..0573be0d293304269f6cd11eb8677002ddf217a6 100644 (file)
@@ -266,7 +266,7 @@ int sis_idle(struct drm_device *dev)
         * because its polling frequency is too low.
         */
 
-       end = jiffies + (DRM_HZ * 3);
+       end = jiffies + (HZ * 3);
 
        for (i = 0; i < 4; ++i) {
                do {
index 8db9b3bce001fd5dd15e4de157a09d1f9b242ff4..354ddb29231f26e67e6ac9ec3a638358b47923c4 100644 (file)
@@ -1,14 +1,12 @@
 config DRM_TEGRA
-       bool "NVIDIA Tegra DRM"
-       depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+       tristate "NVIDIA Tegra DRM"
+       depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
        depends on DRM
        depends on RESET_CONTROLLER
-       select TEGRA_HOST1X
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
+       select DRM_MIPI_DSI
+       select DRM_PANEL
+       select TEGRA_HOST1X
        help
          Choose this option if you have an NVIDIA Tegra SoC.
 
@@ -17,6 +15,18 @@ config DRM_TEGRA
 
 if DRM_TEGRA
 
+config DRM_TEGRA_FBDEV
+       bool "Enable legacy fbdev support"
+       select DRM_KMS_FB_HELPER
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       default y
+       help
+         Choose this option if you have a need for the legacy fbdev support.
+         Note that this support also provides the Linux console on top of
+         the Tegra modesetting driver.
+
 config DRM_TEGRA_DEBUG
        bool "NVIDIA Tegra DRM debug support"
        help
index edc76abd58bb3ba6c67ca6782ecb3999224db6c1..8d220afbd85f94c6599a2ec0015c38ee7c87434b 100644 (file)
@@ -9,6 +9,8 @@ tegra-drm-y := \
        output.o \
        rgb.o \
        hdmi.o \
+       mipi-phy.o \
+       dsi.o \
        gr2d.o \
        gr3d.o
 
index 565f8f7b9a4781d0f9d0b33e1b53b2ecbe2b7eda..e38e5967d77bdc4a4c709cd44744887318cb87bf 100644 (file)
@@ -46,7 +46,6 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
        struct drm_device *drm;
        int ret;
 
-       INIT_LIST_HEAD(&driver->device_list);
        driver->bus = &drm_host1x_bus;
 
        drm = drm_dev_alloc(driver, &device->dev);
index cd7f1e499616891347485bcf6d3da86d6a05ab56..9336006b475d70b7494f1694d8661c07c2cd882a 100644 (file)
 #include "drm.h"
 #include "gem.h"
 
+struct tegra_dc_soc_info {
+       bool supports_interlacing;
+};
+
 struct tegra_plane {
        struct drm_plane base;
        unsigned int index;
@@ -658,19 +662,12 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
        /* program display mode */
        tegra_dc_set_timings(dc, mode);
 
-       value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
-       tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
-
-       value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
-       value &= ~LVS_OUTPUT_POLARITY_LOW;
-       value &= ~LHS_OUTPUT_POLARITY_LOW;
-       tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
-
-       value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
-               DISP_ORDER_RED_BLUE;
-       tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
-
-       tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+       /* interlacing isn't supported yet, so disable it */
+       if (dc->soc->supports_interlacing) {
+               value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
+               value &= ~INTERLACE_ENABLE;
+               tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
+       }
 
        value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
        tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
@@ -735,10 +732,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
                PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
        tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
 
-       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
-       value |= DISP_CTRL_MODE_C_DISPLAY;
-       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
        /* initialize timer */
        value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
                WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
@@ -1107,8 +1100,6 @@ static int tegra_dc_init(struct host1x_client *client)
        struct tegra_dc *dc = host1x_client_to_dc(client);
        int err;
 
-       dc->pipe = tegra->drm->mode_config.num_crtc;
-
        drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
        drm_mode_crtc_set_gamma_size(&dc->base, 256);
        drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1167,8 +1158,71 @@ static const struct host1x_client_ops dc_client_ops = {
        .exit = tegra_dc_exit,
 };
 
+static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
+       .supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
+       .supports_interlacing = false,
+};
+
+static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
+       .supports_interlacing = true,
+};
+
+static const struct of_device_id tegra_dc_of_match[] = {
+       {
+               .compatible = "nvidia,tegra124-dc",
+               .data = &tegra124_dc_soc_info,
+       }, {
+               .compatible = "nvidia,tegra30-dc",
+               .data = &tegra30_dc_soc_info,
+       }, {
+               .compatible = "nvidia,tegra20-dc",
+               .data = &tegra20_dc_soc_info,
+       }, {
+               /* sentinel */
+       }
+};
+
+static int tegra_dc_parse_dt(struct tegra_dc *dc)
+{
+       struct device_node *np;
+       u32 value = 0;
+       int err;
+
+       err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value);
+       if (err < 0) {
+               dev_err(dc->dev, "missing \"nvidia,head\" property\n");
+
+               /*
+                * If the nvidia,head property isn't present, try to find the
+                * correct head number by looking up the position of this
+                * display controller's node within the device tree. Assuming
+                * that the nodes are ordered properly in the DTS file and
+                * that the translation into a flattened device tree blob
+                * preserves that ordering this will actually yield the right
+                * head number.
+                *
+                * If those assumptions don't hold, this will still work for
+                * cases where only a single display controller is used.
+                */
+               for_each_matching_node(np, tegra_dc_of_match) {
+                       if (np == dc->dev->of_node)
+                               break;
+
+                       value++;
+               }
+       }
+
+       dc->pipe = value;
+
+       return 0;
+}
+
 static int tegra_dc_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *id;
        struct resource *regs;
        struct tegra_dc *dc;
        int err;
@@ -1177,9 +1231,18 @@ static int tegra_dc_probe(struct platform_device *pdev)
        if (!dc)
                return -ENOMEM;
 
+       id = of_match_node(tegra_dc_of_match, pdev->dev.of_node);
+       if (!id)
+               return -ENODEV;
+
        spin_lock_init(&dc->lock);
        INIT_LIST_HEAD(&dc->list);
        dc->dev = &pdev->dev;
+       dc->soc = id->data;
+
+       err = tegra_dc_parse_dt(dc);
+       if (err < 0)
+               return err;
 
        dc->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dc->clk)) {
@@ -1253,12 +1316,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id tegra_dc_of_match[] = {
-       { .compatible = "nvidia,tegra30-dc", },
-       { .compatible = "nvidia,tegra20-dc", },
-       { },
-};
-
 struct platform_driver tegra_dc_driver = {
        .driver = {
                .name = "tegra-dc",
index 91bbda291470c29ce3ba80a70b0e5d541b8c6556..3c2c0ea1cd87a84e1f3414a780788d868f676703 100644 (file)
@@ -28,6 +28,7 @@
 #define DISP_CTRL_MODE_STOP (0 << 5)
 #define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
 #define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_CTRL_MODE_MASK (3 << 5)
 #define DC_CMD_SIGNAL_RAISE                    0x033
 #define DC_CMD_DISPLAY_POWER_CONTROL           0x036
 #define PW0_ENABLE (1 <<  0)
 
 #define DC_DISP_DISP_WIN_OPTIONS               0x402
 #define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE  (1 << 29)
 
 #define DC_DISP_DISP_MEM_HIGH_PRIORITY         0x403
 #define CURSOR_THRESHOLD(x)   (((x) & 0x03) << 24)
 #define DITHER_CONTROL_ERRDIFF (3 << 8)
 
 #define DC_DISP_SHIFT_CLOCK_OPTIONS            0x431
+#define  SC1_H_QUALIFIER_NONE  (1 << 16)
+#define  SC0_H_QUALIFIER_NONE  (1 <<  0)
 
 #define DC_DISP_DATA_ENABLE_OPTIONS            0x432
 #define DE_SELECT_ACTIVE_BLANK  (0 << 0)
 #define DC_DISP_SD_HW_K_VALUES                 0x4dd
 #define DC_DISP_SD_MAN_K_VALUES                        0x4de
 
+#define DC_DISP_INTERLACE_CONTROL              0x4e5
+#define  INTERLACE_STATUS (1 << 2)
+#define  INTERLACE_START  (1 << 1)
+#define  INTERLACE_ENABLE (1 << 0)
+
 #define DC_WIN_CSC_YOF                         0x611
 #define DC_WIN_CSC_KYRGB                       0x612
 #define DC_WIN_CSC_KUR                         0x613
index 07eba596d458d22b63bc9f0a731d6ca3befae036..88a529008ce0e59210e917422b2cfe07570a0d7d 100644 (file)
@@ -104,9 +104,11 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
 
 static void tegra_drm_lastclose(struct drm_device *drm)
 {
+#ifdef CONFIG_DRM_TEGRA_FBDEV
        struct tegra_drm *tegra = drm->dev_private;
 
        tegra_fbdev_restore_mode(tegra->fbdev);
+#endif
 }
 
 static struct host1x_bo *
@@ -578,7 +580,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
 #endif
 
 static struct drm_driver tegra_drm_driver = {
-       .driver_features = DRIVER_MODESET | DRIVER_GEM,
+       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
        .load = tegra_drm_load,
        .unload = tegra_drm_unload,
        .open = tegra_drm_open,
@@ -596,6 +598,12 @@ static struct drm_driver tegra_drm_driver = {
 
        .gem_free_object = tegra_bo_free_object,
        .gem_vm_ops = &tegra_bo_vm_ops,
+
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_export = tegra_gem_prime_export,
+       .gem_prime_import = tegra_gem_prime_import,
+
        .dumb_create = tegra_bo_dumb_create,
        .dumb_map_offset = tegra_bo_dumb_map_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
@@ -653,8 +661,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
+       { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr3d", },
+       { .compatible = "nvidia,tegra124-dc", },
        { /* sentinel */ }
 };
 
@@ -677,10 +687,14 @@ static int __init host1x_drm_init(void)
        if (err < 0)
                goto unregister_host1x;
 
-       err = platform_driver_register(&tegra_hdmi_driver);
+       err = platform_driver_register(&tegra_dsi_driver);
        if (err < 0)
                goto unregister_dc;
 
+       err = platform_driver_register(&tegra_hdmi_driver);
+       if (err < 0)
+               goto unregister_dsi;
+
        err = platform_driver_register(&tegra_gr2d_driver);
        if (err < 0)
                goto unregister_hdmi;
@@ -695,6 +709,8 @@ unregister_gr2d:
        platform_driver_unregister(&tegra_gr2d_driver);
 unregister_hdmi:
        platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dsi:
+       platform_driver_unregister(&tegra_dsi_driver);
 unregister_dc:
        platform_driver_unregister(&tegra_dc_driver);
 unregister_host1x:
@@ -708,6 +724,7 @@ static void __exit host1x_drm_exit(void)
        platform_driver_unregister(&tegra_gr3d_driver);
        platform_driver_unregister(&tegra_gr2d_driver);
        platform_driver_unregister(&tegra_hdmi_driver);
+       platform_driver_unregister(&tegra_dsi_driver);
        platform_driver_unregister(&tegra_dc_driver);
        host1x_driver_unregister(&host1x_drm_driver);
 }
index 266aae08a3bd394fff2d9e41e902d2c9e899bba9..bf1cac7658f8d20333ce0cefbac8383e62f98c37 100644 (file)
@@ -27,10 +27,12 @@ struct tegra_fb {
        unsigned int num_planes;
 };
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 struct tegra_fbdev {
        struct drm_fb_helper base;
        struct tegra_fb *fb;
 };
+#endif
 
 struct tegra_drm {
        struct drm_device *drm;
@@ -38,7 +40,9 @@ struct tegra_drm {
        struct mutex clients_lock;
        struct list_head clients;
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
        struct tegra_fbdev *fbdev;
+#endif
 };
 
 struct tegra_drm_client;
@@ -84,6 +88,7 @@ extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
 extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
 extern int tegra_drm_exit(struct tegra_drm *tegra);
 
+struct tegra_dc_soc_info;
 struct tegra_output;
 
 struct tegra_dc {
@@ -109,6 +114,8 @@ struct tegra_dc {
 
        /* page-flip handling */
        struct drm_pending_vblank_event *event;
+
+       const struct tegra_dc_soc_info *soc;
 };
 
 static inline struct tegra_dc *
@@ -177,6 +184,7 @@ struct tegra_output_ops {
 enum tegra_output_type {
        TEGRA_OUTPUT_RGB,
        TEGRA_OUTPUT_HDMI,
+       TEGRA_OUTPUT_DSI,
 };
 
 struct tegra_output {
@@ -186,6 +194,7 @@ struct tegra_output {
        const struct tegra_output_ops *ops;
        enum tegra_output_type type;
 
+       struct drm_panel *panel;
        struct i2c_adapter *ddc;
        const struct edid *edid;
        unsigned int hpd_irq;
@@ -263,9 +272,12 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
 bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+#endif
 
 extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_dsi_driver;
 extern struct platform_driver tegra_hdmi_driver;
 extern struct platform_driver tegra_gr2d_driver;
 extern struct platform_driver tegra_gr3d_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
new file mode 100644 (file)
index 0000000..d452faa
--- /dev/null
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "dsi.h"
+#include "mipi-phy.h"
+
+#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
+#define DSI_HOST_FIFO_DEPTH 64
+
+struct tegra_dsi {
+       struct host1x_client client;
+       struct tegra_output output;
+       struct device *dev;
+
+       void __iomem *regs;
+
+       struct reset_control *rst;
+       struct clk *clk_parent;
+       struct clk *clk_lp;
+       struct clk *clk;
+
+       struct drm_info_list *debugfs_files;
+       struct drm_minor *minor;
+       struct dentry *debugfs;
+
+       enum mipi_dsi_pixel_format format;
+       unsigned int lanes;
+
+       struct tegra_mipi_device *mipi;
+       struct mipi_dsi_host host;
+};
+
+static inline struct tegra_dsi *
+host1x_client_to_dsi(struct host1x_client *client)
+{
+       return container_of(client, struct tegra_dsi, client);
+}
+
+static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host)
+{
+       return container_of(host, struct tegra_dsi, host);
+}
+
+static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
+{
+       return container_of(output, struct tegra_dsi, output);
+}
+
+static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi,
+                                           unsigned long reg)
+{
+       return readl(dsi->regs + (reg << 2));
+}
+
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value,
+                                   unsigned long reg)
+{
+       writel(value, dsi->regs + (reg << 2));
+}
+
+static int tegra_dsi_show_regs(struct seq_file *s, void *data)
+{
+       struct drm_info_node *node = s->private;
+       struct tegra_dsi *dsi = node->info_ent->data;
+
+#define DUMP_REG(name)                                         \
+       seq_printf(s, "%-32s %#05x %08lx\n", #name, name,       \
+                  tegra_dsi_readl(dsi, name))
+
+       DUMP_REG(DSI_INCR_SYNCPT);
+       DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
+       DUMP_REG(DSI_INCR_SYNCPT_ERROR);
+       DUMP_REG(DSI_CTXSW);
+       DUMP_REG(DSI_RD_DATA);
+       DUMP_REG(DSI_WR_DATA);
+       DUMP_REG(DSI_POWER_CONTROL);
+       DUMP_REG(DSI_INT_ENABLE);
+       DUMP_REG(DSI_INT_STATUS);
+       DUMP_REG(DSI_INT_MASK);
+       DUMP_REG(DSI_HOST_CONTROL);
+       DUMP_REG(DSI_CONTROL);
+       DUMP_REG(DSI_SOL_DELAY);
+       DUMP_REG(DSI_MAX_THRESHOLD);
+       DUMP_REG(DSI_TRIGGER);
+       DUMP_REG(DSI_TX_CRC);
+       DUMP_REG(DSI_STATUS);
+
+       DUMP_REG(DSI_INIT_SEQ_CONTROL);
+       DUMP_REG(DSI_INIT_SEQ_DATA_0);
+       DUMP_REG(DSI_INIT_SEQ_DATA_1);
+       DUMP_REG(DSI_INIT_SEQ_DATA_2);
+       DUMP_REG(DSI_INIT_SEQ_DATA_3);
+       DUMP_REG(DSI_INIT_SEQ_DATA_4);
+       DUMP_REG(DSI_INIT_SEQ_DATA_5);
+       DUMP_REG(DSI_INIT_SEQ_DATA_6);
+       DUMP_REG(DSI_INIT_SEQ_DATA_7);
+
+       DUMP_REG(DSI_PKT_SEQ_0_LO);
+       DUMP_REG(DSI_PKT_SEQ_0_HI);
+       DUMP_REG(DSI_PKT_SEQ_1_LO);
+       DUMP_REG(DSI_PKT_SEQ_1_HI);
+       DUMP_REG(DSI_PKT_SEQ_2_LO);
+       DUMP_REG(DSI_PKT_SEQ_2_HI);
+       DUMP_REG(DSI_PKT_SEQ_3_LO);
+       DUMP_REG(DSI_PKT_SEQ_3_HI);
+       DUMP_REG(DSI_PKT_SEQ_4_LO);
+       DUMP_REG(DSI_PKT_SEQ_4_HI);
+       DUMP_REG(DSI_PKT_SEQ_5_LO);
+       DUMP_REG(DSI_PKT_SEQ_5_HI);
+
+       DUMP_REG(DSI_DCS_CMDS);
+
+       DUMP_REG(DSI_PKT_LEN_0_1);
+       DUMP_REG(DSI_PKT_LEN_2_3);
+       DUMP_REG(DSI_PKT_LEN_4_5);
+       DUMP_REG(DSI_PKT_LEN_6_7);
+
+       DUMP_REG(DSI_PHY_TIMING_0);
+       DUMP_REG(DSI_PHY_TIMING_1);
+       DUMP_REG(DSI_PHY_TIMING_2);
+       DUMP_REG(DSI_BTA_TIMING);
+
+       DUMP_REG(DSI_TIMEOUT_0);
+       DUMP_REG(DSI_TIMEOUT_1);
+       DUMP_REG(DSI_TO_TALLY);
+
+       DUMP_REG(DSI_PAD_CONTROL_0);
+       DUMP_REG(DSI_PAD_CONTROL_CD);
+       DUMP_REG(DSI_PAD_CD_STATUS);
+       DUMP_REG(DSI_VIDEO_MODE_CONTROL);
+       DUMP_REG(DSI_PAD_CONTROL_1);
+       DUMP_REG(DSI_PAD_CONTROL_2);
+       DUMP_REG(DSI_PAD_CONTROL_3);
+       DUMP_REG(DSI_PAD_CONTROL_4);
+
+       DUMP_REG(DSI_GANGED_MODE_CONTROL);
+       DUMP_REG(DSI_GANGED_MODE_START);
+       DUMP_REG(DSI_GANGED_MODE_SIZE);
+
+       DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
+       DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);
+
+       DUMP_REG(DSI_INIT_SEQ_DATA_8);
+       DUMP_REG(DSI_INIT_SEQ_DATA_9);
+       DUMP_REG(DSI_INIT_SEQ_DATA_10);
+       DUMP_REG(DSI_INIT_SEQ_DATA_11);
+       DUMP_REG(DSI_INIT_SEQ_DATA_12);
+       DUMP_REG(DSI_INIT_SEQ_DATA_13);
+       DUMP_REG(DSI_INIT_SEQ_DATA_14);
+       DUMP_REG(DSI_INIT_SEQ_DATA_15);
+
+#undef DUMP_REG
+
+       return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+       { "regs", tegra_dsi_show_regs, 0, NULL },
+};
+
+static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
+                                 struct drm_minor *minor)
+{
+       const char *name = dev_name(dsi->dev);
+       unsigned int i;
+       int err;
+
+       dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+       if (!dsi->debugfs)
+               return -ENOMEM;
+
+       dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+                                    GFP_KERNEL);
+       if (!dsi->debugfs_files) {
+               err = -ENOMEM;
+               goto remove;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+               dsi->debugfs_files[i].data = dsi;
+
+       err = drm_debugfs_create_files(dsi->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+                                      dsi->debugfs, minor);
+       if (err < 0)
+               goto free;
+
+       dsi->minor = minor;
+
+       return 0;
+
+free:
+       kfree(dsi->debugfs_files);
+       dsi->debugfs_files = NULL;
+remove:
+       debugfs_remove(dsi->debugfs);
+       dsi->debugfs = NULL;
+
+       return err;
+}
+
+static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+{
+       drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
+                                dsi->minor);
+       dsi->minor = NULL;
+
+       kfree(dsi->debugfs_files);
+       dsi->debugfs_files = NULL;
+
+       debugfs_remove(dsi->debugfs);
+       dsi->debugfs = NULL;
+
+       return 0;
+}
+
+/*
+ * Packet-sequencer word encoding: each 32-bit sequence word carries up to
+ * three packet slots. PKT_IDn() programs a 6-bit packet data type into
+ * slot n and sets an extra flag bit (presumably a slot-enable bit -- TODO
+ * confirm against the TRM); PKT_LENn() selects one of the eight packet
+ * length registers (DSI_PKT_LEN_0_1..DSI_PKT_LEN_6_7) for that slot.
+ */
+#define PKT_ID0(id)    ((((id) & 0x3f) <<  3) | (1 <<  9))
+#define PKT_LEN0(len)  (((len) & 0x07) <<  0)
+#define PKT_ID1(id)    ((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN1(len)  (((len) & 0x07) << 10)
+#define PKT_ID2(id)    ((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN2(len)  (((len) & 0x07) << 20)
+
+/* presumably: return to low-power mode after this sequence word -- TODO confirm */
+#define PKT_LP         (1 << 30)
+/* six PKT_SEQ_x_LO/HI register pairs = 12 sequence words */
+#define NUM_PKT_SEQ    12
+
+/* non-burst mode with sync-end */
+static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+       [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+              PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+              PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+              PKT_LP,
+       [ 1] = 0,
+       [ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
+              PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+              PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+              PKT_LP,
+       [ 3] = 0,
+       [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+              PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+              PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+              PKT_LP,
+       [ 5] = 0,
+       [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+              PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+              PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+       [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+              PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+              PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+       [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+              PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+              PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+              PKT_LP,
+       [ 9] = 0,
+       [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+              PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+              PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+       [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+              PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+              PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+};
+
+/*
+ * Derive default MIPI D-PHY timings from the current DSI clock rate,
+ * validate them and program the PHY timing and BTA timing registers.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
+{
+       struct mipi_dphy_timing timing;
+       unsigned long value, period;
+       long rate;
+       int err;
+
+       /*
+        * NOTE(review): clk_get_rate() returns unsigned long, so the
+        * negative check below can only trigger for rates > LONG_MAX;
+        * it does not catch ordinary clock errors.
+        */
+       rate = clk_get_rate(dsi->clk);
+       if (rate < 0)
+               return rate;
+
+       /* period of the double-rate bit clock, in nanoseconds */
+       period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+
+       err = mipi_dphy_timing_get_default(&timing, period);
+       if (err < 0)
+               return err;
+
+       err = mipi_dphy_timing_validate(&timing, period);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+               return err;
+       }
+
+       /*
+        * The D-PHY timing fields below are expressed in byte-clock cycles,
+        * so multiply the period by 8.
+        */
+       period *= 8;
+
+       value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
+               DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
+               DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
+               DSI_TIMING_FIELD(timing.hsprepare, period, 1);
+       tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
+
+       value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
+               DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
+               DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
+               DSI_TIMING_FIELD(timing.lpx, period, 1);
+       tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
+
+       /* low byte: 0xff * period / period = 0xff, the register's reset value */
+       value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
+               DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
+               DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
+       tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
+
+       /* bus-turnaround timings */
+       value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
+               DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
+               DSI_TIMING_FIELD(timing.tago, period, 1);
+       tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+
+       return 0;
+}
+
+/*
+ * Look up the bytes-per-pixel ratio (*mulp / *divp) for a DSI pixel
+ * format. Returns 0 on success or -EINVAL for unsupported formats.
+ */
+static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
+                               unsigned int *mulp, unsigned int *divp)
+{
+       unsigned int mul, div;
+
+       switch (format) {
+       case MIPI_DSI_FMT_RGB888:
+       case MIPI_DSI_FMT_RGB666_PACKED:
+               /* 24 bpp and packed 18 bpp: 3 bytes per pixel */
+               mul = 3;
+               div = 1;
+               break;
+
+       case MIPI_DSI_FMT_RGB565:
+               /* 16 bpp: 2 bytes per pixel */
+               mul = 2;
+               div = 1;
+               break;
+
+       case MIPI_DSI_FMT_RGB666:
+               /* loosely packed 18 bpp: 9/4 bytes per pixel */
+               mul = 9;
+               div = 4;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       *mulp = mul;
+       *divp = div;
+
+       return 0;
+}
+
+/*
+ * Bring up the DSI link for video mode: program the host and video
+ * controls, D-PHY timings, packet sequencer and packet lengths derived
+ * from the display mode, then enable the display controller and finally
+ * the DSI controller itself.
+ *
+ * NOTE(review): the error paths after clk_enable() return without
+ * disabling the clock or re-asserting the reset.
+ */
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       struct drm_display_mode *mode = &dc->base.mode;
+       unsigned int hact, hsw, hbp, hfp, i, mul, div;
+       struct tegra_dsi *dsi = to_dsi(output);
+       /* FIXME: don't hardcode this */
+       const u32 *pkt_seq = pkt_seq_vnb_syne;
+       unsigned long value;
+       int err;
+
+       /* mul/div is the bytes-per-pixel ratio for the configured format */
+       err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+       if (err < 0)
+               return err;
+
+       err = clk_enable(dsi->clk);
+       if (err < 0)
+               return err;
+
+       reset_control_deassert(dsi->rst);
+
+       value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) |
+               DSI_CONTROL_LANES(dsi->lanes - 1) |
+               DSI_CONTROL_SOURCE(dc->pipe);
+       tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+       tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+
+       /* high-speed transfers, checksum and ECC generation */
+       value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
+               DSI_HOST_CONTROL_ECC;
+       tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+       /* video mode, host (command) mode off */
+       value = tegra_dsi_readl(dsi, DSI_CONTROL);
+       value |= DSI_CONTROL_HS_CLK_CTRL;
+       value &= ~DSI_CONTROL_TX_TRIG(3);
+       value &= ~DSI_CONTROL_DCS_ENABLE;
+       value |= DSI_CONTROL_VIDEO_ENABLE;
+       value &= ~DSI_CONTROL_HOST_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+       err = tegra_dsi_set_phy_timing(dsi);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < NUM_PKT_SEQ; i++)
+               tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
+
+       /* horizontal active pixels, converted to bytes */
+       hact = mode->hdisplay * mul / div;
+
+       /* horizontal sync width */
+       hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+       /* the constants subtracted below presumably account for packet
+        * header/overhead bytes -- TODO confirm against the TRM */
+       hsw -= 10;
+
+       /* horizontal back porch */
+       hbp = (mode->htotal - mode->hsync_end) * mul / div;
+       hbp -= 14;
+
+       /* horizontal front porch */
+       hfp = (mode->hsync_start  - mode->hdisplay) * mul / div;
+       hfp -= 8;
+
+       tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+       tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+       tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+       tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+
+       /* set SOL delay */
+       tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+       /* enable display controller */
+       value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+       value |= DSI_ENABLE;
+       tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+       value &= ~DISP_CTRL_MODE_MASK;
+       value |= DISP_CTRL_MODE_C_DISPLAY;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+       value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+                PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+       /* latch the register updates */
+       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+       /* enable DSI controller */
+       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+       value |= DSI_POWER_CONTROL_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+       return 0;
+}
+
+/*
+ * Tear down the DSI output: power down the DSI controller, then (if the
+ * output is attached to a display controller) undo the display controller
+ * programming done in tegra_output_dsi_enable(), and finally gate the
+ * DSI clock.
+ */
+static int tegra_output_dsi_disable(struct tegra_output *output)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       struct tegra_dsi *dsi = to_dsi(output);
+       unsigned long value;
+
+       /*
+        * Disable the DSI controller. This must clear the enable bit;
+        * the previous "value &= DSI_POWER_CONTROL_ENABLE" kept the
+        * enable bit set (and cleared everything else), so the
+        * controller was never actually powered down.
+        */
+       value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+       value &= ~DSI_POWER_CONTROL_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+       /*
+        * The following accesses registers of the display controller, so make
+        * sure it's only executed when the output is attached to one.
+        */
+       if (dc) {
+               value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+               value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+                          PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+               tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+               value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+               value &= ~DISP_CTRL_MODE_MASK;
+               tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+               value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+               value &= ~DSI_ENABLE;
+               tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+               /* latch the register updates */
+               tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+               tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+       }
+
+       clk_disable(dsi->clk);
+
+       return 0;
+}
+
+/*
+ * Configure the DSI clock tree for the current display mode and program
+ * the host timeout registers derived from the resulting byte clock.
+ *
+ * NOTE(review): the pclk argument is ignored; the pixel clock is
+ * recomputed below from htotal * vtotal * vrefresh.
+ */
+static int tegra_output_dsi_setup_clock(struct tegra_output *output,
+                                       struct clk *clk, unsigned long pclk)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       struct drm_display_mode *mode = &dc->base.mode;
+       unsigned int timeout, mul, div, vrefresh;
+       struct tegra_dsi *dsi = to_dsi(output);
+       unsigned long bclk, plld, value;
+       struct clk *base;
+       int err;
+
+       err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+       if (err < 0)
+               return err;
+
+       vrefresh = drm_mode_vrefresh(mode);
+
+       /* byte clock: pixel rate scaled by bytes/pixel, split across lanes */
+       pclk = mode->htotal * mode->vtotal * vrefresh;
+       bclk = (pclk * mul) / (div * dsi->lanes);
+       plld = DIV_ROUND_UP(bclk * 8, 1000000);
+       pclk = (plld * 1000000) / 2;
+
+       err = clk_set_parent(clk, dsi->clk_parent);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
+               return err;
+       }
+
+       base = clk_get_parent(dsi->clk_parent);
+
+       /*
+        * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+        * respectively, each of which divides the base pll_d by 2.
+        */
+       err = clk_set_rate(base, pclk * 2);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
+                       pclk * 2);
+               return err;
+       }
+
+       /*
+        * XXX: Move the below somewhere else so that we don't need to have
+        * access to the vrefresh in this function?
+        */
+
+       /* one frame high-speed transmission timeout */
+       timeout = (bclk / vrefresh) / 512;
+       value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+       /*
+        * 2 ms peripheral timeout for panel
+        *
+        * NOTE(review): by C precedence this evaluates as
+        * (2 * bclk / 512) * 1000, which does not match the "2 ms"
+        * intent ((2 * bclk) / (512 * 1000) would); verify the timeout
+        * units before changing it.
+        */
+       timeout = 2 * bclk / 512 * 1000;
+       value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+       /* clear the timeout tally counters */
+       value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+       tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+       return 0;
+}
+
+/*
+ * Validate a display mode for the DSI output.
+ *
+ * FIXME: no filtering is implemented yet; every mode is reported as OK.
+ */
+static int tegra_output_dsi_check_mode(struct tegra_output *output,
+                                      struct drm_display_mode *mode,
+                                      enum drm_mode_status *status)
+{
+       *status = MODE_OK;
+       return 0;
+}
+
+/* tegra_output callbacks implementing the DSI output path */
+static const struct tegra_output_ops dsi_ops = {
+       .enable = tegra_output_dsi_enable,
+       .disable = tegra_output_dsi_disable,
+       .setup_clock = tegra_output_dsi_setup_clock,
+       .check_mode = tegra_output_dsi_check_mode,
+};
+
+/*
+ * Enable the DSI pads by clearing the VS1 pull-down and PDIO fields in
+ * pad control register 0. Always returns 0.
+ */
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+       unsigned long control;
+
+       control = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+       tegra_dsi_writel(dsi, control, DSI_PAD_CONTROL_0);
+
+       return 0;
+}
+
+/*
+ * Calibrate the DSI pads: clear all pad control registers, enable the
+ * pads, program slew-rate and low-power drive strengths, then run the
+ * MIPI calibration procedure.
+ *
+ * Returns the result of tegra_mipi_calibrate().
+ */
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+       unsigned long value;
+
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+       /* start calibration */
+       tegra_dsi_pad_enable(dsi);
+
+       value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+               DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+               DSI_PAD_OUT_CLK(0x0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+       return tegra_mipi_calibrate(dsi->mipi);
+}
+
+/*
+ * host1x client init: register the DSI output with the DRM device, reset
+ * the controller registers to sane defaults, calibrate the pads and power
+ * the controller up.
+ */
+static int tegra_dsi_init(struct host1x_client *client)
+{
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+       struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+       unsigned long value, i;
+       int err;
+
+       dsi->output.type = TEGRA_OUTPUT_DSI;
+       dsi->output.dev = client->dev;
+       dsi->output.ops = &dsi_ops;
+
+       err = tegra_output_init(tegra->drm, &dsi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output setup failed: %d\n", err);
+               return err;
+       }
+
+       /* debugfs failure is logged but deliberately non-fatal */
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
+               if (err < 0)
+                       dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
+       }
+
+       /*
+        * enable high-speed mode, checksum generation, ECC generation and
+        * disable raw mode
+        */
+       value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+       value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
+                DSI_HOST_CONTROL_HS;
+       value &= ~DSI_HOST_CONTROL_RAW;
+       tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+       tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+       tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+       tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+       /* clear both banks of initialization sequence data (0-7 and 8-15) */
+       for (i = 0; i < 8; i++) {
+               tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+               tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+       }
+
+       /* clear all 12 packet sequence words */
+       for (i = 0; i < 12; i++)
+               tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+       tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+       err = tegra_dsi_pad_calibrate(dsi);
+       if (err < 0) {
+               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+               return err;
+       }
+
+       tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
+       usleep_range(300, 1000);
+
+       return 0;
+}
+
+/*
+ * host1x client teardown: remove debugfs entries (non-fatal on error),
+ * disable the output and unregister it from the DRM device.
+ */
+static int tegra_dsi_exit(struct host1x_client *client)
+{
+       struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+       int err;
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_dsi_debugfs_exit(dsi);
+               if (err < 0)
+                       dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
+       }
+
+       err = tegra_output_disable(&dsi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output failed to disable: %d\n", err);
+               return err;
+       }
+
+       err = tegra_output_exit(&dsi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output cleanup failed: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+/* host1x client lifecycle callbacks */
+static const struct host1x_client_ops dsi_client_ops = {
+       .init = tegra_dsi_init,
+       .exit = tegra_dsi_exit,
+};
+
+/*
+ * Make dsi->clk_parent the parent of the DSI module clock's current
+ * parent. Returns 0 on success or a negative error code.
+ */
+static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
+{
+       struct clk *parent = clk_get_parent(dsi->clk);
+
+       if (!parent)
+               return -EINVAL;
+
+       return clk_set_parent(parent, dsi->clk_parent);
+}
+
+/*
+ * Put the DSI controller into a known state at probe time: power it down,
+ * mask and clear interrupts, and zero out the host/video configuration,
+ * sequencer, packet length, timeout, pad and ganged-mode registers.
+ * DSI_PHY_TIMING_2 is set to 0xff rather than zero (matches the value
+ * tegra_dsi_set_phy_timing() programs into its low byte).
+ */
+static void tegra_dsi_initialize(struct tegra_dsi *dsi)
+{
+       unsigned int i;
+
+       tegra_dsi_writel(dsi, 0, DSI_POWER_CONTROL);
+
+       tegra_dsi_writel(dsi, 0, DSI_INT_ENABLE);
+       tegra_dsi_writel(dsi, 0, DSI_INT_STATUS);
+       tegra_dsi_writel(dsi, 0, DSI_INT_MASK);
+
+       tegra_dsi_writel(dsi, 0, DSI_HOST_CONTROL);
+       tegra_dsi_writel(dsi, 0, DSI_CONTROL);
+
+       tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+       tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+       tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+       /* both banks of initialization sequence data (0-7 and 8-15) */
+       for (i = 0; i < 8; i++) {
+               tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+               tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+       }
+
+       /* all 12 packet sequence words */
+       for (i = 0; i < 12; i++)
+               tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+       tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+       /* the four packet length registers (DSI_PKT_LEN_0_1..DSI_PKT_LEN_6_7) */
+       for (i = 0; i < 4; i++)
+               tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1 + i);
+
+       tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_0);
+       tegra_dsi_writel(dsi, 0x00000000, DSI_PHY_TIMING_1);
+       tegra_dsi_writel(dsi, 0x000000ff, DSI_PHY_TIMING_2);
+       tegra_dsi_writel(dsi, 0x00000000, DSI_BTA_TIMING);
+
+       tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_0);
+       tegra_dsi_writel(dsi, 0, DSI_TIMEOUT_1);
+       tegra_dsi_writel(dsi, 0, DSI_TO_TALLY);
+
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_CD);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CD_STATUS);
+       tegra_dsi_writel(dsi, 0, DSI_VIDEO_MODE_CONTROL);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+       tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+       tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+       tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+}
+
+/*
+ * A DSI peripheral has attached to this host: adopt its pixel format and
+ * lane count and, if a panel matching its OF node is registered, trigger
+ * a hotplug event so the connector state is re-evaluated.
+ */
+static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
+                                struct mipi_dsi_device *device)
+{
+       struct tegra_dsi *dsi = host_to_tegra(host);
+       struct tegra_output *output = &dsi->output;
+
+       dsi->format = device->format;
+       dsi->lanes = device->lanes;
+
+       output->panel = of_drm_find_panel(device->dev.of_node);
+       if (output->panel) {
+               /* only notify once the connector is registered with a device */
+               if (output->connector.dev)
+                       drm_helper_hpd_irq_event(output->connector.dev);
+       }
+
+       return 0;
+}
+
+/*
+ * A DSI peripheral is detaching: if it backs the currently attached
+ * panel, signal a hotplug event and drop the panel reference.
+ */
+static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
+                                struct mipi_dsi_device *device)
+{
+       struct tegra_dsi *dsi = host_to_tegra(host);
+       struct tegra_output *output = &dsi->output;
+
+       if (output->panel && &device->dev == output->panel->dev) {
+               if (output->connector.dev)
+                       drm_helper_hpd_irq_event(output->connector.dev);
+
+               output->panel = NULL;
+       }
+
+       return 0;
+}
+
+/* mipi_dsi_host callbacks for peripheral attach/detach */
+static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
+       .attach = tegra_dsi_host_attach,
+       .detach = tegra_dsi_host_detach,
+};
+
+/*
+ * Probe the DSI controller: acquire reset and clocks, map registers,
+ * reset the controller, request MIPI calibration support and register
+ * both the DSI host and the host1x client.
+ *
+ * Error handling uses goto-based unwinding so that clocks enabled and
+ * registrations made earlier in the function are released when a later
+ * step fails (the original code leaked them on those paths).
+ */
+static int tegra_dsi_probe(struct platform_device *pdev)
+{
+       struct tegra_dsi *dsi;
+       struct resource *regs;
+       int err;
+
+       dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+
+       dsi->output.dev = dsi->dev = &pdev->dev;
+
+       err = tegra_output_probe(&dsi->output);
+       if (err < 0)
+               return err;
+
+       /*
+        * Assume these values by default. When a DSI peripheral driver
+        * attaches to the DSI host, the parameters will be taken from
+        * the attached device.
+        */
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->lanes = 4;
+
+       dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+       if (IS_ERR(dsi->rst))
+               return PTR_ERR(dsi->rst);
+
+       dsi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dsi->clk)) {
+               dev_err(&pdev->dev, "cannot get DSI clock\n");
+               return PTR_ERR(dsi->clk);
+       }
+
+       err = clk_prepare_enable(dsi->clk);
+       if (err < 0) {
+               dev_err(&pdev->dev, "cannot enable DSI clock\n");
+               return err;
+       }
+
+       dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+       if (IS_ERR(dsi->clk_lp)) {
+               dev_err(&pdev->dev, "cannot get low-power clock\n");
+               err = PTR_ERR(dsi->clk_lp);
+               goto disable_clk;
+       }
+
+       err = clk_prepare_enable(dsi->clk_lp);
+       if (err < 0) {
+               dev_err(&pdev->dev, "cannot enable low-power clock\n");
+               goto disable_clk;
+       }
+
+       dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+       if (IS_ERR(dsi->clk_parent)) {
+               dev_err(&pdev->dev, "cannot get parent clock\n");
+               err = PTR_ERR(dsi->clk_parent);
+               goto disable_clk_lp;
+       }
+
+       err = clk_prepare_enable(dsi->clk_parent);
+       if (err < 0) {
+               dev_err(&pdev->dev, "cannot enable parent clock\n");
+               goto disable_clk_lp;
+       }
+
+       err = tegra_dsi_setup_clocks(dsi);
+       if (err < 0) {
+               dev_err(&pdev->dev, "cannot setup clocks\n");
+               goto disable_clk_parent;
+       }
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(dsi->regs)) {
+               err = PTR_ERR(dsi->regs);
+               goto disable_clk_parent;
+       }
+
+       tegra_dsi_initialize(dsi);
+
+       dsi->mipi = tegra_mipi_request(&pdev->dev);
+       if (IS_ERR(dsi->mipi)) {
+               err = PTR_ERR(dsi->mipi);
+               goto disable_clk_parent;
+       }
+
+       dsi->host.ops = &tegra_dsi_host_ops;
+       dsi->host.dev = &pdev->dev;
+
+       err = mipi_dsi_host_register(&dsi->host);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
+               goto free_mipi;
+       }
+
+       INIT_LIST_HEAD(&dsi->client.list);
+       dsi->client.ops = &dsi_client_ops;
+       dsi->client.dev = &pdev->dev;
+
+       err = host1x_client_register(&dsi->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               goto unregister_host;
+       }
+
+       platform_set_drvdata(pdev, dsi);
+
+       return 0;
+
+unregister_host:
+       mipi_dsi_host_unregister(&dsi->host);
+free_mipi:
+       tegra_mipi_free(dsi->mipi);
+disable_clk_parent:
+       clk_disable_unprepare(dsi->clk_parent);
+disable_clk_lp:
+       clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+       clk_disable_unprepare(dsi->clk);
+       return err;
+}
+
+/*
+ * Tear down the DSI controller in the reverse order of probe:
+ * unregister the host1x client and DSI host, release MIPI calibration
+ * support, gate the clocks and remove the output.
+ */
+static int tegra_dsi_remove(struct platform_device *pdev)
+{
+       struct tegra_dsi *dsi = platform_get_drvdata(pdev);
+       int err;
+
+       err = host1x_client_unregister(&dsi->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       mipi_dsi_host_unregister(&dsi->host);
+       tegra_mipi_free(dsi->mipi);
+
+       clk_disable_unprepare(dsi->clk_parent);
+       clk_disable_unprepare(dsi->clk_lp);
+       clk_disable_unprepare(dsi->clk);
+
+       err = tegra_output_remove(&dsi->output);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id tegra_dsi_of_match[] = {
+       { .compatible = "nvidia,tegra114-dsi", },
+       { },
+};
+
+/* non-static: presumably registered by the tegra-drm core rather than
+ * via module_platform_driver() -- TODO confirm against drm.c */
+struct platform_driver tegra_dsi_driver = {
+       .driver = {
+               .name = "tegra-dsi",
+               .of_match_table = tegra_dsi_of_match,
+       },
+       .probe = tegra_dsi_probe,
+       .remove = tegra_dsi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
new file mode 100644 (file)
index 0000000..00e79c1
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_DSI_H
+#define DRM_TEGRA_DSI_H
+
+/*
+ * DSI controller register map. Offsets are presumably in 32-bit words,
+ * as consumed by tegra_dsi_readl()/tegra_dsi_writel() -- TODO confirm.
+ */
+#define DSI_INCR_SYNCPT                        0x00
+#define DSI_INCR_SYNCPT_CONTROL                0x01
+#define DSI_INCR_SYNCPT_ERROR          0x02
+#define DSI_CTXSW                      0x08
+#define DSI_RD_DATA                    0x09
+#define DSI_WR_DATA                    0x0a
+#define DSI_POWER_CONTROL              0x0b
+#define DSI_POWER_CONTROL_ENABLE       (1 << 0)
+#define DSI_INT_ENABLE                 0x0c
+#define DSI_INT_STATUS                 0x0d
+#define DSI_INT_MASK                   0x0e
+#define DSI_HOST_CONTROL               0x0f
+#define DSI_HOST_CONTROL_RAW           (1 << 6)
+#define DSI_HOST_CONTROL_HS            (1 << 5)
+#define DSI_HOST_CONTROL_BTA           (1 << 2)
+#define DSI_HOST_CONTROL_CS            (1 << 1)
+#define DSI_HOST_CONTROL_ECC           (1 << 0)
+#define DSI_CONTROL                    0x10
+#define DSI_CONTROL_HS_CLK_CTRL                (1 << 20)
+#define DSI_CONTROL_CHANNEL(c)         (((c) & 0x3) << 16)
+#define DSI_CONTROL_FORMAT(f)          (((f) & 0x3) << 12)
+#define DSI_CONTROL_TX_TRIG(x)         (((x) & 0x3) <<  8)
+#define DSI_CONTROL_LANES(n)           (((n) & 0x3) <<  4)
+#define DSI_CONTROL_DCS_ENABLE         (1 << 3)
+#define DSI_CONTROL_SOURCE(s)          (((s) & 0x1) <<  2)
+#define DSI_CONTROL_VIDEO_ENABLE       (1 << 1)
+#define DSI_CONTROL_HOST_ENABLE                (1 << 0)
+#define DSI_SOL_DELAY                  0x11
+#define DSI_MAX_THRESHOLD              0x12
+#define DSI_TRIGGER                    0x13
+#define DSI_TX_CRC                     0x14
+#define DSI_STATUS                     0x15
+#define DSI_STATUS_IDLE                        (1 << 10)
+#define DSI_INIT_SEQ_CONTROL           0x1a
+#define DSI_INIT_SEQ_DATA_0            0x1b
+#define DSI_INIT_SEQ_DATA_1            0x1c
+#define DSI_INIT_SEQ_DATA_2            0x1d
+#define DSI_INIT_SEQ_DATA_3            0x1e
+#define DSI_INIT_SEQ_DATA_4            0x1f
+#define DSI_INIT_SEQ_DATA_5            0x20
+#define DSI_INIT_SEQ_DATA_6            0x21
+#define DSI_INIT_SEQ_DATA_7            0x22
+#define DSI_PKT_SEQ_0_LO               0x23
+#define DSI_PKT_SEQ_0_HI               0x24
+#define DSI_PKT_SEQ_1_LO               0x25
+#define DSI_PKT_SEQ_1_HI               0x26
+#define DSI_PKT_SEQ_2_LO               0x27
+#define DSI_PKT_SEQ_2_HI               0x28
+#define DSI_PKT_SEQ_3_LO               0x29
+#define DSI_PKT_SEQ_3_HI               0x2a
+#define DSI_PKT_SEQ_4_LO               0x2b
+#define DSI_PKT_SEQ_4_HI               0x2c
+#define DSI_PKT_SEQ_5_LO               0x2d
+#define DSI_PKT_SEQ_5_HI               0x2e
+#define DSI_DCS_CMDS                   0x33
+#define DSI_PKT_LEN_0_1                        0x34
+#define DSI_PKT_LEN_2_3                        0x35
+#define DSI_PKT_LEN_4_5                        0x36
+#define DSI_PKT_LEN_6_7                        0x37
+#define DSI_PHY_TIMING_0               0x3c
+#define DSI_PHY_TIMING_1               0x3d
+#define DSI_PHY_TIMING_2               0x3e
+#define DSI_BTA_TIMING                 0x3f
+
+/*
+ * Convert a D-PHY timing value (ns) into an 8-bit register field:
+ * round to the nearest multiple of the byte-clock period and subtract
+ * the increment the hardware adds implicitly (hwinc).
+ */
+#define DSI_TIMING_FIELD(value, period, hwinc) \
+       ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
+
+#define DSI_TIMEOUT_0                  0x44
+#define DSI_TIMEOUT_LRX(x)             (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_HTX(x)             (((x) & 0xffff) <<  0)
+#define DSI_TIMEOUT_1                  0x45
+#define DSI_TIMEOUT_PR(x)              (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_TA(x)              (((x) & 0xffff) <<  0)
+#define DSI_TO_TALLY                   0x46
+#define DSI_TALLY_TA(x)                        (((x) & 0xff) << 16)
+#define DSI_TALLY_LRX(x)               (((x) & 0xff) <<  8)
+#define DSI_TALLY_HTX(x)               (((x) & 0xff) <<  0)
+/* pad control registers */
+#define DSI_PAD_CONTROL_0              0x4b
+#define DSI_PAD_CONTROL_VS1_PDIO(x)    (((x) & 0xf) <<  0)
+#define DSI_PAD_CONTROL_VS1_PDIO_CLK   (1 <<  8)
+#define DSI_PAD_CONTROL_VS1_PULLDN(x)  (((x) & 0xf) << 16)
+#define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24)
+#define DSI_PAD_CONTROL_CD             0x4c
+#define DSI_PAD_CD_STATUS              0x4d
+#define DSI_VIDEO_MODE_CONTROL         0x4e
+#define DSI_PAD_CONTROL_1              0x4f
+#define DSI_PAD_CONTROL_2              0x50
+#define DSI_PAD_OUT_CLK(x)             (((x) & 0x7) <<  0)
+#define DSI_PAD_LP_DN(x)               (((x) & 0x7) <<  4)
+#define DSI_PAD_LP_UP(x)               (((x) & 0x7) <<  8)
+#define DSI_PAD_SLEW_DN(x)             (((x) & 0x7) << 12)
+#define DSI_PAD_SLEW_UP(x)             (((x) & 0x7) << 16)
+#define DSI_PAD_CONTROL_3              0x51
+#define DSI_PAD_CONTROL_4              0x52
+/* ganged (dual-controller) mode registers */
+#define DSI_GANGED_MODE_CONTROL                0x53
+#define DSI_GANGED_MODE_START          0x54
+#define DSI_GANGED_MODE_SIZE           0x55
+#define DSI_RAW_DATA_BYTE_COUNT                0x56
+#define DSI_ULTRA_LOW_POWER_CONTROL    0x57
+/* second bank of initialization sequence data */
+#define DSI_INIT_SEQ_DATA_8            0x58
+#define DSI_INIT_SEQ_DATA_9            0x59
+#define DSI_INIT_SEQ_DATA_10           0x5a
+#define DSI_INIT_SEQ_DATA_11           0x5b
+#define DSI_INIT_SEQ_DATA_12           0x5c
+#define DSI_INIT_SEQ_DATA_13           0x5d
+#define DSI_INIT_SEQ_DATA_14           0x5e
+#define DSI_INIT_SEQ_DATA_15           0x5f
+
+#endif
index a3835e7de1842b84145e52c40ef15ed21e183323..f7fca09d49211c5afebdc797590c001a88e72d6d 100644 (file)
@@ -18,10 +18,12 @@ static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
        return container_of(fb, struct tegra_fb, base);
 }
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
 {
        return container_of(helper, struct tegra_fbdev, base);
 }
+#endif
 
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
                                    unsigned int index)
@@ -98,8 +100,10 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
                return ERR_PTR(-ENOMEM);
 
        fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
-       if (!fb->planes)
+       if (!fb->planes) {
+               kfree(fb);
                return ERR_PTR(-ENOMEM);
+       }
 
        fb->num_planes = num_planes;
 
@@ -172,6 +176,7 @@ unreference:
        return ERR_PTR(err);
 }
 
+#ifdef CONFIG_DRM_TEGRA_FBDEV
 static struct fb_ops tegra_fb_ops = {
        .owner = THIS_MODULE,
        .fb_fillrect = sys_fillrect,
@@ -339,6 +344,15 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
        kfree(fbdev);
 }
 
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+       if (fbdev) {
+               drm_modeset_lock_all(fbdev->base.dev);
+               drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+               drm_modeset_unlock_all(fbdev->base.dev);
+       }
+}
+
 static void tegra_fb_output_poll_changed(struct drm_device *drm)
 {
        struct tegra_drm *tegra = drm->dev_private;
@@ -346,16 +360,20 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm)
        if (tegra->fbdev)
                drm_fb_helper_hotplug_event(&tegra->fbdev->base);
 }
+#endif
 
 static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
        .fb_create = tegra_fb_create,
+#ifdef CONFIG_DRM_TEGRA_FBDEV
        .output_poll_changed = tegra_fb_output_poll_changed,
+#endif
 };
 
 int tegra_drm_fb_init(struct drm_device *drm)
 {
+#ifdef CONFIG_DRM_TEGRA_FBDEV
        struct tegra_drm *tegra = drm->dev_private;
-       struct tegra_fbdev *fbdev;
+#endif
 
        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;
@@ -365,28 +383,21 @@ int tegra_drm_fb_init(struct drm_device *drm)
 
        drm->mode_config.funcs = &tegra_drm_mode_funcs;
 
-       fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
-                                  drm->mode_config.num_connector);
-       if (IS_ERR(fbdev))
-               return PTR_ERR(fbdev);
-
-       tegra->fbdev = fbdev;
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+       tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+                                         drm->mode_config.num_connector);
+       if (IS_ERR(tegra->fbdev))
+               return PTR_ERR(tegra->fbdev);
+#endif
 
        return 0;
 }
 
 void tegra_drm_fb_exit(struct drm_device *drm)
 {
+#ifdef CONFIG_DRM_TEGRA_FBDEV
        struct tegra_drm *tegra = drm->dev_private;
 
        tegra_fbdev_free(tegra->fbdev);
-}
-
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
-{
-       if (fbdev) {
-               drm_modeset_lock_all(fbdev->base.dev);
-               drm_fb_helper_restore_fbdev_mode(&fbdev->base);
-               drm_modeset_unlock_all(fbdev->base.dev);
-       }
+#endif
 }
index 28a9cbc07ab95f3a5873fc9aac0f008180bffca5..ef853e558036d55ce31c7cb4acfdf2479beefb80 100644 (file)
@@ -18,6 +18,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/dma-buf.h>
 #include <drm/tegra_drm.h>
 
 #include "gem.h"
@@ -83,7 +84,7 @@ static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
        return bo;
 }
 
-const struct host1x_bo_ops tegra_bo_ops = {
+static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
@@ -145,7 +146,6 @@ err_dma:
        kfree(bo);
 
        return ERR_PTR(err);
-
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -174,13 +174,87 @@ err:
        return ERR_PTR(ret);
 }
 
+struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf)
+{
+       struct dma_buf_attachment *attach;
+       struct tegra_bo *bo;
+       ssize_t size;
+       int err;
+
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+       if (!bo)
+               return ERR_PTR(-ENOMEM);
+
+       host1x_bo_init(&bo->base, &tegra_bo_ops);
+       size = round_up(buf->size, PAGE_SIZE);
+
+       err = drm_gem_object_init(drm, &bo->gem, size);
+       if (err < 0)
+               goto free;
+
+       err = drm_gem_create_mmap_offset(&bo->gem);
+       if (err < 0)
+               goto release;
+
+       attach = dma_buf_attach(buf, drm->dev);
+       if (IS_ERR(attach)) {
+               err = PTR_ERR(attach);
+               goto free_mmap;
+       }
+
+       get_dma_buf(buf);
+
+       bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+       if (!bo->sgt) {
+               err = -ENOMEM;
+               goto detach;
+       }
+
+       if (IS_ERR(bo->sgt)) {
+               err = PTR_ERR(bo->sgt);
+               goto detach;
+       }
+
+       if (bo->sgt->nents > 1) {
+               err = -EINVAL;
+               goto detach;
+       }
+
+       bo->paddr = sg_dma_address(bo->sgt->sgl);
+       bo->gem.import_attach = attach;
+
+       return bo;
+
+detach:
+       if (!IS_ERR_OR_NULL(bo->sgt))
+               dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+
+       dma_buf_detach(buf, attach);
+       dma_buf_put(buf);
+free_mmap:
+       drm_gem_free_mmap_offset(&bo->gem);
+release:
+       drm_gem_object_release(&bo->gem);
+free:
+       kfree(bo);
+
+       return ERR_PTR(err);
+}
+
 void tegra_bo_free_object(struct drm_gem_object *gem)
 {
        struct tegra_bo *bo = to_tegra_bo(gem);
 
+       if (gem->import_attach) {
+               dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
+                                        DMA_TO_DEVICE);
+               drm_prime_gem_destroy(gem, NULL);
+       } else {
+               tegra_bo_destroy(gem->dev, bo);
+       }
+
        drm_gem_free_mmap_offset(gem);
        drm_gem_object_release(gem);
-       tegra_bo_destroy(gem->dev, bo);
 
        kfree(bo);
 }
@@ -256,3 +330,106 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
 
        return ret;
 }
+
+static struct sg_table *
+tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+                           enum dma_data_direction dir)
+{
+       struct drm_gem_object *gem = attach->dmabuf->priv;
+       struct tegra_bo *bo = to_tegra_bo(gem);
+       struct sg_table *sgt;
+
+       sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return NULL;
+
+       if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
+               kfree(sgt);
+               return NULL;
+       }
+
+       sg_dma_address(sgt->sgl) = bo->paddr;
+       sg_dma_len(sgt->sgl) = gem->size;
+
+       return sgt;
+}
+
+static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+                                         struct sg_table *sgt,
+                                         enum dma_data_direction dir)
+{
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
+static void tegra_gem_prime_release(struct dma_buf *buf)
+{
+       drm_gem_dmabuf_release(buf);
+}
+
+static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
+                                        unsigned long page)
+{
+       return NULL;
+}
+
+static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
+                                         unsigned long page,
+                                         void *addr)
+{
+}
+
+static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
+{
+       return NULL;
+}
+
+static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
+                                  void *addr)
+{
+}
+
+static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
+static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
+       .map_dma_buf = tegra_gem_prime_map_dma_buf,
+       .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
+       .release = tegra_gem_prime_release,
+       .kmap_atomic = tegra_gem_prime_kmap_atomic,
+       .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
+       .kmap = tegra_gem_prime_kmap,
+       .kunmap = tegra_gem_prime_kunmap,
+       .mmap = tegra_gem_prime_mmap,
+};
+
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+                                      struct drm_gem_object *gem,
+                                      int flags)
+{
+       return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
+                             flags);
+}
+
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+                                             struct dma_buf *buf)
+{
+       struct tegra_bo *bo;
+
+       if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
+               struct drm_gem_object *gem = buf->priv;
+
+               if (gem->dev == drm) {
+                       drm_gem_object_reference(gem);
+                       return gem;
+               }
+       }
+
+       bo = tegra_bo_import(drm, buf);
+       if (IS_ERR(bo))
+               return ERR_CAST(bo);
+
+       return &bo->gem;
+}
index 7674000bf47d6696ecec6db7507926144832c772..ffd4f792b410997630e06339a4307d087ab55f2e 100644 (file)
@@ -31,6 +31,7 @@ struct tegra_bo {
        struct drm_gem_object gem;
        struct host1x_bo base;
        unsigned long flags;
+       struct sg_table *sgt;
        dma_addr_t paddr;
        void *vaddr;
 };
@@ -40,8 +41,6 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
        return container_of(gem, struct tegra_bo, gem);
 }
 
-extern const struct host1x_bo_ops tegra_bo_ops;
-
 struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
                                 unsigned long flags);
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
@@ -59,4 +58,10 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
 
 extern const struct vm_operations_struct tegra_bo_vm_ops;
 
+struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
+                                      struct drm_gem_object *gem,
+                                      int flags);
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+                                             struct dma_buf *buf);
+
 #endif
index 7f6253ea5cb5ea264319e98800b6d3783e1becb3..6928015d11a49e9fd6e499aed2ed5f02d6d1730e 100644 (file)
@@ -40,6 +40,7 @@ struct tegra_hdmi {
        struct host1x_client client;
        struct tegra_output output;
        struct device *dev;
+       bool enabled;
 
        struct regulator *vdd;
        struct regulator *pll;
@@ -379,7 +380,7 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
 
                if (f > 96000)
                        delta = 2;
-               else if (f > 480000)
+               else if (f > 48000)
                        delta = 6;
                else
                        delta = 9;
@@ -699,6 +700,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
        int retries = 1000;
        int err;
 
+       if (hdmi->enabled)
+               return 0;
+
        hdmi->dvi = !tegra_output_is_hdmi(output);
 
        pclk = mode->clock * 1000;
@@ -839,10 +843,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
        value |= SOR_CSTM_ROTCLK(2);
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
 
-       tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
-       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
-       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
-
        /* start SOR */
        tegra_hdmi_writel(hdmi,
                          SOR_PWR_NORMAL_STATE_PU |
@@ -892,31 +892,67 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
                          HDMI_NV_PDISP_SOR_STATE1);
        tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
 
-       tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
-
-       value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
-               PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
-       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+       value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+       value |= HDMI_ENABLE;
+       tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
 
-       value = DISP_CTRL_MODE_C_DISPLAY;
+       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+       value &= ~DISP_CTRL_MODE_MASK;
+       value |= DISP_CTRL_MODE_C_DISPLAY;
        tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
 
+       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+       value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+                PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
        tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
        tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 
        /* TODO: add HDCP support */
 
+       hdmi->enabled = true;
+
        return 0;
 }
 
 static int tegra_output_hdmi_disable(struct tegra_output *output)
 {
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
        struct tegra_hdmi *hdmi = to_hdmi(output);
+       unsigned long value;
+
+       if (!hdmi->enabled)
+               return 0;
+
+       /*
+        * The following accesses registers of the display controller, so make
+        * sure it's only executed when the output is attached to one.
+        */
+       if (dc) {
+               value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+               value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+                          PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+               tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+               value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+               value &= ~DISP_CTRL_MODE_MASK;
+               tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+               value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+               value &= ~HDMI_ENABLE;
+               tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+               tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+               tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+       }
 
        reset_control_assert(hdmi->rst);
        clk_disable(hdmi->clk);
        regulator_disable(hdmi->pll);
 
+       hdmi->enabled = false;
+
        return 0;
 }
 
@@ -960,7 +996,7 @@ static int tegra_output_hdmi_check_mode(struct tegra_output *output,
        parent = clk_get_parent(hdmi->clk_parent);
 
        err = clk_round_rate(parent, pclk * 4);
-       if (err < 0)
+       if (err <= 0)
                *status = MODE_NOCLOCK;
        else
                *status = MODE_OK;
@@ -1382,9 +1418,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
                return err;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs)
-               return -ENXIO;
-
        hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(hdmi->regs))
                return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
new file mode 100644 (file)
index 0000000..e2c4aed
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include "mipi-phy.h"
+
+/*
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from
+ * the valid ranges specified in Section 5.9 of the D-PHY specification
+ * with minor adjustments.
+ */
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+                                unsigned long period)
+{
+       timing->clkmiss = 0;
+       timing->clkpost = 70 + 52 * period;
+       timing->clkpre = 8;
+       timing->clkprepare = 65;
+       timing->clksettle = 95;
+       timing->clktermen = 0;
+       timing->clktrail = 80;
+       timing->clkzero = 260;
+       timing->dtermen = 0;
+       timing->eot = 0;
+       timing->hsexit = 120;
+       timing->hsprepare = 65 + 5 * period;
+       timing->hszero = 145 + 5 * period;
+       timing->hssettle = 85 + 6 * period;
+       timing->hsskip = 40;
+       timing->hstrail = max(8 * period, 60 + 4 * period);
+       timing->init = 100000;
+       timing->lpx = 60;
+       timing->taget = 5 * timing->lpx;
+       timing->tago = 4 * timing->lpx;
+       timing->tasure = 2 * timing->lpx;
+       timing->wakeup = 1000000;
+
+       return 0;
+}
+
+/*
+ * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY,
+ * Section 5.9 "Global Operation Timing Parameters".
+ */
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+                             unsigned long period)
+{
+       if (timing->clkmiss > 60)
+               return -EINVAL;
+
+       if (timing->clkpost < (60 + 52 * period))
+               return -EINVAL;
+
+       if (timing->clkpre < 8)
+               return -EINVAL;
+
+       if (timing->clkprepare < 38 || timing->clkprepare > 95)
+               return -EINVAL;
+
+       if (timing->clksettle < 95 || timing->clksettle > 300)
+               return -EINVAL;
+
+       if (timing->clktermen > 38)
+               return -EINVAL;
+
+       if (timing->clktrail < 60)
+               return -EINVAL;
+
+       if (timing->clkprepare + timing->clkzero < 300)
+               return -EINVAL;
+
+       if (timing->dtermen > 35 + 4 * period)
+               return -EINVAL;
+
+       if (timing->eot > 105 + 12 * period)
+               return -EINVAL;
+
+       if (timing->hsexit < 100)
+               return -EINVAL;
+
+       if (timing->hsprepare < 40 + 4 * period ||
+           timing->hsprepare > 85 + 6 * period)
+               return -EINVAL;
+
+       if (timing->hsprepare + timing->hszero < 145 + 10 * period)
+               return -EINVAL;
+
+       if ((timing->hssettle < 85 + 6 * period) ||
+           (timing->hssettle > 145 + 10 * period))
+               return -EINVAL;
+
+       if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
+               return -EINVAL;
+
+       if (timing->hstrail < max(8 * period, 60 + 4 * period))
+               return -EINVAL;
+
+       if (timing->init < 100000)
+               return -EINVAL;
+
+       if (timing->lpx < 50)
+               return -EINVAL;
+
+       if (timing->taget != 5 * timing->lpx)
+               return -EINVAL;
+
+       if (timing->tago != 4 * timing->lpx)
+               return -EINVAL;
+
+       if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
+               return -EINVAL;
+
+       if (timing->wakeup < 1000000)
+               return -EINVAL;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
new file mode 100644 (file)
index 0000000..d359169
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef DRM_TEGRA_MIPI_PHY_H
+#define DRM_TEGRA_MIPI_PHY_H
+
+/*
+ * D-PHY timing parameters
+ *
+ * A detailed description of these parameters can be found in the  MIPI
+ * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing
+ * Parameters".
+ *
+ * All parameters are specified in nanoseconds.
+ */
+struct mipi_dphy_timing {
+       unsigned int clkmiss;
+       unsigned int clkpost;
+       unsigned int clkpre;
+       unsigned int clkprepare;
+       unsigned int clksettle;
+       unsigned int clktermen;
+       unsigned int clktrail;
+       unsigned int clkzero;
+       unsigned int dtermen;
+       unsigned int eot;
+       unsigned int hsexit;
+       unsigned int hsprepare;
+       unsigned int hszero;
+       unsigned int hssettle;
+       unsigned int hsskip;
+       unsigned int hstrail;
+       unsigned int init;
+       unsigned int lpx;
+       unsigned int taget;
+       unsigned int tago;
+       unsigned int tasure;
+       unsigned int wakeup;
+};
+
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+                                unsigned long period);
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+                             unsigned long period);
+
+#endif
index 2cb0065e0578f6d80da0532dad68a31e27a6924d..57cecbd18ca88d210a6e83e61c3f3cff208f26e5 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/of_gpio.h>
 
+#include <drm/drm_panel.h>
 #include "drm.h"
 
 static int tegra_connector_get_modes(struct drm_connector *connector)
@@ -17,6 +18,16 @@ static int tegra_connector_get_modes(struct drm_connector *connector)
        struct edid *edid = NULL;
        int err = 0;
 
+       /*
+        * If the panel provides one or more modes, use them exclusively and
+        * ignore any other means of obtaining a mode.
+        */
+       if (output->panel) {
+               err = output->panel->funcs->get_modes(output->panel);
+               if (err > 0)
+                       return err;
+       }
+
        if (output->edid)
                edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
        else if (output->ddc)
@@ -72,6 +83,11 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
                else
                        status = connector_status_connected;
        } else {
+               if (!output->panel)
+                       status = connector_status_disconnected;
+               else
+                       status = connector_status_connected;
+
                if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
                        status = connector_status_connected;
        }
@@ -115,6 +131,16 @@ static const struct drm_encoder_funcs encoder_funcs = {
 
 static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
+       struct tegra_output *output = encoder_to_output(encoder);
+       struct drm_panel *panel = output->panel;
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               drm_panel_disable(panel);
+               tegra_output_disable(output);
+       } else {
+               tegra_output_enable(output);
+               drm_panel_enable(panel);
+       }
 }
 
 static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -163,14 +189,22 @@ static irqreturn_t hpd_irq(int irq, void *data)
 
 int tegra_output_probe(struct tegra_output *output)
 {
+       struct device_node *ddc, *panel;
        enum of_gpio_flags flags;
-       struct device_node *ddc;
-       size_t size;
-       int err;
+       int err, size;
 
        if (!output->of_node)
                output->of_node = output->dev->of_node;
 
+       panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
+       if (panel) {
+               output->panel = of_drm_find_panel(panel);
+               if (!output->panel)
+                       return -EPROBE_DEFER;
+
+               of_node_put(panel);
+       }
+
        output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
 
        ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
@@ -185,9 +219,6 @@ int tegra_output_probe(struct tegra_output *output)
                of_node_put(ddc);
        }
 
-       if (!output->edid && !output->ddc)
-               return -ENODEV;
-
        output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
                                                   "nvidia,hpd-gpio", 0,
                                                   &flags);
@@ -256,6 +287,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
                encoder = DRM_MODE_ENCODER_TMDS;
                break;
 
+       case TEGRA_OUTPUT_DSI:
+               connector = DRM_MODE_CONNECTOR_DSI;
+               encoder = DRM_MODE_ENCODER_DSI;
+               break;
+
        default:
                connector = DRM_MODE_CONNECTOR_Unknown;
                encoder = DRM_MODE_ENCODER_NONE;
@@ -267,6 +303,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
        drm_connector_helper_add(&output->connector, &connector_helper_funcs);
        output->connector.dpms = DRM_MODE_DPMS_OFF;
 
+       if (output->panel)
+               drm_panel_attach(output->panel, &output->connector);
+
        drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
        drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
 
index 3b29018913a5f2bb9da1a2ae2f57ddc89280bbe8..338f7f6561d701d601a92b2f6362782536d03e2e 100644 (file)
@@ -87,15 +87,60 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
 static int tegra_output_rgb_enable(struct tegra_output *output)
 {
        struct tegra_rgb *rgb = to_rgb(output);
+       unsigned long value;
 
        tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
 
+       value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+       tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+       /* XXX: parameterize? */
+       value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+       value &= ~LVS_OUTPUT_POLARITY_LOW;
+       value &= ~LHS_OUTPUT_POLARITY_LOW;
+       tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+       /* XXX: parameterize? */
+       value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+               DISP_ORDER_RED_BLUE;
+       tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+       /* XXX: parameterize? */
+       value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+       tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+       value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+       value &= ~DISP_CTRL_MODE_MASK;
+       value |= DISP_CTRL_MODE_C_DISPLAY;
+       tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+       value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+       value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+                PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+       tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+       tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
        return 0;
 }
 
 static int tegra_output_rgb_disable(struct tegra_output *output)
 {
        struct tegra_rgb *rgb = to_rgb(output);
+       unsigned long value;
+
+       value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
+       value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+                  PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+       tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+       value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND);
+       value &= ~DISP_CTRL_MODE_MASK;
+       tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND);
+
+       tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 
        tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
 
@@ -213,7 +258,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
         * RGB outputs are an exception, so we make sure they can be attached
         * to only their parent display controller.
         */
-       rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+       rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base);
 
        return 0;
 }
index 116da199b9429a56ba8a7b58efc953369b721344..171a8203892ce16b7767b2fb3fa11fc1995f0fad 100644 (file)
@@ -311,7 +311,7 @@ static void tilcdc_lastclose(struct drm_device *dev)
        drm_fbdev_cma_restore_mode(priv->fbdev);
 }
 
-static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+static irqreturn_t tilcdc_irq(int irq, void *arg)
 {
        struct drm_device *dev = arg;
        struct tilcdc_drm_private *priv = dev->dev_private;
@@ -444,7 +444,7 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       return drm_mm_dump_table(m, dev->mm_private);
+       return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }
 
 static struct drm_info_list tilcdc_debugfs_list[] = {
@@ -594,7 +594,7 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
 
 static int tilcdc_pdev_remove(struct platform_device *pdev)
 {
-       drm_platform_exit(&tilcdc_driver, pdev);
+       drm_put_dev(platform_get_drvdata(pdev));
 
        return 0;
 }
index 07e02c4bf5a8e0def00e120a14b5ada2d1cfaa97..a066513093880a218d185624589ec07efce66b0b 100644 (file)
@@ -957,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible,
                        bool no_wait_gpu)
index 4061521523154e0fbc102623b6a9d348e03d0313..1df856f7856821b31fe96e55dac5bc74bd74ed4b 100644 (file)
@@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
        }
 }
 
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
        return 0;
 }
 
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
 {
        struct ttm_mem_type_manager *man;
@@ -594,7 +594,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
        if (start_page > bo->num_pages)
                return -EINVAL;
 #if 0
-       if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+       if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
 #endif
        (void) ttm_mem_io_lock(man, false);
index 6440eeac22d250844d2203018258654e54483cd3..801231c9ae483980afe0e93a0af73a7f07123864 100644 (file)
@@ -132,6 +132,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_NOPAGE;
        }
 
+       /*
+        * Refuse to fault imported pages. This should be handled
+        * (if at all) by redirecting mmap to the exporter.
+        */
+       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+               retval = VM_FAULT_SIGBUS;
+               goto out_unlock;
+       }
+
        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
@@ -217,10 +226,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        } else if (unlikely(!page)) {
                                break;
                        }
+                       page->mapping = vma->vm_file->f_mapping;
+                       page->index = drm_vma_node_start(&bo->vma_node) +
+                               page_offset;
                        pfn = page_to_pfn(page);
                }
 
-               ret = vm_insert_mixed(&cvma, address, pfn);
+               if (vma->vm_flags & VM_MIXEDMAP)
+                       ret = vm_insert_mixed(&cvma, address, pfn);
+               else
+                       ret = vm_insert_pfn(&cvma, address, pfn);
+
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
@@ -250,6 +266,8 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;
 
+       WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+
        (void)ttm_bo_reference(bo);
 }
 
@@ -319,7 +337,14 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
         */
 
        vma->vm_private_data = bo;
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+       /*
+        * PFNMAP is faster than MIXEDMAP due to reduced page
+        * administration. So use MIXEDMAP only if private VMA, where
+        * we need to support COW.
+        */
+       vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 out_unref:
        ttm_bo_unref(&bo);
@@ -334,7 +359,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
-       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
index 3daa9a3930b810c5adaf310a3dbea89e0e73343d..6a954544727f3f13cfa8d9be0b8ff4cbf39d9367 100644 (file)
@@ -186,14 +186,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_write_lock);
 
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
-       spin_lock(&lock->lock);
-       lock->rw = 1;
-       wake_up_all(&lock->queue);
-       spin_unlock(&lock->lock);
-}
-
 static int __ttm_vt_unlock(struct ttm_lock *lock)
 {
        int ret = 0;
index 6fe7b92a82d1f72f465a79d48dd0088337d713fe..37079859afc86e6cef120cd4d8a924388c3f6e3b 100644 (file)
@@ -68,7 +68,7 @@
 
 struct ttm_object_file {
        struct ttm_object_device *tdev;
-       rwlock_t lock;
+       spinlock_t lock;
        struct list_head ref_list;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
        struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
  */
 
 struct ttm_ref_object {
+       struct rcu_head rcu_head;
        struct drm_hash_item hash;
        struct list_head head;
        struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
         * call_rcu() or ttm_base_object_kfree().
         */
 
-       if (base->refcount_release) {
-               ttm_object_file_unref(&base->tfile);
+       ttm_object_file_unref(&base->tfile);
+       if (base->refcount_release)
                base->refcount_release(&base);
-       }
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
 {
-       struct ttm_object_device *tdev = tfile->tdev;
-       struct ttm_base_object *uninitialized_var(base);
+       struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
+       struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;
 
        rcu_read_lock();
-       ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+       ret = drm_ht_find_item_rcu(ht, key, &hash);
 
        if (likely(ret == 0)) {
-               base = drm_hash_entry(hash, struct ttm_base_object, hash);
-               ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+               base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+               if (!kref_get_unless_zero(&base->refcount))
+                       base = NULL;
        }
        rcu_read_unlock();
 
-       if (unlikely(ret != 0))
-               return NULL;
+       return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
 
-       if (tfile != base->tfile && !base->shareable) {
-               pr_err("Attempted access of non-shareable object\n");
-               ttm_base_object_unref(&base);
-               return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+       struct ttm_base_object *base = NULL;
+       struct drm_hash_item *hash;
+       struct drm_open_hash *ht = &tdev->object_hash;
+       int ret;
+
+       rcu_read_lock();
+       ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+       if (likely(ret == 0)) {
+               base = drm_hash_entry(hash, struct ttm_base_object, hash);
+               if (!kref_get_unless_zero(&base->refcount))
+                       base = NULL;
        }
+       rcu_read_unlock();
 
        return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        int ret = -EINVAL;
 
+       if (base->tfile != tfile && !base->shareable)
+               return -EPERM;
+
        if (existed != NULL)
                *existed = true;
 
        while (ret == -EINVAL) {
-               read_lock(&tfile->lock);
-               ret = drm_ht_find_item(ht, base->hash.key, &hash);
+               rcu_read_lock();
+               ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
                if (ret == 0) {
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-                       kref_get(&ref->kref);
-                       read_unlock(&tfile->lock);
-                       break;
+                       if (!kref_get_unless_zero(&ref->kref)) {
+                               rcu_read_unlock();
+                               break;
+                       }
                }
 
-               read_unlock(&tfile->lock);
+               rcu_read_unlock();
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                ref->ref_type = ref_type;
                kref_init(&ref->kref);
 
-               write_lock(&tfile->lock);
-               ret = drm_ht_insert_item(ht, &ref->hash);
+               spin_lock(&tfile->lock);
+               ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        kref_get(&base->refcount);
-                       write_unlock(&tfile->lock);
+                       spin_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }
 
-               write_unlock(&tfile->lock);
+               spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);
 
                ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
        ht = &tfile->ref_hash[ref->ref_type];
-       (void)drm_ht_remove_item(ht, &ref->hash);
+       (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
-       write_unlock(&tfile->lock);
+       spin_unlock(&tfile->lock);
 
        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);
 
        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
-       kfree(ref);
-       write_lock(&tfile->lock);
+       kfree_rcu(ref, rcu_head);
+       spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
        struct drm_hash_item *hash;
        int ret;
 
-       write_lock(&tfile->lock);
+       spin_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
-               write_unlock(&tfile->lock);
+               spin_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
-       write_unlock(&tfile->lock);
+       spin_unlock(&tfile->lock);
        return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
        struct ttm_object_file *tfile = *p_tfile;
 
        *p_tfile = NULL;
-       write_lock(&tfile->lock);
+       spin_lock(&tfile->lock);
 
        /*
         * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);
 
-       write_unlock(&tfile->lock);
+       spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
        if (unlikely(tfile == NULL))
                return NULL;
 
-       rwlock_init(&tfile->lock);
+       spin_lock_init(&tfile->lock);
        tfile->tdev = tdev;
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);
index 210d50365162d39d8b2048ba3d8c0e567ed95bf9..9af99084b344413dcb07c2a368916bff647fc119 100644 (file)
@@ -170,9 +170,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
                ttm_tt_unbind(ttm);
        }
 
-       if (ttm->state == tt_unbound) {
-               ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-       }
+       if (ttm->state == tt_unbound)
+               ttm_tt_unpopulate(ttm);
 
        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
@@ -362,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                page_cache_release(to_page);
        }
 
-       ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+       ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
@@ -375,3 +374,23 @@ out_err:
 
        return ret;
 }
+
+static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
+{
+       pgoff_t i;
+       struct page **page = ttm->pages;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               (*page)->mapping = NULL;
+               (*page++)->index = 0;
+       }
+}
+
+void ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+       if (ttm->state == tt_unpopulated)
+               return;
+
+       ttm_tt_clear_mapping(ttm);
+       ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+}
index 97e9d614700f7f6d475b55c7ecae8a513e54afb4..dbadd49e4c4a62bdd32c83473ad3751452e4033d 100644 (file)
@@ -403,15 +403,17 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
        int i;
        int ret = 0;
 
+       drm_modeset_lock_all(fb->dev);
+
        if (!ufb->active_16)
-               return 0;
+               goto unlock;
 
        if (ufb->obj->base.import_attach) {
                ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
                                               0, ufb->obj->base.size,
                                               DMA_FROM_DEVICE);
                if (ret)
-                       return ret;
+                       goto unlock;
        }
 
        for (i = 0; i < num_clips; i++) {
@@ -419,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                  clips[i].x2 - clips[i].x1,
                                  clips[i].y2 - clips[i].y1);
                if (ret)
-                       break;
+                       goto unlock;
        }
 
        if (ufb->obj->base.import_attach) {
@@ -427,6 +429,10 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       0, ufb->obj->base.size,
                                       DMA_FROM_DEVICE);
        }
+
+ unlock:
+       drm_modeset_unlock_all(fb->dev);
+
        return ret;
 }
 
index 652f9b43ec9dcfc1c8a6e383dcc443fa9ef2db94..a18479c6b6dae3dfdbb3d3e6e238f129dc79a3ad 100644 (file)
@@ -60,7 +60,7 @@
        dev_priv->dma_low += 8;                                 \
 }
 
-#define via_flush_write_combine() DRM_MEMORYBARRIER()
+#define via_flush_write_combine() mb()
 
 #define VIA_OUT_RING_QW(w1, w2)        do {            \
        *vb++ = (w1);                           \
@@ -234,13 +234,13 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
 
        switch (init->func) {
        case VIA_INIT_DMA:
-               if (!DRM_SUSER(DRM_CURPROC))
+               if (!capable(CAP_SYS_ADMIN))
                        retcode = -EPERM;
                else
                        retcode = via_initialize(dev, dev_priv, init);
                break;
        case VIA_CLEANUP_DMA:
-               if (!DRM_SUSER(DRM_CURPROC))
+               if (!capable(CAP_SYS_ADMIN))
                        retcode = -EPERM;
                else
                        retcode = via_dma_cleanup(dev);
@@ -273,7 +273,7 @@ static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *c
        if (cmd->size > VIA_PCI_BUF_SIZE)
                return -ENOMEM;
 
-       if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+       if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
                return -EFAULT;
 
        /*
@@ -346,7 +346,7 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
 
        if (cmd->size > VIA_PCI_BUF_SIZE)
                return -ENOMEM;
-       if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+       if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
                return -EFAULT;
 
        if ((ret =
@@ -543,7 +543,7 @@ static void via_cmdbuf_start(drm_via_private_t *dev_priv)
 
        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
-       DRM_WRITEMEMORYBARRIER();
+       wmb();
        VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
        VIA_READ(VIA_REG_TRANSPACE);
 
index 8b0f25904e6db4edf754c9c18ab58077bfb0930e..ba33cf679180498a4ecd29260c16ee0570d2f480 100644 (file)
@@ -217,7 +217,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
        VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
        VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
        VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
-       DRM_WRITEMEMORYBARRIER();
+       wmb();
        VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
        VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
 }
@@ -338,7 +338,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
 
                blitq->blits[cur]->aborted = blitq->aborting;
                blitq->done_blit_handle++;
-               DRM_WAKEUP(blitq->blit_queue + cur);
+               wake_up(blitq->blit_queue + cur);
 
                cur++;
                if (cur >= VIA_NUM_BLIT_SLOTS)
@@ -363,7 +363,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
 
                via_abort_dmablit(dev, engine);
                blitq->aborting = 1;
-               blitq->end = jiffies + DRM_HZ;
+               blitq->end = jiffies + HZ;
        }
 
        if (!blitq->is_active) {
@@ -372,7 +372,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
                        blitq->is_active = 1;
                        blitq->cur = cur;
                        blitq->num_outstanding--;
-                       blitq->end = jiffies + DRM_HZ;
+                       blitq->end = jiffies + HZ;
                        if (!timer_pending(&blitq->poll_timer))
                                mod_timer(&blitq->poll_timer, jiffies + 1);
                } else {
@@ -436,7 +436,7 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
        int ret = 0;
 
        if (via_dmablit_active(blitq, engine, handle, &queue)) {
-               DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+               DRM_WAIT_ON(ret, *queue, 3 * HZ,
                            !via_dmablit_active(blitq, engine, handle, NULL));
        }
        DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
@@ -521,7 +521,7 @@ via_dmablit_workqueue(struct work_struct *work)
 
                spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
 
-               DRM_WAKEUP(&blitq->busy_queue);
+               wake_up(&blitq->busy_queue);
 
                via_free_sg_info(dev->pdev, cur_sg);
                kfree(cur_sg);
@@ -561,8 +561,8 @@ via_init_dmablit(struct drm_device *dev)
                blitq->aborting = 0;
                spin_lock_init(&blitq->blit_lock);
                for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
-                       DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
-               DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+                       init_waitqueue_head(blitq->blit_queue + j);
+               init_waitqueue_head(&blitq->busy_queue);
                INIT_WORK(&blitq->wq, via_dmablit_workqueue);
                setup_timer(&blitq->poll_timer, via_dmablit_timer,
                                (unsigned long)blitq);
@@ -688,7 +688,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
        while (blitq->num_free == 0) {
                spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
 
-               DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+               DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
                if (ret)
                        return (-EINTR == ret) ? -EAGAIN : ret;
 
@@ -713,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
        spin_lock_irqsave(&blitq->blit_lock, irqsave);
        blitq->num_free++;
        spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-       DRM_WAKEUP(&blitq->busy_queue);
+       wake_up(&blitq->busy_queue);
 }
 
 /*
index 92684a9b7e3414f69cdc76f2d2ef82d286cda734..50abc2adfaee495730337a94b4fc931bcdd40f37 100644 (file)
@@ -46,7 +46,7 @@ static int via_driver_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 }
 
-void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct via_file_private *file_priv = file->driver_priv;
 
index a811ef2b505f1b25de22921a55ad8309d6a33d6a..ad0273256beb58b3bd4ff65c3c6b230427543a17 100644 (file)
@@ -138,7 +138,7 @@ extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int via_enable_vblank(struct drm_device *dev, int crtc);
 extern void via_disable_vblank(struct drm_device *dev, int crtc);
 
-extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
 extern void via_driver_irq_preinstall(struct drm_device *dev);
 extern int via_driver_irq_postinstall(struct drm_device *dev);
 extern void via_driver_irq_uninstall(struct drm_device *dev);
index ac98964297cfffcf4939817629fbd51d17f32dd7..1319433816d3cd4743c7eadbcf073d8703c6476b 100644 (file)
@@ -104,7 +104,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
        return atomic_read(&dev_priv->vbl_received);
 }
 
-irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t via_driver_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
        for (i = 0; i < dev_priv->num_irqs; ++i) {
                if (status & cur_irq->pending_mask) {
                        atomic_inc(&cur_irq->irq_received);
-                       DRM_WAKEUP(&cur_irq->irq_queue);
+                       wake_up(&cur_irq->irq_queue);
                        handled = 1;
                        if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
                                via_dmablit_handler(dev, 0, 1);
@@ -239,12 +239,12 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
        cur_irq = dev_priv->via_irqs + real_irq;
 
        if (masks[real_irq][2] && !force_sequence) {
-               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
                            ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
                             masks[irq][4]));
                cur_irq_sequence = atomic_read(&cur_irq->irq_received);
        } else {
-               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+               DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
                            (((cur_irq_sequence =
                               atomic_read(&cur_irq->irq_received)) -
                              *sequence) <= (1 << 23)));
@@ -287,7 +287,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
                        atomic_set(&cur_irq->irq_received, 0);
                        cur_irq->enable_mask = dev_priv->irq_masks[i][0];
                        cur_irq->pending_mask = dev_priv->irq_masks[i][1];
-                       DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+                       init_waitqueue_head(&cur_irq->irq_queue);
                        dev_priv->irq_enable_mask |= cur_irq->enable_mask;
                        dev_priv->irq_pending_mask |= cur_irq->pending_mask;
                        cur_irq++;
index 6569efa2ff6ea4e82f2e91649dc9fdec0964b107..a9ffbad1cfdd87fe9d4c4bf14ce8902ca07f4f50 100644 (file)
@@ -36,7 +36,7 @@ void via_init_futex(drm_via_private_t *dev_priv)
        DRM_DEBUG("\n");
 
        for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
-               DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
+               init_waitqueue_head(&(dev_priv->decoder_queue[i]));
                XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
        }
 }
@@ -58,7 +58,7 @@ void via_release_futex(drm_via_private_t *dev_priv, int context)
                if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
                        if (_DRM_LOCK_IS_HELD(*lock)
                            && (*lock & _DRM_LOCK_CONT)) {
-                               DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
+                               wake_up(&(dev_priv->decoder_queue[i]));
                        }
                        *lock = 0;
                }
@@ -83,10 +83,10 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
        switch (fx->func) {
        case VIA_FUTEX_WAIT:
                DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
-                           (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
+                           (fx->ms / 10) * (HZ / 100), *lock != fx->val);
                return ret;
        case VIA_FUTEX_WAKE:
-               DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
+               wake_up(&(dev_priv->decoder_queue[fx->lock]));
                return 0;
        }
        return 0;
index 9f8b690bcf52c97e24db7c6a515f9be185b30321..458cdf6d81e8b91781af5d1c18d663c133c5f455 100644 (file)
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
            vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
            vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-           vmwgfx_surface.o vmwgfx_prime.o
+           vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
index d0e085ee82498679cdde540f2ce87c539fa4856b..d95335cb90bd4f30f7b7cdcf601028611986d802 100644 (file)
@@ -34,6 +34,8 @@
 
 #include "svga_reg.h"
 
+typedef uint32 PPN;
+typedef __le64 PPN64;
 
 /*
  * 3D Hardware Version
@@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 #define SVGA3D_MAX_CONTEXT_IDS                  256
 #define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
 
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
 /*
  * Surface formats.
  *
@@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
  */
 
 typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_MIN                   = 0,
    SVGA3D_FORMAT_INVALID               = 0,
 
    SVGA3D_X8R8G8B8                     = 1,
@@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_RG_S10E5                     = 35,
    SVGA3D_RG_S23E8                     = 36,
 
-   /*
-    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
-    * the most efficient format to use when creating new surfaces
-    * expressly for index or vertex data.
-    */
-
    SVGA3D_BUFFER                       = 37,
 
    SVGA3D_Z_D24X8                      = 38,
@@ -159,15 +159,114 @@ typedef enum SVGA3dSurfaceFormat {
    /* Video format with alpha */
    SVGA3D_AYUV                         = 45,
 
+   SVGA3D_R32G32B32A32_TYPELESS        = 46,
+   SVGA3D_R32G32B32A32_FLOAT           = 25,
+   SVGA3D_R32G32B32A32_UINT            = 47,
+   SVGA3D_R32G32B32A32_SINT            = 48,
+   SVGA3D_R32G32B32_TYPELESS           = 49,
+   SVGA3D_R32G32B32_FLOAT              = 50,
+   SVGA3D_R32G32B32_UINT               = 51,
+   SVGA3D_R32G32B32_SINT               = 52,
+   SVGA3D_R16G16B16A16_TYPELESS        = 53,
+   SVGA3D_R16G16B16A16_FLOAT           = 24,
+   SVGA3D_R16G16B16A16_UNORM           = 41,
+   SVGA3D_R16G16B16A16_UINT            = 54,
+   SVGA3D_R16G16B16A16_SNORM           = 55,
+   SVGA3D_R16G16B16A16_SINT            = 56,
+   SVGA3D_R32G32_TYPELESS              = 57,
+   SVGA3D_R32G32_FLOAT                 = 36,
+   SVGA3D_R32G32_UINT                  = 58,
+   SVGA3D_R32G32_SINT                  = 59,
+   SVGA3D_R32G8X24_TYPELESS            = 60,
+   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
+   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
+   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
+   SVGA3D_R10G10B10A2_TYPELESS         = 64,
+   SVGA3D_R10G10B10A2_UNORM            = 26,
+   SVGA3D_R10G10B10A2_UINT             = 65,
+   SVGA3D_R11G11B10_FLOAT              = 66,
+   SVGA3D_R8G8B8A8_TYPELESS            = 67,
+   SVGA3D_R8G8B8A8_UNORM               = 68,
+   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+   SVGA3D_R8G8B8A8_UINT                = 70,
+   SVGA3D_R8G8B8A8_SNORM               = 28,
+   SVGA3D_R8G8B8A8_SINT                = 71,
+   SVGA3D_R16G16_TYPELESS              = 72,
+   SVGA3D_R16G16_FLOAT                 = 35,
+   SVGA3D_R16G16_UNORM                 = 40,
+   SVGA3D_R16G16_UINT                  = 73,
+   SVGA3D_R16G16_SNORM                 = 39,
+   SVGA3D_R16G16_SINT                  = 74,
+   SVGA3D_R32_TYPELESS                 = 75,
+   SVGA3D_D32_FLOAT                    = 76,
+   SVGA3D_R32_FLOAT                    = 34,
+   SVGA3D_R32_UINT                     = 77,
+   SVGA3D_R32_SINT                     = 78,
+   SVGA3D_R24G8_TYPELESS               = 79,
+   SVGA3D_D24_UNORM_S8_UINT            = 80,
+   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
+   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+   SVGA3D_R8G8_TYPELESS                = 83,
+   SVGA3D_R8G8_UNORM                   = 84,
+   SVGA3D_R8G8_UINT                    = 85,
+   SVGA3D_R8G8_SNORM                   = 27,
+   SVGA3D_R8G8_SINT                    = 86,
+   SVGA3D_R16_TYPELESS                 = 87,
+   SVGA3D_R16_FLOAT                    = 33,
+   SVGA3D_D16_UNORM                    = 8,
+   SVGA3D_R16_UNORM                    = 88,
+   SVGA3D_R16_UINT                     = 89,
+   SVGA3D_R16_SNORM                    = 90,
+   SVGA3D_R16_SINT                     = 91,
+   SVGA3D_R8_TYPELESS                  = 92,
+   SVGA3D_R8_UNORM                     = 93,
+   SVGA3D_R8_UINT                      = 94,
+   SVGA3D_R8_SNORM                     = 95,
+   SVGA3D_R8_SINT                      = 96,
+   SVGA3D_A8_UNORM                     = 32,
+   SVGA3D_R1_UNORM                     = 97,
+   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+   SVGA3D_R8G8_B8G8_UNORM              = 99,
+   SVGA3D_G8R8_G8B8_UNORM              = 100,
+   SVGA3D_BC1_TYPELESS                 = 101,
+   SVGA3D_BC1_UNORM                    = 15,
+   SVGA3D_BC1_UNORM_SRGB               = 102,
+   SVGA3D_BC2_TYPELESS                 = 103,
+   SVGA3D_BC2_UNORM                    = 17,
+   SVGA3D_BC2_UNORM_SRGB               = 104,
+   SVGA3D_BC3_TYPELESS                 = 105,
+   SVGA3D_BC3_UNORM                    = 19,
+   SVGA3D_BC3_UNORM_SRGB               = 106,
+   SVGA3D_BC4_TYPELESS                 = 107,
    SVGA3D_BC4_UNORM                    = 108,
+   SVGA3D_BC4_SNORM                    = 109,
+   SVGA3D_BC5_TYPELESS                 = 110,
    SVGA3D_BC5_UNORM                    = 111,
+   SVGA3D_BC5_SNORM                    = 112,
+   SVGA3D_B5G6R5_UNORM                 = 3,
+   SVGA3D_B5G5R5A1_UNORM               = 5,
+   SVGA3D_B8G8R8A8_UNORM               = 2,
+   SVGA3D_B8G8R8X8_UNORM               = 1,
+   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+   SVGA3D_B8G8R8A8_TYPELESS            = 114,
+   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+   SVGA3D_B8G8R8X8_TYPELESS            = 116,
+   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
 
    /* Advanced D3D9 depth formats. */
    SVGA3D_Z_DF16                       = 118,
    SVGA3D_Z_DF24                       = 119,
    SVGA3D_Z_D24S8_INT                  = 120,
 
-   SVGA3D_FORMAT_MAX
+   /* Planar video formats. */
+   SVGA3D_YV12                         = 121,
+
+   /* Shader constant formats. */
+   SVGA3D_SURFACE_SHADERCONST_FLOAT    = 122,
+   SVGA3D_SURFACE_SHADERCONST_INT      = 123,
+   SVGA3D_SURFACE_SHADERCONST_BOOL     = 124,
+
+   SVGA3D_FORMAT_MAX                   = 125,
 } SVGA3dSurfaceFormat;
 
 typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -957,15 +1056,21 @@ typedef enum {
 } SVGA3dCubeFace;
 
 typedef enum {
+   SVGA3D_SHADERTYPE_INVALID                    = 0,
+   SVGA3D_SHADERTYPE_MIN                        = 1,
    SVGA3D_SHADERTYPE_VS                         = 1,
    SVGA3D_SHADERTYPE_PS                         = 2,
-   SVGA3D_SHADERTYPE_MAX
+   SVGA3D_SHADERTYPE_MAX                        = 3,
+   SVGA3D_SHADERTYPE_GS                         = 3,
 } SVGA3dShaderType;
 
+#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
 typedef enum {
    SVGA3D_CONST_TYPE_FLOAT                      = 0,
    SVGA3D_CONST_TYPE_INT                        = 1,
    SVGA3D_CONST_TYPE_BOOL                       = 2,
+   SVGA3D_CONST_TYPE_MAX
 } SVGA3dShaderConstType;
 
 #define SVGA3D_MAX_SURFACE_FACES                6
@@ -1056,9 +1161,74 @@ typedef enum {
 #define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
 #define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
 #define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 42
-
-#define SVGA_3D_CMD_FUTURE_MAX             2000
+#define SVGA_3D_CMD_SCREEN_DMA               1082
+#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
+#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE     1084
+
+#define SVGA_3D_CMD_LOGICOPS_BITBLT          1085
+#define SVGA_3D_CMD_LOGICOPS_TRANSBLT        1086
+#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT      1087
+#define SVGA_3D_CMD_LOGICOPS_COLORFILL       1088
+#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND      1089
+#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND  1090
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE          1091
+#define SVGA_3D_CMD_READBACK_OTABLE          1092
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB            1093
+#define SVGA_3D_CMD_DESTROY_GB_MOB           1094
+#define SVGA_3D_CMD_REDEFINE_GB_MOB          1095
+#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING    1096
+
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE        1097
+#define SVGA_3D_CMD_DESTROY_GB_SURFACE       1098
+#define SVGA_3D_CMD_BIND_GB_SURFACE          1099
+#define SVGA_3D_CMD_COND_BIND_GB_SURFACE     1100
+#define SVGA_3D_CMD_UPDATE_GB_IMAGE          1101
+#define SVGA_3D_CMD_UPDATE_GB_SURFACE        1102
+#define SVGA_3D_CMD_READBACK_GB_IMAGE        1103
+#define SVGA_3D_CMD_READBACK_GB_SURFACE      1104
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE      1105
+#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE    1106
+
+#define SVGA_3D_CMD_DEFINE_GB_CONTEXT        1107
+#define SVGA_3D_CMD_DESTROY_GB_CONTEXT       1108
+#define SVGA_3D_CMD_BIND_GB_CONTEXT          1109
+#define SVGA_3D_CMD_READBACK_GB_CONTEXT      1110
+#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT    1111
+
+#define SVGA_3D_CMD_DEFINE_GB_SHADER         1112
+#define SVGA_3D_CMD_DESTROY_GB_SHADER        1113
+#define SVGA_3D_CMD_BIND_GB_SHADER           1114
+
+#define SVGA_3D_CMD_SET_OTABLE_BASE64        1115
+
+#define SVGA_3D_CMD_BEGIN_GB_QUERY           1116
+#define SVGA_3D_CMD_END_GB_QUERY             1117
+#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY        1118
+
+#define SVGA_3D_CMD_NOP                      1119
+
+#define SVGA_3D_CMD_ENABLE_GART              1120
+#define SVGA_3D_CMD_DISABLE_GART             1121
+#define SVGA_3D_CMD_MAP_MOB_INTO_GART        1122
+#define SVGA_3D_CMD_UNMAP_GART_RANGE         1123
+
+#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET   1124
+#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET  1125
+#define SVGA_3D_CMD_BIND_GB_SCREENTARGET     1126
+#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET   1127
+
+#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL   1128
+#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
+
+#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE  1130
+
+#define SVGA_3D_CMD_DEFINE_GB_MOB64          1135
+#define SVGA_3D_CMD_REDEFINE_GB_MOB64        1136
+
+#define SVGA_3D_CMD_MAX                      1142
+#define SVGA_3D_CMD_FUTURE_MAX               3000
 
 /*
  * Common substructures used in multiple FIFO commands:
@@ -1749,6 +1919,495 @@ struct {
 } SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
 
 
+/*
+ * Guest-backed surface definitions.
+ */
+
+typedef uint32 SVGAMobId;
+
+typedef enum SVGAMobFormat {
+   SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
+   SVGA3D_MOBFMT_PTDEPTH_0 = 0,
+   SVGA3D_MOBFMT_PTDEPTH_1 = 1,
+   SVGA3D_MOBFMT_PTDEPTH_2 = 2,
+   SVGA3D_MOBFMT_RANGE     = 3,
+   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+   SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+/*
+ * Sizes of opaque types.
+ */
+
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
+#define SVGA3D_CONTEXT_DATA_SIZE 16384
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef enum {
+   SVGA_OTABLE_MOB           = 0,
+   SVGA_OTABLE_MIN           = 0,
+   SVGA_OTABLE_SURFACE       = 1,
+   SVGA_OTABLE_CONTEXT       = 2,
+   SVGA_OTABLE_SHADER        = 3,
+   SVGA_OTABLE_SCREEN_TARGET = 4,
+   SVGA_OTABLE_DX9_MAX       = 5,
+   SVGA_OTABLE_MAX           = 8
+} SVGAOTableType;
+
+typedef
+struct {
+   SVGAOTableType type;
+   PPN baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+__attribute__((__packed__))
+SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+__attribute__((__packed__))
+SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+
+typedef
+struct {
+   SVGAOTableType type;
+}
+__attribute__((__packed__))
+SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBMob {
+   SVGAMobId mobid;
+}
+__attribute__((__packed__))
+SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+/*
+ * Redefine an object in the OTable.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdRedefineGBMob;   /* SVGA_3D_CMD_REDEFINE_GB_MOB */
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+struct SVGA3dCmdRedefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+__attribute__((__packed__))
+SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBMobMapping {
+   SVGAMobId mobid;
+}
+__attribute__((__packed__))
+SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBSurface {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+} SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBSurface {
+   uint32 sid;
+} SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to an object.
+ */
+
+typedef
+struct SVGA3dCmdBindGBSurface {
+   uint32 sid;
+   SVGAMobId mobid;
+} SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+/*
+ * Conditionally bind a mob to a guest backed surface if testMobid
+ * matches the currently bound mob.  Optionally issue a readback on
+ * the surface while it is still bound to the old mobid if the mobid
+ * is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+
+typedef
+struct{
+   uint32 sid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+   uint32 flags;
+}
+SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBImage {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+} SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+
+typedef
+struct SVGA3dCmdUpdateGBSurface {
+   uint32 sid;
+} SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImage {
+   SVGA3dSurfaceImageId image;
+} SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBSurface {
+   uint32 sid;
+} SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImage {
+   SVGA3dSurfaceImageId image;
+} SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents if all images can be lost.)
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBSurface {
+   uint32 sid;
+} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface or rendering is not guaranteed.
+ */
+
+typedef
+struct SVGA3dCmdInvalidateGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBContext {
+   uint32 cid;
+} SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+struct SVGA3dCmdDestroyGBContext {
+   uint32 cid;
+} SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+struct SVGA3dCmdBindGBContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+} SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+struct SVGA3dCmdReadbackGBContext {
+   uint32 cid;
+} SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+struct SVGA3dCmdInvalidateGBContext {
+   uint32 cid;
+} SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+struct SVGA3dCmdDefineGBShader {
+   uint32 shid;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+} SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdBindGBShader {
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+} SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef struct SVGA3dCmdDestroyGBShader {
+   uint32 shid;
+} SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+struct {
+   uint32                  cid;
+   uint32                  regStart;
+   SVGA3dShaderType        shaderType;
+   SVGA3dShaderConstType   constType;
+
+   /*
+    * Followed by a variable number of shader constants.
+    *
+    * Note that FLOAT and INT constants are 4-dwords in length, while
+    * BOOL constants are 1-dword in length.
+    */
+} SVGA3dCmdSetGBShaderConstInline;
+/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+} SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+} SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ *    The semantics of this command are identical to the
+ *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ *    to a Mob instead of a GMR.
+ */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+} SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+typedef
+struct {
+   SVGAMobId mobid;
+   uint32 fbOffset;
+   uint32 initalized;
+}
+SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+struct {
+   SVGAMobId mobid;
+   uint32 gartOffset;
+}
+SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+struct {
+   uint32 gartOffset;
+   uint32 numPages;
+}
+SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+
+typedef
+struct {
+   uint32 stid;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   uint32 flags;
+}
+SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+struct {
+   uint32 stid;
+}
+SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId image;
+}
+SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+struct {
+   uint32 stid;
+   SVGA3dBox box;
+}
+SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
 /*
  * Capability query index.
  *
@@ -1879,10 +2538,41 @@ typedef enum {
    SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
 
    /*
-    * Don't add new caps into the previous section; the values in this
-    * enumeration must not change. You can put new values right before
-    * SVGA3D_DEVCAP_MAX.
+    * Deprecated.
     */
+   SVGA3D_DEVCAP_VGPU10                            = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ored together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ored together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+
+   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+
+   /*
+    * What support does the host have for screen targets?
+    *
+    * See the SVGA3D_SCREENTARGET_CAP bits below.
+    */
+   SVGA3D_DEVCAP_SCREENTARGETS                     = 93,
+
    SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
 } SVGA3dDevCapIndex;
 
index 01f63cb49678f6971e5c704bbb67c77cc51df4ed..71defa4d2d7528a247b8e985a7b14cd1197e2a58 100644 (file)
@@ -169,7 +169,10 @@ enum {
    SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
    SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
    SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
-   SVGA_REG_TOP = 48,               /* Must be 1 more than the last register */
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_TOP = 53,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
@@ -431,7 +434,10 @@ struct SVGASignedPoint {
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
 #define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
-
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
 
 /*
  * FIFO register indices.
index 0489c61524826f2e2b673628783393639f0c591e..6327cfc36805f46364402a004ce76cd9bb19ecd5 100644 (file)
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;
 
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+       TTM_PL_FLAG_CACHED |
+       TTM_PL_FLAG_NO_EVICT;
+
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;
 
@@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;
 
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+       TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
@@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
        .busy_placement = &sys_placement_flags
 };
 
+struct ttm_placement vmw_sys_ne_placement = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .num_placement = 1,
+       .placement = &sys_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_ne_placement_flags
+};
+
 static uint32_t evictable_placement_flags[] = {
        TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
-       VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+       VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
 struct ttm_placement vmw_evictable_placement = {
        .fpfn = 0,
        .lpfn = 0,
-       .num_placement = 3,
+       .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
@@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = {
        .busy_placement = gmr_vram_placement_flags
 };
 
+struct ttm_placement vmw_mob_placement = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .num_placement = 1,
+       .num_busy_placement = 1,
+       .placement = &mob_placement_flags,
+       .busy_placement = &mob_placement_flags
+};
+
 struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
+       struct vmw_mob *mob;
+       int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
@@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
+               viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
@@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
        vmw_tt->mapped = false;
 }
 
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       return &vmw_tt->vsgt;
+}
+
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        struct vmw_ttm_tt *vmw_be =
@@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
                return ret;
 
        vmw_be->gmr_id = bo_mem->start;
+       vmw_be->mem_type = bo_mem->mem_type;
+
+       switch (bo_mem->mem_type) {
+       case VMW_PL_GMR:
+               return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+                                   ttm->num_pages, vmw_be->gmr_id);
+       case VMW_PL_MOB:
+               if (unlikely(vmw_be->mob == NULL)) {
+                       vmw_be->mob =
+                               vmw_mob_create(ttm->num_pages);
+                       if (unlikely(vmw_be->mob == NULL))
+                               return -ENOMEM;
+               }
 
-       return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
-                           ttm->num_pages, vmw_be->gmr_id);
+               return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+                                   &vmw_be->vsgt, ttm->num_pages,
+                                   vmw_be->gmr_id);
+       default:
+               BUG();
+       }
+       return 0;
 }
 
 static int vmw_ttm_unbind(struct ttm_tt *ttm)
@@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
-       vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+       switch (vmw_be->mem_type) {
+       case VMW_PL_GMR:
+               vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+               break;
+       case VMW_PL_MOB:
+               vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+               break;
+       default:
+               BUG();
+       }
 
        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
@@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
        return 0;
 }
 
+
 static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be =
@@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);
+
+       if (vmw_be->mob)
+               vmw_mob_destroy(vmw_be->mob);
+
        kfree(vmw_be);
 }
 
+
 static int vmw_ttm_populate(struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_tt =
@@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 
+
+       if (vmw_tt->mob) {
+               vmw_mob_destroy(vmw_tt->mob);
+               vmw_tt->mob = NULL;
+       }
+
        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
@@ -517,7 +642,7 @@ static struct ttm_backend_func vmw_ttm_func = {
        .destroy = vmw_ttm_destroy,
 };
 
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page)
 {
@@ -530,6 +655,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
 
        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+       vmw_be->mob = NULL;
 
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
@@ -546,12 +672,12 @@ out_no_init:
        return NULL;
 }
 
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
        return 0;
 }
 
-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
 {
        switch (type) {
@@ -571,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
+       case VMW_PL_MOB:
                /*
                 * "Guest Memory Regions" is an aperture like feature with
                 *  one slot per bo. There is an upper limit of the number of
@@ -589,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
        return 0;
 }
 
-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
 {
        *placement = vmw_sys_placement;
@@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
+       case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
                                  VMW_FENCE_WAIT_TIMEOUT);
 }
 
+/**
+ * vmw_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The truct ttm_mem_reg indicating to what memory
+ *                  region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+                           struct ttm_mem_reg *mem)
+{
+       vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+
+       spin_lock(&bdev->fence_lock);
+       ttm_bo_wait(bo, false, false, false);
+       spin_unlock(&bdev->fence_lock);
+}
+
+
 struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
@@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = {
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
-       .move_notify = NULL,
-       .swap_notify = NULL,
+       .move_notify = vmw_move_notify,
+       .swap_notify = vmw_swap_notify,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
index 00ae0925aca87e17994288743e079852c0b14dc2..82c41daebc0e35be6fb3629b602a083a6520f6a3 100644 (file)
 struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
+       struct vmw_ctx_binding_state cbs;
 };
 
+
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
 
+static int vmw_gb_context_create(struct vmw_resource *res);
+static int vmw_gb_context_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_gb_context_destroy(struct vmw_resource *res);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -62,6 +78,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
        .unbind = NULL
 };
 
+static const struct vmw_res_func vmw_gb_context_func = {
+       .res_type = vmw_res_context,
+       .needs_backup = true,
+       .may_evict = true,
+       .type_name = "guest backed contexts",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_gb_context_create,
+       .destroy = vmw_gb_context_destroy,
+       .bind = vmw_gb_context_bind,
+       .unbind = vmw_gb_context_unbind
+};
+
+static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
+       [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
+       [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
+       [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+
 /**
  * Context management:
  */
@@ -76,6 +109,16 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
        } *cmd;
 
 
+       if (res->func->destroy == vmw_gb_context_destroy) {
+               mutex_lock(&dev_priv->cmdbuf_mutex);
+               (void) vmw_gb_context_destroy(res);
+               if (dev_priv->pinned_bo != NULL &&
+                   !dev_priv->query_cid_valid)
+                       __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+               mutex_unlock(&dev_priv->cmdbuf_mutex);
+               return;
+       }
+
        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
@@ -92,6 +135,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
        vmw_3d_resource_dec(dev_priv, false);
 }
 
+static int vmw_gb_context_init(struct vmw_private *dev_priv,
+                              struct vmw_resource *res,
+                              void (*res_free) (struct vmw_resource *res))
+{
+       int ret;
+       struct vmw_user_context *uctx =
+               container_of(res, struct vmw_user_context, res);
+
+       ret = vmw_resource_init(dev_priv, res, true,
+                               res_free, &vmw_gb_context_func);
+       res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+
+       if (unlikely(ret != 0)) {
+               if (res_free)
+                       res_free(res);
+               else
+                       kfree(res);
+               return ret;
+       }
+
+       memset(&uctx->cbs, 0, sizeof(uctx->cbs));
+       INIT_LIST_HEAD(&uctx->cbs.list);
+
+       vmw_resource_activate(res, vmw_hw_context_destroy);
+       return 0;
+}
+
 static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
@@ -103,6 +173,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
                SVGA3dCmdDefineContext body;
        } *cmd;
 
+       if (dev_priv->has_mob)
+               return vmw_gb_context_init(dev_priv, res, res_free);
+
        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);
 
@@ -154,6 +227,184 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
        return (ret == 0) ? res : NULL;
 }
 
+
+static int vmw_gb_context_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       int ret;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBContext body;
+       } *cmd;
+
+       if (likely(res->id != -1))
+               return 0;
+
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a context id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv, false);
+
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       return ret;
+}
+
+static int vmw_gb_context_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBContext body;
+       } *cmd;
+       struct ttm_buffer_object *bo = val_buf->bo;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = res->id;
+       cmd->body.mobid = bo->mem.start;
+       cmd->body.validContents = res->backup_dirty;
+       res->backup_dirty = false;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+static int vmw_gb_context_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = val_buf->bo;
+       struct vmw_fence_obj *fence;
+       struct vmw_user_context *uctx =
+               container_of(res, struct vmw_user_context, res);
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBContext body;
+       } *cmd1;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBContext body;
+       } *cmd2;
+       uint32_t submit_size;
+       uint8_t *cmd;
+
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       mutex_lock(&dev_priv->binding_mutex);
+       vmw_context_binding_state_kill(&uctx->cbs);
+
+       submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "unbinding.\n");
+               mutex_unlock(&dev_priv->binding_mutex);
+               return -ENOMEM;
+       }
+
+       cmd2 = (void *) cmd;
+       if (readback) {
+               cmd1 = (void *) cmd;
+               cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
+               cmd1->header.size = sizeof(cmd1->body);
+               cmd1->body.cid = res->id;
+               cmd2 = (void *) (&cmd1[1]);
+       }
+       cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
+       cmd2->header.size = sizeof(cmd2->body);
+       cmd2->body.cid = res->id;
+       cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+       vmw_fifo_commit(dev_priv, submit_size);
+       mutex_unlock(&dev_priv->binding_mutex);
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+static int vmw_gb_context_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyGBContext body;
+       } *cmd;
+       struct vmw_user_context *uctx =
+               container_of(res, struct vmw_user_context, res);
+
+       BUG_ON(!list_empty(&uctx->cbs.list));
+
+       if (likely(res->id == -1))
+               return 0;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for context "
+                         "destruction.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       if (dev_priv->query_cid == res->id)
+               dev_priv->query_cid_valid = false;
+       vmw_resource_release_id(res);
+       vmw_3d_resource_dec(dev_priv, false);
+
+       return 0;
+}
+
 /**
  * User-space context management:
  */
@@ -272,3 +523,283 @@ out_unlock:
        return ret;
 
 }
+
+/**
+ * vmw_context_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+{
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetShader body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = bi->ctx->id;
+       cmd->body.type = bi->i1.shader_type;
+       cmd->body.shid = SVGA3D_INVALID_ID;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_context_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ */
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+{
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetRenderTarget body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for render target "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = bi->ctx->id;
+       cmd->body.type = bi->i1.rt_type;
+       cmd->body.target.sid = SVGA3D_INVALID_ID;
+       cmd->body.target.face = 0;
+       cmd->body.target.mipmap = 0;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_context_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them into a single command.
+ */
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+{
+       struct vmw_private *dev_priv = bi->ctx->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               struct {
+                       SVGA3dCmdSetTextureState c;
+                       SVGA3dTextureState s1;
+               } body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for texture "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+
+       cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.c.cid = bi->ctx->id;
+       cmd->body.s1.stage = bi->i1.texture_stage;
+       cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+       cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/**
+ * vmw_context_binding_drop: Stop tracking a context binding
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+{
+       list_del(&cb->ctx_list);
+       if (!list_empty(&cb->res_list))
+               list_del(&cb->res_list);
+       cb->bi.ctx = NULL;
+}
+
+/**
+ * vmw_context_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ * Performs basic checks on the binding to make sure arguments are within
+ * bounds and then starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+                           const struct vmw_ctx_bindinfo *bi)
+{
+       struct vmw_ctx_binding *loc;
+
+       switch (bi->bt) {
+       case vmw_ctx_binding_rt:
+               if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
+                       DRM_ERROR("Illegal render target type %u.\n",
+                                 (unsigned) bi->i1.rt_type);
+                       return -EINVAL;
+               }
+               loc = &cbs->render_targets[bi->i1.rt_type];
+               break;
+       case vmw_ctx_binding_tex:
+               if (unlikely((unsigned)bi->i1.texture_stage >=
+                            SVGA3D_NUM_TEXTURE_UNITS)) {
+                       DRM_ERROR("Illegal texture/sampler unit %u.\n",
+                                 (unsigned) bi->i1.texture_stage);
+                       return -EINVAL;
+               }
+               loc = &cbs->texture_units[bi->i1.texture_stage];
+               break;
+       case vmw_ctx_binding_shader:
+               if (unlikely((unsigned)bi->i1.shader_type >=
+                            SVGA3D_SHADERTYPE_MAX)) {
+                       DRM_ERROR("Illegal shader type %u.\n",
+                                 (unsigned) bi->i1.shader_type);
+                       return -EINVAL;
+               }
+               loc = &cbs->shaders[bi->i1.shader_type];
+               break;
+       default:
+               BUG();
+       }
+
+       if (loc->bi.ctx != NULL)
+               vmw_context_binding_drop(loc);
+
+       loc->bi = *bi;
+       list_add_tail(&loc->ctx_list, &cbs->list);
+       INIT_LIST_HEAD(&loc->res_list);
+
+       return 0;
+}
+
+/**
+ * vmw_context_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
+                                        const struct vmw_ctx_bindinfo *bi)
+{
+       struct vmw_ctx_binding *loc;
+
+       switch (bi->bt) {
+       case vmw_ctx_binding_rt:
+               loc = &cbs->render_targets[bi->i1.rt_type];
+               break;
+       case vmw_ctx_binding_tex:
+               loc = &cbs->texture_units[bi->i1.texture_stage];
+               break;
+       case vmw_ctx_binding_shader:
+               loc = &cbs->shaders[bi->i1.shader_type];
+               break;
+       default:
+               BUG();
+       }
+
+       if (loc->bi.ctx != NULL)
+               vmw_context_binding_drop(loc);
+
+       loc->bi = *bi;
+       list_add_tail(&loc->ctx_list, &cbs->list);
+       if (bi->res != NULL)
+               list_add_tail(&loc->res_list, &bi->res->binding_head);
+       else
+               INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
+ * vmw_context_binding_kill - Kill a binding on the device
+ * and stop tracking it.
+ *
+ * @cb: Pointer to binding tracker storage.
+ *
+ * Emits FIFO commands to scrub a binding represented by @cb.
+ * Then stops tracking the binding and re-initializes its storage.
+ */
+static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
+{
+       (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+       vmw_context_binding_drop(cb);
+}
+
+/**
+ * vmw_context_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+       struct vmw_ctx_binding *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+               vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_context_binding_res_list_kill(struct list_head *head)
+{
+       struct vmw_ctx_binding *entry, *next;
+
+       list_for_each_entry_safe(entry, next, head, res_list)
+               vmw_context_binding_kill(entry);
+}
+
+/**
+ * vmw_context_binding_state_transfer - Commit staged binding info
+ *
+ * @ctx: Pointer to context to commit the staged binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once commands have been submitted.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+                                       struct vmw_ctx_binding_state *from)
+{
+       struct vmw_user_context *uctx =
+               container_of(ctx, struct vmw_user_context, res);
+       struct vmw_ctx_binding *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+               vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}
index d4e54fcc0acd3e778bd74d1488f898798c740c73..a75840211b3c9682da3be63df8a54385de238824 100644 (file)
@@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 /**
  * vmw_bo_pin - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
  * @pin: Whether to pin or unpin.
  *
  */
@@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
        int ret;
 
        lockdep_assert_held(&bo->resv->lock.base);
-       BUG_ON(old_mem_type != TTM_PL_VRAM &&
-              old_mem_type != VMW_PL_GMR);
 
-       pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+       pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+               | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl_flags |= TTM_PL_FLAG_NO_EVICT;
 
index c7a549694e59fb2614562627dcb7f167680babcd..9893328f8fdc04750ec78538c4cdb2ba89ddae30 100644 (file)
 #define DRM_IOCTL_VMW_UPDATE_LAYOUT                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER                            \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,      \
+                struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER                             \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,        \
+                struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE                                \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,  \
+                union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF                           \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,     \
+                union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU                                  \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,             \
+                struct drm_vmw_synccpu_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+                     vmw_shader_define_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+                     vmw_shader_destroy_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+                     vmw_gb_surface_define_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+                     vmw_gb_surface_reference_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
+       VMW_IOCTL_DEF(VMW_SYNCCPU,
+                     vmw_user_dmabuf_synccpu_ioctl,
+                     DRM_AUTH | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
 
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
+       if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+               DRM_INFO("  Command Buffers.\n");
+       if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+               DRM_INFO("  Command Buffers 2.\n");
+       if (capabilities & SVGA_CAP_GBOBJECTS)
+               DRM_INFO("  Guest Backed Resources.\n");
 }
 
-
 /**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
  *
- * @dev_priv: The device private structure.
+ * @dev_priv: A device private structure.
  *
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure. Finally it will unmap the buffer.
+ * No interruptible waits are done within this function.
  *
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
+ * Returns an error if bo creation or initialization fails.
  */
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
+       int ret;
+       struct ttm_buffer_object *bo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
-       int ret;
-       struct ttm_bo_device *bdev = &dev_priv->bdev;
-       struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
 
-       ttm_bo_reserve(bo, false, false, false, 0);
-       spin_lock(&bdev->fence_lock);
-       ret = ttm_bo_wait(bo, false, false, false);
-       spin_unlock(&bdev->fence_lock);
+       /*
+        * Create the bo as pinned, so that a tryreserve will
+        * immediately succeed. This is because we're the only
+        * user of the bo currently.
+        */
+       ret = ttm_bo_create(&dev_priv->bdev,
+                           PAGE_SIZE,
+                           ttm_bo_type_device,
+                           &vmw_sys_ne_placement,
+                           0, false, NULL,
+                           &bo);
+
        if (unlikely(ret != 0))
-               (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
-                                        10*HZ);
+               return ret;
+
+       ret = ttm_bo_reserve(bo, false, true, false, 0);
+       BUG_ON(ret != 0);
 
        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
@@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
-       } else
-               DRM_ERROR("Dummy query buffer map failed.\n");
+       }
+       vmw_bo_pin(bo, false);
        ttm_bo_unreserve(bo);
-}
 
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Dummy query buffer map failed.\n");
+               ttm_bo_unref(&bo);
+       } else
+               dev_priv->dummy_query_bo = bo;
 
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
-       return ttm_bo_create(&dev_priv->bdev,
-                            PAGE_SIZE,
-                            ttm_bo_type_device,
-                            &vmw_vram_sys_placement,
-                            0, false, NULL,
-                            &dev_priv->dummy_query_bo);
+       return ret;
 }
 
-
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
        int ret;
@@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv)
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
+       if (dev_priv->has_mob) {
+               ret = vmw_otables_setup(dev_priv);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Unable to initialize "
+                                 "guest Memory OBjects.\n");
+                       goto out_no_mob;
+               }
+       }
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
-       vmw_dummy_query_bo_prepare(dev_priv);
 
        return 0;
 
 out_no_query_bo:
+       if (dev_priv->has_mob)
+               vmw_otables_takedown(dev_priv);
+out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
@@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv)
        BUG_ON(dev_priv->pinned_bo != NULL);
 
        ttm_bo_unref(&dev_priv->dummy_query_bo);
+       if (dev_priv->has_mob)
+               vmw_otables_takedown(dev_priv);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
+
 /**
  * Increase the 3d resource refcount.
  * If the count was prevously zero, initialize the fifo, switching to svga
@@ -510,6 +555,33 @@ out_fixup:
        return 0;
 }
 
+/**
+ * vmw_dma_masks - set required page- and dma masks
+ *
+ * @dev: Pointer to struct drm-device
+ *
+ * With 32-bit we can only handle 32 bit PFNs. Optionally set that
+ * restriction also for 64-bit systems.
+ */
+#ifdef CONFIG_INTEL_IOMMU
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       if (intel_iommu_enabled &&
+           (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+               DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+               return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+       }
+       return 0;
+}
+#else
+static int vmw_dma_masks(struct vmw_private *dev_priv)
+{
+       return 0;
+}
+#endif
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
        struct vmw_private *dev_priv;
@@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
+       mutex_init(&dev_priv->binding_mutex);
        rwlock_init(&dev_priv->resource_lock);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        vmw_get_initial_size(dev_priv);
 
-       if (dev_priv->capabilities & SVGA_CAP_GMR) {
-               dev_priv->max_gmr_descriptors =
-                       vmw_read(dev_priv,
-                                SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
-       }
-       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
@@ -598,23 +666,42 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
+       dev_priv->max_mob_pages = 0;
+       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+               uint64_t mem_size =
+                       vmw_read(dev_priv,
+                                SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+               dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+               dev_priv->prim_bb_mem =
+                       vmw_read(dev_priv,
+                                SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+       } else
+               dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+       ret = vmw_dma_masks(dev_priv);
+       if (unlikely(ret != 0)) {
+               mutex_unlock(&dev_priv->hw_mutex);
+               goto out_err0;
+       }
+
+       if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
+               dev_priv->prim_bb_mem = dev_priv->vram_size;
 
        mutex_unlock(&dev_priv->hw_mutex);
 
        vmw_print_capabilities(dev_priv->capabilities);
 
-       if (dev_priv->capabilities & SVGA_CAP_GMR) {
+       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
-               DRM_INFO("Max GMR descriptors is %u\n",
-                        (unsigned)dev_priv->max_gmr_descriptors);
-       }
-       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
+       DRM_INFO("Maximum display memory size is %u kiB\n",
+                dev_priv->prim_bb_mem / 1024);
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
@@ -649,12 +736,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-                                        dev_priv->max_gmr_ids) != 0) {
+                                        VMW_PL_GMR) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }
 
+       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+               dev_priv->has_mob = true;
+               if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+                                  VMW_PL_MOB) != 0) {
+                       DRM_INFO("No MOB memory available. "
+                                "3D will be disabled.\n");
+                       dev_priv->has_mob = false;
+               }
+       }
+
        dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                               dev_priv->mmio_size);
 
@@ -757,6 +854,8 @@ out_err4:
        iounmap(dev_priv->mmio_virt);
 out_err3:
        arch_phys_wc_del(dev_priv->mmio_mtrr);
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -801,6 +900,8 @@ static int vmw_driver_unload(struct drm_device *dev)
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
index 20890ad8408bb5ef377c4ff54c80397c3105efaf..554e7fa330824cc7a2f1d728f4299c19c8ab8fb5 100644 (file)
@@ -40,9 +40,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_DATE "20121114"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_MINOR 5
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+                       VMWGFX_NUM_GB_SHADER +\
+                       VMWGFX_NUM_GB_SURFACE +\
+                       VMWGFX_NUM_GB_SCREEN_TARGET)
 
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_PL_MOB TTM_PL_PRIV1
+#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 #define VMW_RES_FENCE ttm_driver_type3
+#define VMW_RES_SHADER ttm_driver_type4
 
 struct vmw_fpriv {
        struct drm_master *locked_master;
@@ -82,6 +98,7 @@ struct vmw_dma_buffer {
 struct vmw_validate_buffer {
        struct ttm_validate_buffer base;
        struct drm_hash_item hash;
+       bool validate_as_mob;
 };
 
 struct vmw_res_func;
@@ -98,6 +115,7 @@ struct vmw_resource {
        const struct vmw_res_func *func;
        struct list_head lru_head; /* Protected by the resource lock */
        struct list_head mob_head; /* Protected by @backup reserved */
+       struct list_head binding_head; /* Protected by binding_mutex */
        void (*res_free) (struct vmw_resource *res);
        void (*hw_destroy) (struct vmw_resource *res);
 };
@@ -106,6 +124,7 @@ enum vmw_res_type {
        vmw_res_context,
        vmw_res_surface,
        vmw_res_stream,
+       vmw_res_shader,
        vmw_res_max
 };
 
@@ -154,6 +173,7 @@ struct vmw_fifo_state {
 };
 
 struct vmw_relocation {
+       SVGAMobId *mob_loc;
        SVGAGuestPtr *location;
        uint32_t index;
 };
@@ -229,6 +249,71 @@ struct vmw_piter {
        struct page *(*page)(struct vmw_piter *);
 };
 
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+       vmw_ctx_binding_shader,
+       vmw_ctx_binding_rt,
+       vmw_ctx_binding_tex,
+       vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @res: Non ref-counted pointer to the bound resource.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+       struct vmw_resource *ctx;
+       struct vmw_resource *res;
+       enum vmw_ctx_binding_type bt;
+       union {
+               SVGA3dShaderType shader_type;
+               SVGA3dRenderTargetType rt_type;
+               uint32 texture_stage;
+       } i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ *                        - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+       struct list_head ctx_list;
+       struct list_head res_list;
+       struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+       struct list_head list;
+       struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+       struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+       struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
+
 struct vmw_sw_context{
        struct drm_open_hash res_ht;
        bool res_ht_initialized;
@@ -250,6 +335,7 @@ struct vmw_sw_context{
        struct vmw_resource *last_query_ctx;
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
+       struct vmw_ctx_binding_state staged_bindings;
 };
 
 struct vmw_legacy_display;
@@ -281,6 +367,7 @@ struct vmw_private {
        unsigned int io_start;
        uint32_t vram_start;
        uint32_t vram_size;
+       uint32_t prim_bb_mem;
        uint32_t mmio_start;
        uint32_t mmio_size;
        uint32_t fb_max_width;
@@ -290,11 +377,12 @@ struct vmw_private {
        __le32 __iomem *mmio_virt;
        int mmio_mtrr;
        uint32_t capabilities;
-       uint32_t max_gmr_descriptors;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
+       uint32_t max_mob_pages;
        uint32_t memory_size;
        bool has_gmr;
+       bool has_mob;
        struct mutex hw_mutex;
 
        /*
@@ -370,6 +458,7 @@ struct vmw_private {
 
        struct vmw_sw_context ctx;
        struct mutex cmdbuf_mutex;
+       struct mutex binding_mutex;
 
        /**
         * Operating mode.
@@ -415,6 +504,12 @@ struct vmw_private {
         * DMA mapping stuff.
         */
        enum vmw_dma_map_mode map_mode;
+
+       /*
+        * Guest Backed stuff
+        */
+       struct ttm_buffer_object *otable_bo;
+       struct vmw_otable *otables;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -471,23 +566,12 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
  * Resource utilities - vmwgfx_resource.c
  */
 struct vmw_user_resource_conv;
-extern const struct vmw_user_resource_conv *user_surface_converter;
-extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv);
-extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv);
-extern int vmw_context_check(struct vmw_private *dev_priv,
-                            struct ttm_object_file *tfile,
-                            int id,
-                            struct vmw_resource **p_res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
@@ -499,18 +583,6 @@ extern int vmw_user_resource_lookup_handle(
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
-extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv);
-extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv);
-extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
-                                      struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
-                            struct ttm_object_file *tfile,
-                            uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
-                               struct vmw_surface *srf);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           struct vmw_dma_buffer *vmw_bo,
@@ -519,10 +591,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           void (*bo_free) (struct ttm_buffer_object *bo));
 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+                                struct ttm_object_file *tfile,
+                                uint32_t size,
+                                bool shareable,
+                                uint32_t *handle,
+                                struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+                                    struct vmw_dma_buffer *dma_buf,
+                                    uint32_t *handle);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+                                        struct drm_file *file_priv);
 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                         uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
@@ -622,10 +705,16 @@ extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
 extern void vmw_piter_start(struct vmw_piter *viter,
                            const struct vmw_sg_table *vsgt,
                            unsigned long p_offs);
@@ -701,7 +790,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
  * IRQs and wating - vmwgfx_irq.c
  */
 
-extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                             uint32_t seqno, bool interruptible,
                             unsigned long timeout);
@@ -832,6 +921,76 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
                                  uint32_t handle, uint32_t flags,
                                  int *prime_fd);
 
+/*
+ * MemoryOBject management -  vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+                       const struct vmw_sg_table *vsgt,
+                       unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+                          struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+                            struct ttm_object_file *tfile,
+                            int id,
+                            struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+                                  const struct vmw_ctx_bindinfo *ci);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+                                  struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+                                         struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+                            struct ttm_object_file *tfile,
+                            uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+                               struct vmw_surface *srf);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
 
 /**
  * Inline helper functions
index 599f6469a1ebb00c10b3d8047c44882aebd779d8..7a5f1eb55c5a0ad09adf8aa02c92367908c49c24 100644 (file)
@@ -54,6 +54,8 @@ struct vmw_resource_relocation {
  * @res: Ref-counted pointer to the resource.
  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  * @new_backup: Refcounted pointer to the new backup buffer.
+ * @staged_bindings: If @res is a context, tracks bindings set up during
+ * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
@@ -65,11 +67,31 @@ struct vmw_resource_val_node {
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
+       struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        bool first_usage;
        bool no_buffer_needed;
 };
 
+/**
+ * struct vmw_cmd_entry - Describe a command for the verifier
+ *
+ * @user_allow: Whether allowed from the execbuf ioctl.
+ * @gb_disable: Whether disabled if guest-backed objects are available.
+ * @gb_enable: Whether enabled iff guest-backed objects are available.
+ */
+struct vmw_cmd_entry {
+       int (*func) (struct vmw_private *, struct vmw_sw_context *,
+                    SVGA3dCmdHeader *);
+       bool user_allow;
+       bool gb_disable;
+       bool gb_enable;
+};
+
+#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
+       [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
+                                      (_gb_disable), (_gb_enable)}
+
 /**
  * vmw_resource_unreserve - unreserve resources previously reserved for
  * command submission.
@@ -87,6 +109,16 @@ static void vmw_resource_list_unreserve(struct list_head *list,
                struct vmw_dma_buffer *new_backup =
                        backoff ? NULL : val->new_backup;
 
+               /*
+                * Transfer staged context bindings to the
+                * persistent context binding tracker.
+                */
+               if (unlikely(val->staged_bindings)) {
+                       vmw_context_binding_state_transfer
+                               (val->res, val->staged_bindings);
+                       kfree(val->staged_bindings);
+                       val->staged_bindings = NULL;
+               }
                vmw_resource_unreserve(res, new_backup,
                        val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
@@ -224,6 +256,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
+ * @validate_as_mob: Validate this buffer as a MOB.
  * @p_val_node: If non-NULL Will be updated with the validate node number
  * on return.
  *
@@ -232,6 +265,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
+                                  bool validate_as_mob,
                                   uint32_t *p_val_node)
 {
        uint32_t val_node;
@@ -244,6 +278,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
+               if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
+                       DRM_ERROR("Inconsistent buffer usage.\n");
+                       return -EINVAL;
+               }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
@@ -266,6 +304,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+               vval_buf->validate_as_mob = validate_as_mob;
        }
 
        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
@@ -302,7 +341,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
                        struct ttm_buffer_object *bo = &res->backup->base;
 
                        ret = vmw_bo_to_validate_list
-                               (sw_context, bo, NULL);
+                               (sw_context, bo,
+                                vmw_resource_needs_backup(res), NULL);
 
                        if (unlikely(ret != 0))
                                return ret;
@@ -362,8 +402,15 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
        struct vmw_resource_val_node *node;
        int ret;
 
-       if (*id == SVGA3D_INVALID_ID)
+       if (*id == SVGA3D_INVALID_ID) {
+               if (p_val)
+                       *p_val = NULL;
+               if (res_type == vmw_res_context) {
+                       DRM_ERROR("Illegal context invalid id.\n");
+                       return -EINVAL;
+               }
                return 0;
+       }
 
        /*
         * Fastpath in case of repeated commands referencing the same
@@ -411,6 +458,18 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
        rcache->node = node;
        if (p_val)
                *p_val = node;
+
+       if (node->first_usage && res_type == vmw_res_context) {
+               node->staged_bindings =
+                       kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+               if (node->staged_bindings == NULL) {
+                       DRM_ERROR("Failed to allocate context binding "
+                                 "information.\n");
+                       goto out_no_reloc;
+               }
+               INIT_LIST_HEAD(&node->staged_bindings->list);
+       }
+
        vmw_resource_unreference(&res);
        return 0;
 
@@ -453,17 +512,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
+       struct vmw_resource_val_node *ctx_node;
+       struct vmw_resource_val_node *res_node;
        int ret;
 
-       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       cmd = container_of(header, struct vmw_sid_cmd, header);
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                               user_context_converter, &cmd->body.cid,
+                               &ctx_node);
        if (unlikely(ret != 0))
                return ret;
 
-       cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
-                               &cmd->body.target.sid, NULL);
-       return ret;
+                               &cmd->body.target.sid, &res_node);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (dev_priv->has_mob) {
+               struct vmw_ctx_bindinfo bi;
+
+               bi.ctx = ctx_node->res;
+               bi.res = res_node ? res_node->res : NULL;
+               bi.bt = vmw_ctx_binding_rt;
+               bi.i1.rt_type = cmd->body.type;
+               return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+       }
+
+       return 0;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -519,11 +596,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
 
-       if (unlikely(!sw_context->kernel)) {
-               DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
-               return -EPERM;
-       }
-
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
@@ -541,11 +613,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
 
-       if (unlikely(!sw_context->kernel)) {
-               DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
-               return -EPERM;
-       }
-
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
@@ -586,7 +653,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
-                                                     NULL);
+                                                     dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
@@ -594,7 +661,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
-                                             NULL);
+                                             dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
 
@@ -671,6 +738,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
        }
 }
 
+/**
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
+ * handle to a MOB id.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @id: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @id.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a MOB id. The translation does not take place immediately, but
+ * during a call to vmw_apply_relocations(). This function builds a relocation
+ * list and a list of buffers to validate. The former needs to be freed using
+ * either vmw_apply_relocations() or vmw_free_relocations(). The latter
+ * needs to be freed using vmw_clear_validations.
+ */
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                SVGAMobId *id,
+                                struct vmw_dma_buffer **vmw_bo_p)
+{
+       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct ttm_buffer_object *bo;
+       uint32_t handle = *id;
+       struct vmw_relocation *reloc;
+       int ret;
+
+       ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not find or use MOB buffer.\n");
+               return -EINVAL;
+       }
+       bo = &vmw_bo->base;
+
+       if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+               DRM_ERROR("Max number relocations per submission"
+                         " exceeded\n");
+               ret = -EINVAL;
+               goto out_no_reloc;
+       }
+
+       reloc = &sw_context->relocs[sw_context->cur_reloc++];
+       reloc->mob_loc = id;
+       reloc->location = NULL;
+
+       ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+       if (unlikely(ret != 0))
+               goto out_no_reloc;
+
+       *vmw_bo_p = vmw_bo;
+       return 0;
+
+out_no_reloc:
+       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_p = NULL;
+       return ret;
+}
+
 /**
  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
  * handle to a valid SVGAGuestPtr
@@ -718,7 +845,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;
 
-       ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+       ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;
 
@@ -731,6 +858,30 @@ out_no_reloc:
        return ret;
 }
 
+/**
+ * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+       struct vmw_begin_gb_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBeginGBQuery q;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_begin_gb_query_cmd,
+                          header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->q.cid,
+                                NULL);
+}
+
 /**
  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
  *
@@ -750,11 +901,63 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);
 
+       if (unlikely(dev_priv->has_mob)) {
+               struct {
+                       SVGA3dCmdHeader header;
+                       SVGA3dCmdBeginGBQuery q;
+               } gb_cmd;
+
+               BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+               gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
+               gb_cmd.header.size = cmd->header.size;
+               gb_cmd.q.cid = cmd->q.cid;
+               gb_cmd.q.type = cmd->q.type;
+
+               memcpy(cmd, &gb_cmd, sizeof(*cmd));
+               return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
+       }
+
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
 }
 
+/**
+ * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
+                               struct vmw_sw_context *sw_context,
+                               SVGA3dCmdHeader *header)
+{
+       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdEndGBQuery q;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_query_cmd, header);
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+                                   &cmd->q.mobid,
+                                   &vmw_bo);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+       vmw_dmabuf_unreference(&vmw_bo);
+       return ret;
+}
+
 /**
  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
  *
@@ -774,6 +977,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_query_cmd, header);
+       if (dev_priv->has_mob) {
+               struct {
+                       SVGA3dCmdHeader header;
+                       SVGA3dCmdEndGBQuery q;
+               } gb_cmd;
+
+               BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+               gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
+               gb_cmd.header.size = cmd->header.size;
+               gb_cmd.q.cid = cmd->q.cid;
+               gb_cmd.q.type = cmd->q.type;
+               gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+               gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+               memcpy(cmd, &gb_cmd, sizeof(*cmd));
+               return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
+       }
+
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;
@@ -790,7 +1012,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
        return ret;
 }
 
-/*
+/**
+ * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                SVGA3dCmdHeader *header)
+{
+       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdWaitForGBQuery q;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_query_cmd, header);
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+                                   &cmd->q.mobid,
+                                   &vmw_bo);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_dmabuf_unreference(&vmw_bo);
+       return 0;
+}
+
+/**
  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
  *
  * @dev_priv: Pointer to a device private struct.
@@ -809,6 +1064,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_query_cmd, header);
+       if (dev_priv->has_mob) {
+               struct {
+                       SVGA3dCmdHeader header;
+                       SVGA3dCmdWaitForGBQuery q;
+               } gb_cmd;
+
+               BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
+
+               gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+               gb_cmd.header.size = cmd->header.size;
+               gb_cmd.q.cid = cmd->q.cid;
+               gb_cmd.q.type = cmd->q.type;
+               gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
+               gb_cmd.q.offset = cmd->q.guestResult.offset;
+
+               memcpy(cmd, &gb_cmd, sizeof(*cmd));
+               return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
+       }
+
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;
@@ -921,15 +1195,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
-       };
+       } *cmd;
 
        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
          ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+       struct vmw_resource_val_node *ctx_node;
+       struct vmw_resource_val_node *res_node;
        int ret;
 
-       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       cmd = container_of(header, struct vmw_tex_state_cmd,
+                          header);
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                               user_context_converter, &cmd->state.cid,
+                               &ctx_node);
        if (unlikely(ret != 0))
                return ret;
 
@@ -939,9 +1220,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
-                                       &cur_state->value, NULL);
+                                       &cur_state->value, &res_node);
                if (unlikely(ret != 0))
                        return ret;
+
+               if (dev_priv->has_mob) {
+                       struct vmw_ctx_bindinfo bi;
+
+                       bi.ctx = ctx_node->res;
+                       bi.res = res_node ? res_node->res : NULL;
+                       bi.bt = vmw_ctx_binding_tex;
+                       bi.i1.texture_stage = cur_state->stage;
+                       vmw_context_binding_add(ctx_node->staged_bindings,
+                                               &bi);
+               }
        }
 
        return 0;
@@ -970,6 +1262,222 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return ret;
 }
 
+/**
+ * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @res_type: The resource type.
+ * @converter: Information about user-space binding for this resource type.
+ * @res_id: Pointer to the user-space resource handle in the command stream.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
+                                struct vmw_sw_context *sw_context,
+                                enum vmw_res_type res_type,
+                                const struct vmw_user_resource_conv
+                                *converter,
+                                uint32_t *res_id,
+                                uint32_t *buf_id,
+                                unsigned long backup_offset)
+{
+       int ret;
+       struct vmw_dma_buffer *dma_buf;
+       struct vmw_resource_val_node *val_node;
+
+       ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
+                               converter, res_id, &val_node);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (val_node->first_usage)
+               val_node->no_buffer_needed = true;
+
+       vmw_dmabuf_unreference(&val_node->new_backup);
+       val_node->new_backup = dma_buf;
+       val_node->new_backup_offset = backup_offset;
+
+       return 0;
+}
+
+/**
+ * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+       struct vmw_bind_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+
+       return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
+                                    user_surface_converter,
+                                    &cmd->body.sid, &cmd->body.mobid,
+                                    0);
+}
+
+/**
+ * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
+                                  struct vmw_sw_context *sw_context,
+                                  SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdUpdateGBImage body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdUpdateGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
+                                    struct vmw_sw_context *sw_context,
+                                    SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBImage body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
+                                      struct vmw_sw_context *sw_context,
+                                      SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdInvalidateGBImage body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.image.sid, NULL);
+}
+
+/**
+ * vmw_cmd_invalidate_gb_surface - Validate an
+ * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        SVGA3dCmdHeader *header)
+{
+       struct vmw_gb_surface_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdInvalidateGBSurface body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.sid, NULL);
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -986,18 +1494,64 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;
+       struct vmw_resource_val_node *ctx_node;
        int ret;
 
        cmd = container_of(header, struct vmw_set_shader_cmd,
                           header);
 
-       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                               user_context_converter, &cmd->body.cid,
+                               &ctx_node);
        if (unlikely(ret != 0))
                return ret;
 
+       if (dev_priv->has_mob) {
+               struct vmw_ctx_bindinfo bi;
+               struct vmw_resource_val_node *res_node;
+
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+                                       user_shader_converter,
+                                       &cmd->body.shid, &res_node);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               bi.ctx = ctx_node->res;
+               bi.res = res_node ? res_node->res : NULL;
+               bi.bt = vmw_ctx_binding_shader;
+               bi.i1.shader_type = cmd->body.type;
+               return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+       }
+
        return 0;
 }
 
+/**
+ * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGA3dCmdHeader *header)
+{
+       struct vmw_bind_gb_shader_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBShader body;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
+                          header);
+
+       return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
+                                    user_shader_converter,
+                                    &cmd->body.shid, &cmd->body.mobid,
+                                    cmd->body.offsetInBytes);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
@@ -1041,50 +1595,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
        return 0;
 }
 
-typedef int (*vmw_cmd_func) (struct vmw_private *,
-                            struct vmw_sw_context *,
-                            SVGA3dCmdHeader *);
-
-#define VMW_CMD_DEF(cmd, func) \
-       [cmd - SVGA_3D_CMD_BASE] = func
-
-static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
-       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
+                   true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
-                   &vmw_cmd_set_render_target_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
-       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
-       VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
-       VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
-       VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+                   &vmw_cmd_set_render_target_check, true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
+                   true, true, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
+                   true, true, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
+                   true, true, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
+                   true, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
+                   true, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-                   &vmw_cmd_blt_surf_screen_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
-       VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+                   &vmw_cmd_blt_surf_screen_check, false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
+                   &vmw_cmd_update_gb_surface, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
+                   &vmw_cmd_readback_gb_image, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
+                   &vmw_cmd_readback_gb_surface, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+                   &vmw_cmd_invalidate_gb_image, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
+                   &vmw_cmd_invalidate_gb_surface, true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
+                   false, false, false),
+       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
+                   true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
+                   false, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
+                   true, false, true)
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -1095,6 +1772,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;
+       const struct vmw_cmd_entry *entry;
+       bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
        /* Handle any none 3D commands */
@@ -1107,18 +1786,40 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 
        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
-               goto out_err;
+               goto out_invalid;
 
        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
-               goto out_err;
+               goto out_invalid;
+
+       entry = &vmw_cmd_entries[cmd_id];
+       if (unlikely(!entry->user_allow && !sw_context->kernel))
+               goto out_privileged;
 
-       ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+       if (unlikely(entry->gb_disable && gb))
+               goto out_old;
+
+       if (unlikely(entry->gb_enable && !gb))
+               goto out_new;
+
+       ret = entry->func(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_invalid;
 
        return 0;
-out_err:
-       DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+out_invalid:
+       DRM_ERROR("Invalid SVGA3D command: %d\n",
+                 cmd_id + SVGA_3D_CMD_BASE);
+       return -EINVAL;
+out_privileged:
+       DRM_ERROR("Privileged SVGA3D command: %d\n",
+                 cmd_id + SVGA_3D_CMD_BASE);
+       return -EPERM;
+out_old:
+       DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
+                 cmd_id + SVGA_3D_CMD_BASE);
+       return -EINVAL;
+out_new:
+       DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
 }
@@ -1174,6 +1875,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
                case VMW_PL_GMR:
                        reloc->location->gmrId = bo->mem.start;
                        break;
+               case VMW_PL_MOB:
+                       *reloc->mob_loc = bo->mem.start;
+                       break;
                default:
                        BUG();
                }
@@ -1198,6 +1902,8 @@ static void vmw_resource_list_unreference(struct list_head *list)
        list_for_each_entry_safe(val, val_next, list, head) {
                list_del_init(&val->head);
                vmw_resource_unreference(&val->res);
+               /* kfree(NULL) is a no-op, so no NULL check is needed */
+               kfree(val->staged_bindings);
                kfree(val);
        }
 }
@@ -1224,7 +1930,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-                                     struct ttm_buffer_object *bo)
+                                     struct ttm_buffer_object *bo,
+                                     bool validate_as_mob)
 {
        int ret;
 
@@ -1238,6 +1945,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
             dev_priv->dummy_query_bo_pinned))
                return 0;
 
+       if (validate_as_mob)
+               return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+
        /**
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
@@ -1259,7 +1969,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
        return ret;
 }
 
-
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
 {
@@ -1267,7 +1976,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
        int ret;
 
        list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-               ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+               ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+                                                entry->validate_as_mob);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -1509,11 +2219,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                        goto out_err;
        }
 
+       ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
+       if (unlikely(ret != 0)) {
+               ret = -ERESTARTSYS;
+               goto out_err;
+       }
+
        cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
-               goto out_err;
+               goto out_unlock_binding;
        }
 
        vmw_apply_relocations(sw_context);
@@ -1538,6 +2254,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
+       mutex_unlock(&dev_priv->binding_mutex);
+
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
 
@@ -1568,6 +2286,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 
        return 0;
 
+out_unlock_binding:
+       mutex_unlock(&dev_priv->binding_mutex);
 out_err:
        vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);
index c62d20e8a6f169cce1173c849b42f160c55a4f39..436b013b42316a06296dd62cb0dda23f5095c075 100644 (file)
@@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
        spin_unlock_irq(&fman->lock);
 }
 
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                struct list_head *list)
 {
        struct vmw_fence_action *action, *next_action;
@@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
  * Note that the action callbacks may be executed before this function
  * returns.
  */
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                              struct vmw_fence_action *action)
 {
        struct vmw_fence_manager *fman = fence->fman;
@@ -993,7 +993,7 @@ struct vmw_event_fence_pending {
        struct drm_vmw_event_fence event;
 };
 
-int vmw_event_fence_action_create(struct drm_file *file_priv,
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
                                  struct vmw_fence_obj *fence,
                                  uint32_t flags,
                                  uint64_t user_data,
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
         */
        if (arg->handle) {
                struct ttm_base_object *base =
-                       ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+                       ttm_base_object_lookup_for_ref(dev_priv->tdev,
+                                                      arg->handle);
 
                if (unlikely(base == NULL)) {
                        DRM_ERROR("Fence event invalid fence object handle "
index 3eb148667d6382f003969757db0b9dd26555f909..6ccd993e26bf4ead66d0e8d1f1b4b592856d7599 100644 (file)
@@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+       if (!(dev_priv->capabilities & SVGA_CAP_3D))
+               return false;
+
+       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+               uint32_t result;
+
+               if (!dev_priv->has_mob)
+                       return false;
+
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+               result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+               mutex_unlock(&dev_priv->hw_mutex);
+
+               return (result != 0);
+       }
+
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
 
@@ -511,24 +528,16 @@ out_err:
 }
 
 /**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
  *
  * @dev_priv: The device private structure.
  * @cid: The hardware context id used for the query.
  *
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
  */
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-                             uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+                                           uint32_t cid)
 {
        /*
         * A query wait without a preceding query end will
@@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 
        return 0;
 }
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+                                       uint32_t cid)
+{
+       /*
+        * A query wait without a preceding query end will
+        * actually finish all queries for this cid
+        * without writing to the query result structure.
+        */
+
+       struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdWaitForGBQuery body;
+       } *cmd;
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Out of fifo space for dummy query.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.cid = cid;
+       cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+       cmd->body.mobid = bo->mem.start;
+       cmd->body.offset = 0;
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+                             uint32_t cid)
+{
+       if (dev_priv->has_mob)
+               return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+       return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
index 6ef0b035becbc5959f78ecf7b978629f90ae98e2..61d8d803199fc97085ef7e7ada07e7cbbd89bb5f 100644 (file)
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
 }
 
 
-static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
-                                    struct list_head *desc_pages)
-{
-       struct page *page, *next;
-       struct svga_guest_mem_descriptor *page_virtual;
-       unsigned int desc_per_page = PAGE_SIZE /
-               sizeof(struct svga_guest_mem_descriptor) - 1;
-
-       if (list_empty(desc_pages))
-               return;
-
-       list_for_each_entry_safe(page, next, desc_pages, lru) {
-               list_del_init(&page->lru);
-
-               if (likely(desc_dma != DMA_ADDR_INVALID)) {
-                       dma_unmap_page(dev, desc_dma, PAGE_SIZE,
-                                      DMA_TO_DEVICE);
-               }
-
-               page_virtual = kmap_atomic(page);
-               desc_dma = (dma_addr_t)
-                       le32_to_cpu(page_virtual[desc_per_page].ppn) <<
-                       PAGE_SHIFT;
-               kunmap_atomic(page_virtual);
-
-               __free_page(page);
-       }
-}
-
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- *
- */
-
-static int vmw_gmr_build_descriptors(struct device *dev,
-                                    struct list_head *desc_pages,
-                                    struct vmw_piter *iter,
-                                    unsigned long num_pages,
-                                    dma_addr_t *first_dma)
-{
-       struct page *page;
-       struct svga_guest_mem_descriptor *page_virtual = NULL;
-       struct svga_guest_mem_descriptor *desc_virtual = NULL;
-       unsigned int desc_per_page;
-       unsigned long prev_pfn;
-       unsigned long pfn;
-       int ret;
-       dma_addr_t desc_dma;
-
-       desc_per_page = PAGE_SIZE /
-           sizeof(struct svga_guest_mem_descriptor) - 1;
-
-       while (likely(num_pages != 0)) {
-               page = alloc_page(__GFP_HIGHMEM);
-               if (unlikely(page == NULL)) {
-                       ret = -ENOMEM;
-                       goto out_err;
-               }
-
-               list_add_tail(&page->lru, desc_pages);
-               page_virtual = kmap_atomic(page);
-               desc_virtual = page_virtual - 1;
-               prev_pfn = ~(0UL);
-
-               while (likely(num_pages != 0)) {
-                       pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
-
-                       if (pfn != prev_pfn + 1) {
-
-                               if (desc_virtual - page_virtual ==
-                                   desc_per_page - 1)
-                                       break;
-
-                               (++desc_virtual)->ppn = cpu_to_le32(pfn);
-                               desc_virtual->num_pages = cpu_to_le32(1);
-                       } else {
-                               uint32_t tmp =
-                                   le32_to_cpu(desc_virtual->num_pages);
-                               desc_virtual->num_pages = cpu_to_le32(tmp + 1);
-                       }
-                       prev_pfn = pfn;
-                       --num_pages;
-                       vmw_piter_next(iter);
-               }
-
-               (++desc_virtual)->ppn = DMA_PAGE_INVALID;
-               desc_virtual->num_pages = cpu_to_le32(0);
-               kunmap_atomic(page_virtual);
-       }
-
-       desc_dma = 0;
-       list_for_each_entry_reverse(page, desc_pages, lru) {
-               page_virtual = kmap_atomic(page);
-               page_virtual[desc_per_page].ppn = cpu_to_le32
-                       (desc_dma >> PAGE_SHIFT);
-               kunmap_atomic(page_virtual);
-               desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
-                                       DMA_TO_DEVICE);
-
-               if (unlikely(dma_mapping_error(dev, desc_dma)))
-                       goto out_err;
-       }
-       *first_dma = desc_dma;
-
-       return 0;
-out_err:
-       vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
-       return ret;
-}
-
-static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-                                    int gmr_id, dma_addr_t desc_dma)
-{
-       mutex_lock(&dev_priv->hw_mutex);
-
-       vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-       wmb();
-       vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
-       mb();
-
-       mutex_unlock(&dev_priv->hw_mutex);
-
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
                 const struct vmw_sg_table *vsgt,
                 unsigned long num_pages,
                 int gmr_id)
 {
-       struct list_head desc_pages;
-       dma_addr_t desc_dma = 0;
-       struct device *dev = dev_priv->dev->dev;
        struct vmw_piter data_iter;
-       int ret;
 
        vmw_piter_start(&data_iter, vsgt, 0);
 
        if (unlikely(!vmw_piter_next(&data_iter)))
                return 0;
 
-       if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-               return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
-
-       if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
-               return -EINVAL;
-
-       if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
+       if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
                return -EINVAL;
 
-       INIT_LIST_HEAD(&desc_pages);
-
-       ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
-                                       num_pages, &desc_dma);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
-       vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
-
-       return 0;
+       return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 }
 
 
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
-       if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+       if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
                vmw_gmr2_unbind(dev_priv, gmr_id);
-               return;
-       }
-
-       mutex_lock(&dev_priv->hw_mutex);
-       vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
-       wmb();
-       vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
-       mb();
-       mutex_unlock(&dev_priv->hw_mutex);
 }
index c5c054ae9056aaea2479eb6a9018013098708ce2..b1273e8e9a6903e2d15d3b153177e9a99c806859 100644 (file)
@@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
                return -ENOMEM;
 
        spin_lock_init(&gman->lock);
-       gman->max_gmr_pages = dev_priv->max_gmr_pages;
        gman->used_gmr_pages = 0;
        ida_init(&gman->gmr_ida);
-       gman->max_gmr_ids = p_size;
+
+       switch (p_size) {
+       case VMW_PL_GMR:
+               gman->max_gmr_ids = dev_priv->max_gmr_ids;
+               gman->max_gmr_pages = dev_priv->max_gmr_pages;
+               break;
+       case VMW_PL_MOB:
+               gman->max_gmr_ids = VMWGFX_NUM_MOB;
+               gman->max_gmr_pages = dev_priv->max_mob_pages;
+               break;
+       default:
+               BUG();
+       }
        man->priv = (void *) gman;
        return 0;
 }
index 45d5b5ab6ca9d8788fe80f0fbfd9f164203c026a..116c49736763ee81a4b4d664bfba2ab0a5e3d5cd 100644 (file)
@@ -53,7 +53,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                param->value = dev_priv->fifo.capabilities;
                break;
        case DRM_VMW_PARAM_MAX_FB_SIZE:
-               param->value = dev_priv->vram_size;
+               param->value = dev_priv->prim_bb_mem;
                break;
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
@@ -71,6 +71,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_MAX_SURF_MEMORY:
                param->value = dev_priv->memory_size;
                break;
+       case DRM_VMW_PARAM_3D_CAPS_SIZE:
+               if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+                       param->value = SVGA3D_DEVCAP_MAX;
+               else
+                       param->value = (SVGA_FIFO_3D_CAPS_LAST -
+                                       SVGA_FIFO_3D_CAPS + 1);
+               param->value *= sizeof(uint32_t);
+               break;
+       case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+               param->value = dev_priv->max_mob_pages * PAGE_SIZE;
+               break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
@@ -92,13 +103,19 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
        void *bounce;
        int ret;
+       bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
 
        if (unlikely(arg->pad64 != 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }
 
-       size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+       if (gb_objects)
+               size = SVGA3D_DEVCAP_MAX;
+       else
+               size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
+
+       size *= sizeof(uint32_t);
 
        if (arg->max_size < size)
                size = arg->max_size;
@@ -109,8 +126,22 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
        }
 
-       fifo_mem = dev_priv->mmio_virt;
-       memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+       if (gb_objects) {
+               int i;
+               uint32_t *bounce32 = (uint32_t *) bounce;
+
+               mutex_lock(&dev_priv->hw_mutex);
+               for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+                       vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+                       *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+               }
+               mutex_unlock(&dev_priv->hw_mutex);
+
+       } else {
+
+               fifo_mem = dev_priv->mmio_virt;
+               memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+       }
 
        ret = copy_to_user(buffer, bounce, size);
        if (ret)
index 4640adbcaf91b9609643895432d858313e56e2e4..0c423766c44119ca923825e879e3d05b7058cc90 100644 (file)
@@ -30,7 +30,7 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
-irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+irqreturn_t vmw_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
index 03f1c203863193621d9941af3051169ea2f79afb..8a650413dea57c8e50212b41f8be00abba3ee8af 100644 (file)
@@ -40,7 +40,7 @@ struct vmw_clip_rect {
  * Clip @num_rects number of @rects against @clip storing the
  * results in @out_rects and the number of passed rects in @out_num.
  */
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
+static void vmw_clip_cliprects(struct drm_clip_rect *rects,
                        int num_rects,
                        struct vmw_clip_rect clip,
                        SVGASignedRect *out_rects,
@@ -423,7 +423,7 @@ struct vmw_framebuffer_surface {
        struct drm_master *master;
 };
 
-void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);
@@ -589,7 +589,7 @@ out_free_tmp:
        return ret;
 }
 
-int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
                                  struct drm_file *file_priv,
                                  unsigned flags, unsigned color,
                                  struct drm_clip_rect *clips,
@@ -609,9 +609,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
        if (!dev_priv->sou_priv)
                return -EINVAL;
 
+       drm_modeset_lock_all(dev_priv->dev);
+
        ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0))
+       if (unlikely(ret != 0)) {
+               drm_modeset_unlock_all(dev_priv->dev);
                return ret;
+       }
 
        if (!num_clips) {
                num_clips = 1;
@@ -629,6 +633,9 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
                                   clips, num_clips, inc, NULL);
 
        ttm_read_unlock(&vmaster->lock);
+
+       drm_modeset_unlock_all(dev_priv->dev);
+
        return 0;
 }
 
@@ -665,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 
        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
-                    surface->sizes[0].width < mode_cmd->width ||
-                    surface->sizes[0].height < mode_cmd->height ||
-                    surface->sizes[0].depth != 1)) {
+                    surface->base_size.width < mode_cmd->width ||
+                    surface->base_size.height < mode_cmd->height ||
+                    surface->base_size.depth != 1)) {
                DRM_ERROR("Incompatible surface dimensions "
                          "for requested mode.\n");
                return -EINVAL;
@@ -754,7 +761,7 @@ struct vmw_framebuffer_dmabuf {
        struct vmw_dma_buffer *buffer;
 };
 
-void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 {
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);
@@ -940,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
        return ret;
 }
 
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
                                 struct drm_file *file_priv,
                                 unsigned flags, unsigned color,
                                 struct drm_clip_rect *clips,
@@ -953,9 +960,13 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
        struct drm_clip_rect norect;
        int ret, increment = 1;
 
+       drm_modeset_lock_all(dev_priv->dev);
+
        ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0))
+       if (unlikely(ret != 0)) {
+               drm_modeset_unlock_all(dev_priv->dev);
                return ret;
+       }
 
        if (!num_clips) {
                num_clips = 1;
@@ -979,6 +990,9 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
        }
 
        ttm_read_unlock(&vmaster->lock);
+
+       drm_modeset_unlock_all(dev_priv->dev);
+
        return ret;
 }
 
@@ -1631,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height)
 {
-       return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+       return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
 }
 
 
@@ -1663,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc)
  * Small shared kms functions.
  */
 
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
                         struct drm_vmw_rect *rects)
 {
        struct drm_device *dev = dev_priv->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
new file mode 100644 (file)
index 0000000..4910e7b
--- /dev/null
@@ -0,0 +1,652 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * If we set up the screen target otable, screen objects stop working.
+ */
+
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+
+#ifdef CONFIG_64BIT
+#define VMW_PPN_SIZE 8
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
+#else
+#define VMW_PPN_SIZE 4
+#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
+#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
+#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
+#endif
+
+/*
+ * struct vmw_mob - Structure containing page table and metadata for a
+ * Guest Memory OBject.
+ *
+ * @num_pages       Number of pages that make up the page table.
+ * @pt_level        The indirection level of the page table. 0-2.
+ * @pt_root_page    DMA address of the level 0 page of the page table.
+ */
+struct vmw_mob {
+       struct ttm_buffer_object *pt_bo;
+       unsigned long num_pages;
+       unsigned pt_level;
+       dma_addr_t pt_root_page;
+       uint32_t id;
+};
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+       unsigned long size;
+       struct vmw_mob *page_table;
+};
+
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+                              struct vmw_mob *mob);
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+                            struct vmw_piter data_iter,
+                            unsigned long num_data_pages);
+
+/*
+ * vmw_setup_otable_base - Issue an object table base setup command to
+ * the device
+ *
+ * @dev_priv:       Pointer to a device private structure
+ * @type:           Type of object table base
+ * @offset:         Start of table offset into dev_priv::otable_bo
+ * @otable:         Pointer to otable metadata.
+ *
+ * This function returns -ENOMEM if it fails to reserve fifo space,
+ * and may block waiting for fifo space.
+ */
+static int vmw_setup_otable_base(struct vmw_private *dev_priv,
+                                SVGAOTableType type,
+                                unsigned long offset,
+                                struct vmw_otable *otable)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetOTableBase64 body;
+       } *cmd;
+       struct vmw_mob *mob;
+       const struct vmw_sg_table *vsgt;
+       struct vmw_piter iter;
+       int ret;
+
+       BUG_ON(otable->page_table != NULL);
+
+       vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+       vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+       WARN_ON(!vmw_piter_next(&iter));
+
+       mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
+       if (unlikely(mob == NULL)) {
+               DRM_ERROR("Failed creating OTable page table.\n");
+               return -ENOMEM;
+       }
+
+       if (otable->size <= PAGE_SIZE) {
+               mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+               mob->pt_root_page = vmw_piter_dma_addr(&iter);
+       } else if (vsgt->num_regions == 1) {
+               mob->pt_level = SVGA3D_MOBFMT_RANGE;
+               mob->pt_root_page = vmw_piter_dma_addr(&iter);
+       } else {
+               ret = vmw_mob_pt_populate(dev_priv, mob);
+               if (unlikely(ret != 0))
+                       goto out_no_populate;
+
+               vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+               mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+               goto out_no_fifo;
+       }
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.type = type;
+       cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+       cmd->body.sizeInBytes = otable->size;
+       cmd->body.validSizeInBytes = 0;
+       cmd->body.ptDepth = mob->pt_level;
+
+       /*
+        * The device doesn't support this, but the otable size is
+        * determined at compile-time, so this BUG shouldn't trigger
+        * randomly.
+        */
+       BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       otable->page_table = mob;
+
+       return 0;
+
+out_no_fifo:
+out_no_populate:
+       vmw_mob_destroy(mob);
+       return ret;
+}
+
+/*
+ * vmw_takedown_otable_base - Issue an object table base takedown command
+ * to the device
+ *
+ * @dev_priv:       Pointer to a device private structure
+ * @type:           Type of object table base
+ *
+ */
+static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
+                                    SVGAOTableType type,
+                                    struct vmw_otable *otable)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetOTableBase body;
+       } *cmd;
+       struct ttm_buffer_object *bo;
+
+       if (otable->page_table == NULL)
+               return;
+
+       bo = otable->page_table->pt_bo;
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL))
+               DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.type = type;
+       cmd->body.baseAddress = 0;
+       cmd->body.sizeInBytes = 0;
+       cmd->body.validSizeInBytes = 0;
+       cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       if (bo) {
+               int ret;
+
+               ret = ttm_bo_reserve(bo, false, true, false, NULL);
+               BUG_ON(ret != 0);
+
+               vmw_fence_single_bo(bo, NULL);
+               ttm_bo_unreserve(bo);
+       }
+
+       vmw_mob_destroy(otable->page_table);
+       otable->page_table = NULL;
+}
+
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+       unsigned long offset;
+       unsigned long bo_size;
+       struct vmw_otable *otables;
+       SVGAOTableType i;
+       int ret;
+
+       otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
+                         GFP_KERNEL);
+       if (unlikely(otables == NULL)) {
+               DRM_ERROR("Failed to allocate space for otable "
+                         "metadata.\n");
+               return -ENOMEM;
+       }
+
+       otables[SVGA_OTABLE_MOB].size =
+               VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+       otables[SVGA_OTABLE_SURFACE].size =
+               VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
+       otables[SVGA_OTABLE_CONTEXT].size =
+               VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
+       otables[SVGA_OTABLE_SHADER].size =
+               VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
+       otables[SVGA_OTABLE_SCREEN_TARGET].size =
+               VMWGFX_NUM_GB_SCREEN_TARGET *
+               SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
+
+       bo_size = 0;
+       for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+               otables[i].size =
+                       (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+               bo_size += otables[i].size;
+       }
+
+       ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+                           ttm_bo_type_device,
+                           &vmw_sys_ne_placement,
+                           0, false, NULL,
+                           &dev_priv->otable_bo);
+
+       if (unlikely(ret != 0))
+               goto out_no_bo;
+
+       ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+       BUG_ON(ret != 0);
+       ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+       if (unlikely(ret != 0))
+               goto out_unreserve;
+       ret = vmw_bo_map_dma(dev_priv->otable_bo);
+       if (unlikely(ret != 0))
+               goto out_unreserve;
+
+       ttm_bo_unreserve(dev_priv->otable_bo);
+
+       offset = 0;
+       for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
+               ret = vmw_setup_otable_base(dev_priv, i, offset,
+                                           &otables[i]);
+               if (unlikely(ret != 0))
+                       goto out_no_setup;
+               offset += otables[i].size;
+       }
+
+       dev_priv->otables = otables;
+       return 0;
+
+out_unreserve:
+       ttm_bo_unreserve(dev_priv->otable_bo);
+out_no_setup:
+       for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+               vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+
+       ttm_bo_unref(&dev_priv->otable_bo);
+out_no_bo:
+       kfree(otables);
+       return ret;
+}
+
+
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+       SVGAOTableType i;
+       struct ttm_buffer_object *bo = dev_priv->otable_bo;
+       int ret;
+
+       for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
+               vmw_takedown_otable_base(dev_priv, i,
+                                        &dev_priv->otables[i]);
+
+       /* Fence the backing buffer object before dropping our reference
+        * so it is not released while the device may still access it. */
+       ret = ttm_bo_reserve(bo, false, true, false, NULL);
+       BUG_ON(ret != 0);
+
+       vmw_fence_single_bo(bo, NULL);
+       ttm_bo_unreserve(bo);
+
+       ttm_bo_unref(&dev_priv->otable_bo);
+       kfree(dev_priv->otables);
+       dev_priv->otables = NULL;
+}
+
+
+/*
+ * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
+ * needed for a guest backed memory object.
+ *
+ * @data_pages:  Number of data pages in the memory object buffer.
+ *
+ * Accumulates one page-table level per loop iteration: each level needs
+ * VMW_PPN_SIZE bytes per page of the level below it, until a level fits
+ * in a single page.
+ */
+static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
+{
+       unsigned long data_size = data_pages * PAGE_SIZE;
+       unsigned long tot_size = 0;
+
+       while (likely(data_size > PAGE_SIZE)) {
+               data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
+               data_size *= VMW_PPN_SIZE;
+               tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+       }
+
+       return tot_size >> PAGE_SHIFT;
+}
+
+/*
+ * vmw_mob_create - Create a mob, but don't populate it.
+ *
+ * @data_pages:  Number of data pages of the underlying buffer object.
+ *
+ * Returns NULL on allocation failure. The returned mob is freed with
+ * vmw_mob_destroy().
+ */
+struct vmw_mob *vmw_mob_create(unsigned long data_pages)
+{
+       struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
+
+       if (unlikely(mob == NULL))
+               return NULL;
+
+       mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+
+       return mob;
+}
+
+/*
+ * vmw_mob_pt_populate - Populate the mob pagetable
+ *
+ * @mob:         Pointer to the mob the pagetable of which we want to
+ *               populate.
+ *
+ * This function allocates memory to be used for the pagetable, and
+ * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
+ * memory resources aren't sufficient and may cause TTM buffer objects
+ * to be swapped out by using the TTM memory accounting function.
+ */
+static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
+                              struct vmw_mob *mob)
+{
+       int ret;
+       BUG_ON(mob->pt_bo != NULL);
+
+       ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
+                           ttm_bo_type_device,
+                           &vmw_sys_ne_placement,
+                           0, false, NULL, &mob->pt_bo);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+
+       /* The freshly created BO is not shared; reserving cannot fail. */
+       BUG_ON(ret != 0);
+       ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+       if (unlikely(ret != 0))
+               goto out_unreserve;
+       ret = vmw_bo_map_dma(mob->pt_bo);
+       if (unlikely(ret != 0))
+               goto out_unreserve;
+
+       ttm_bo_unreserve(mob->pt_bo);
+
+       return 0;
+
+out_unreserve:
+       ttm_bo_unreserve(mob->pt_bo);
+       ttm_bo_unref(&mob->pt_bo);
+
+       return ret;
+}
+
+/**
+ * vmw_mob_assign_ppn - Assign a value to a page table entry
+ *
+ * @addr: Pointer to pointer to page table entry.
+ * @val: The page table entry
+ *
+ * Assigns a value to a page table entry pointed to by *@addr and increments
+ * *@addr according to the page table entry size.
+ */
+#if (VMW_PPN_SIZE == 8)
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+       /* NOTE(review): the __le64 store assumes *addr is 8-byte aligned;
+        * holds as long as entries are written from a page boundary — confirm. */
+       *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+       *addr += 2;
+}
+#else
+static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+{
+       *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+}
+#endif
+
+/*
+ * vmw_mob_build_pt - Build a pagetable
+ *
+ * @data_iter:      Page iterator over the underlying buffer
+ *                  object's data pages.
+ * @num_data_pages: Number of buffer object data pages.
+ * @pt_iter:        Page iterator over the page table pages.
+ *
+ * Returns the number of page table pages actually used.
+ * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
+ */
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
+                                     unsigned long num_data_pages,
+                                     struct vmw_piter *pt_iter)
+{
+       unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
+       unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
+       unsigned long pt_page;
+       __le32 *addr, *save_addr;
+       unsigned long i;
+       struct page *page;
+
+       for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
+               page = vmw_piter_page(pt_iter);
+
+               save_addr = addr = kmap_atomic(page);
+
+               /* Fill one page-table page with entries for the data pages. */
+               for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
+                       vmw_mob_assign_ppn(&addr,
+                                          vmw_piter_dma_addr(data_iter));
+                       if (unlikely(--num_data_pages == 0))
+                               break;
+                       WARN_ON(!vmw_piter_next(data_iter));
+               }
+               kunmap_atomic(save_addr);
+               vmw_piter_next(pt_iter);
+       }
+
+       return num_pt_pages;
+}
+
+/*
+ * vmw_mob_pt_setup - Set up a multilevel mob pagetable
+ *
+ * @mob:            Pointer to a mob whose page table needs setting up.
+ * @data_iter:      Page iterator over the buffer object's data
+ *                  pages.
+ * @num_data_pages: Number of buffer object data pages.
+ *
+ * Iteratively builds each page-table level on top of the previous one
+ * until a level fits in a single page, which becomes the root.
+ */
+static void vmw_mob_pt_setup(struct vmw_mob *mob,
+                            struct vmw_piter data_iter,
+                            unsigned long num_data_pages)
+{
+       unsigned long num_pt_pages = 0;
+       struct ttm_buffer_object *bo = mob->pt_bo;
+       struct vmw_piter save_pt_iter;
+       struct vmw_piter pt_iter;
+       const struct vmw_sg_table *vsgt;
+       int ret;
+
+       ret = ttm_bo_reserve(bo, false, true, false, NULL);
+       BUG_ON(ret != 0);
+
+       vsgt = vmw_bo_sg_table(bo);
+       vmw_piter_start(&pt_iter, vsgt, 0);
+       BUG_ON(!vmw_piter_next(&pt_iter));
+       mob->pt_level = 0;
+       while (likely(num_data_pages > 1)) {
+               ++mob->pt_level;
+               BUG_ON(mob->pt_level > 2);
+               save_pt_iter = pt_iter;
+               num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+                                               &pt_iter);
+               /* The level just built becomes the data for the next level. */
+               data_iter = save_pt_iter;
+               num_data_pages = num_pt_pages;
+       }
+
+       mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
+       ttm_bo_unreserve(bo);
+}
+
+/*
+ * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
+ *
+ * @mob:            Pointer to a mob to destroy.
+ */
+void vmw_mob_destroy(struct vmw_mob *mob)
+{
+       /* Drop the page-table BO reference if the mob was populated. */
+       if (mob->pt_bo)
+               ttm_bo_unref(&mob->pt_bo);
+       kfree(mob);
+}
+
+/*
+ * vmw_mob_unbind - Hide a mob from the device.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob to unbind.
+ */
+void vmw_mob_unbind(struct vmw_private *dev_priv,
+                   struct vmw_mob *mob)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyGBMob body;
+       } *cmd;
+       int ret;
+       struct ttm_buffer_object *bo = mob->pt_bo;
+
+       if (bo) {
+               ret = ttm_bo_reserve(bo, false, true, false, NULL);
+               /*
+                * No one else should be using this buffer.
+                */
+               BUG_ON(ret != 0);
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               /* Don't dereference a NULL cmd on FIFO reserve failure. */
+               DRM_ERROR("Failed reserving FIFO space for Memory "
+                         "Object unbinding.\n");
+       } else {
+               cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+               cmd->header.size = sizeof(cmd->body);
+               cmd->body.mobid = mob->id;
+               vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       }
+       if (bo) {
+               vmw_fence_single_bo(bo, NULL);
+               ttm_bo_unreserve(bo);
+       }
+       vmw_3d_resource_dec(dev_priv, false);
+}
+
+/*
+ * vmw_mob_bind - Make a mob visible to the device after first
+ *                populating it if necessary.
+ *
+ * @dev_priv:       Pointer to a device private.
+ * @mob:            Pointer to the mob we're making visible.
+ * @vsgt:           Pointer to a struct vmw_sg_table describing the data
+ *                  pages of the underlying buffer object.
+ * @num_data_pages: Number of data pages of the underlying buffer
+ *                  object.
+ * @mob_id:         Device id of the mob to bind
+ *
+ * This function is intended to be interfaced with the ttm_tt backend
+ * code.
+ */
+int vmw_mob_bind(struct vmw_private *dev_priv,
+                struct vmw_mob *mob,
+                const struct vmw_sg_table *vsgt,
+                unsigned long num_data_pages,
+                int32_t mob_id)
+{
+       int ret;
+       bool pt_set_up = false;
+       struct vmw_piter data_iter;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBMob64 body;
+       } *cmd;
+
+       mob->id = mob_id;
+       vmw_piter_start(&data_iter, vsgt, 0);
+       if (unlikely(!vmw_piter_next(&data_iter)))
+               return 0;
+
+       /* Pick the cheapest page-table depth the buffer layout allows. */
+       if (likely(num_data_pages == 1)) {
+               mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+               mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+       } else if (vsgt->num_regions == 1) {
+               mob->pt_level = SVGA3D_MOBFMT_RANGE;
+               mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+       } else if (unlikely(mob->pt_bo == NULL)) {
+               ret = vmw_mob_pt_populate(dev_priv, mob);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               vmw_mob_pt_setup(mob, data_iter, num_data_pages);
+               pt_set_up = true;
+               mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
+       }
+
+       (void) vmw_3d_resource_inc(dev_priv, false);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for Memory "
+                         "Object binding.\n");
+               goto out_no_cmd_space;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.mobid = mob_id;
+       cmd->body.ptDepth = mob->pt_level;
+       cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+       cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+
+out_no_cmd_space:
+       vmw_3d_resource_dec(dev_priv, false);
+       if (pt_set_up)
+               ttm_bo_unref(&mob->pt_bo);
+
+       return -ENOMEM;
+}
index 9b5ea2ac7ddff21562aa7ef52a2f96872c623116..6fdd82d42f6549d2af208516ad22b26d45785e28 100644 (file)
@@ -215,6 +215,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
+       INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
@@ -441,6 +442,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
        ttm_bo_unref(&bo);
 }
 
+/*
+ * vmw_user_dmabuf_ref_obj_release - TTM ref-object release callback.
+ * Releases the synccpu write grab when the TTM_REF_SYNCCPU_WRITE ref
+ * object on a user dma buffer goes away (e.g. on file close).
+ */
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+                                           enum ttm_ref_type ref_type)
+{
+       struct vmw_user_dma_buffer *user_bo;
+       user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+       switch (ref_type) {
+       case TTM_REF_SYNCCPU_WRITE:
+               ttm_bo_synccpu_write_release(&user_bo->dma.base);
+               break;
+       default:
+               BUG();
+       }
+}
+
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  *
@@ -471,6 +487,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
        }
 
        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+                             (dev_priv->has_mob) ?
+                             &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
@@ -482,7 +500,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
-                                   &vmw_user_dmabuf_release, NULL);
+                                   &vmw_user_dmabuf_release,
+                                   &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
@@ -515,6 +534,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+                                       struct ttm_object_file *tfile,
+                                       uint32_t flags)
+{
+       struct ttm_buffer_object *bo = &user_bo->dma.base;
+       bool existed;
+       int ret;
+
+       /* allow_cs: just idle the GPU; no blocking ref object is added. */
+       if (flags & drm_vmw_synccpu_allow_cs) {
+               struct ttm_bo_device *bdev = bo->bdev;
+
+               spin_lock(&bdev->fence_lock);
+               ret = ttm_bo_wait(bo, false, true,
+                                 !!(flags & drm_vmw_synccpu_dontblock));
+               spin_unlock(&bdev->fence_lock);
+               return ret;
+       }
+
+       ret = ttm_bo_synccpu_write_grab
+               (bo, !!(flags & drm_vmw_synccpu_dontblock));
+       if (unlikely(ret != 0))
+               return ret;
+
+       /* Track the grab with a ref object so it is auto-released on file
+        * close; drop the grab again if adding failed or it already existed. */
+       ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                TTM_REF_SYNCCPU_WRITE, &existed);
+       if (ret != 0 || existed)
+               ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+       return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ *
+ * Returns 0 on success, negative error code on failure. A grab taken with
+ * drm_vmw_synccpu_allow_cs added no ref object, so nothing is released then.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+                                          struct ttm_object_file *tfile,
+                                          uint32_t flags)
+{
+       if (!(flags & drm_vmw_synccpu_allow_cs))
+               return ttm_ref_object_base_unref(tfile, handle,
+                                                TTM_REF_SYNCCPU_WRITE);
+
+       return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
+{
+       struct drm_vmw_synccpu_arg *arg =
+               (struct drm_vmw_synccpu_arg *) data;
+       struct vmw_dma_buffer *dma_buf;
+       struct vmw_user_dma_buffer *user_bo;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       int ret;
+
+       /* At least one of read/write must be set, and no unknown flags. */
+       if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+           || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+                              drm_vmw_synccpu_dontblock |
+                              drm_vmw_synccpu_allow_cs)) != 0) {
+               DRM_ERROR("Illegal synccpu flags.\n");
+               return -EINVAL;
+       }
+
+       switch (arg->op) {
+       case drm_vmw_synccpu_grab:
+               ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+                                      dma);
+               ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+               vmw_dmabuf_unreference(&dma_buf);
+               /* -ERESTARTSYS / -EBUSY are expected outcomes; don't log. */
+               if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+                            ret != -EBUSY)) {
+                       DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+                                 (unsigned int) arg->handle);
+                       return ret;
+               }
+               break;
+       case drm_vmw_synccpu_release:
+               ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+                                                     arg->flags);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+                                 (unsigned int) arg->handle);
+                       return ret;
+               }
+               break;
+       default:
+               DRM_ERROR("Invalid synccpu operation.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
@@ -591,7 +734,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 }
 
 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                             struct vmw_dma_buffer *dma_buf)
+                             struct vmw_dma_buffer *dma_buf,
+                             uint32_t *handle)
 {
        struct vmw_user_dma_buffer *user_bo;
 
@@ -599,6 +743,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                return -EINVAL;
 
        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+       *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
 }
@@ -1291,11 +1437,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
  * @mem:            The truct ttm_mem_reg indicating to what memory
  *                  region the move is taking place.
  *
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo:res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
  */
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
 {
+       struct vmw_dma_buffer *dma_buf;
+
+       if (mem == NULL)
+               return;
+
+       /* Only vmw dma buffers are of interest here. */
+       if (bo->destroy != vmw_dmabuf_bo_free &&
+           bo->destroy != vmw_user_dmabuf_destroy)
+               return;
+
+       dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+       if (mem->mem_type != VMW_PL_MOB) {
+               struct vmw_resource *res, *n;
+               struct ttm_bo_device *bdev = bo->bdev;
+               struct ttm_validate_buffer val_buf;
+
+               val_buf.bo = bo;
+
+               /* Unbind (with readback) every GB resource backed by this
+                * buffer, since it is being moved out of MOB memory. */
+               list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+                       if (unlikely(res->func->unbind == NULL))
+                               continue;
+
+                       (void) res->func->unbind(res, true, &val_buf);
+                       res->backup_dirty = true;
+                       res->res_dirty = false;
+                       list_del_init(&res->mob_head);
+               }
+
+               /* Wait for the unbind readback to finish before the move. */
+               spin_lock(&bdev->fence_lock);
+               (void) ttm_bo_wait(bo, false, false, false);
+               spin_unlock(&bdev->fence_lock);
+       }
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644 (file)
index 0000000..1457ec4
--- /dev/null
@@ -0,0 +1,441 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+/* Core shader resource state. */
+struct vmw_shader {
+       struct vmw_resource res;
+       SVGA3dShaderType type;  /* Device shader type. */
+       uint32_t size;          /* Byte-code size in bytes. */
+};
+
+/* User-space visible wrapper around a vmw_shader. */
+struct vmw_user_shader {
+       struct ttm_base_object base;
+       struct vmw_shader shader;
+};
+
+static void vmw_user_shader_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base);
+
+static int vmw_gb_shader_create(struct vmw_resource *res);
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_gb_shader_destroy(struct vmw_resource *res);
+
+static uint64_t vmw_user_shader_size;
+
+/* Conversion between user-space base objects and shader resources. */
+static const struct vmw_user_resource_conv user_shader_conv = {
+       .object_type = VMW_RES_SHADER,
+       .base_obj_to_res = vmw_user_shader_base_to_res,
+       .res_free = vmw_user_shader_free
+};
+
+const struct vmw_user_resource_conv *user_shader_converter =
+       &user_shader_conv;
+
+
+/* Resource ops for guest backed shaders. */
+static const struct vmw_res_func vmw_gb_shader_func = {
+       .res_type = vmw_res_shader,
+       .needs_backup = true,
+       .may_evict = true,
+       .type_name = "guest backed shaders",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_gb_shader_create,
+       .destroy = vmw_gb_shader_destroy,
+       .bind = vmw_gb_shader_bind,
+       .unbind = vmw_gb_shader_unbind
+};
+
+/**
+ * Shader management:
+ */
+
+/* Convert an embedded resource pointer back to its vmw_shader. */
+static inline struct vmw_shader *
+vmw_res_to_shader(struct vmw_resource *res)
+{
+       return container_of(res, struct vmw_shader, res);
+}
+
+/* Hardware-destroy callback passed to vmw_resource_activate(). */
+static void vmw_hw_shader_destroy(struct vmw_resource *res)
+{
+       (void) vmw_gb_shader_destroy(res);
+}
+
+/*
+ * vmw_gb_shader_init - Initialize a guest backed shader resource.
+ *
+ * @dev_priv:  Device private.
+ * @res:       Embedded resource to initialize. On failure it is freed
+ *             via @res_free, or kfree if @res_free is NULL.
+ * @size:      Shader byte-code size in bytes.
+ * @offset:    Offset of the byte code within @byte_code.
+ * @type:      Device shader type.
+ * @byte_code: Optional backup buffer holding the byte code; referenced.
+ * @res_free:  Destructor for @res.
+ */
+static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+                             struct vmw_resource *res,
+                             uint32_t size,
+                             uint64_t offset,
+                             SVGA3dShaderType type,
+                             struct vmw_dma_buffer *byte_code,
+                             void (*res_free) (struct vmw_resource *res))
+{
+       struct vmw_shader *shader = vmw_res_to_shader(res);
+       int ret;
+
+       ret = vmw_resource_init(dev_priv, res, true,
+                               res_free, &vmw_gb_shader_func);
+
+
+       if (unlikely(ret != 0)) {
+               if (res_free)
+                       res_free(res);
+               else
+                       kfree(res);
+               return ret;
+       }
+
+       res->backup_size = size;
+       if (byte_code) {
+               res->backup = vmw_dmabuf_reference(byte_code);
+               res->backup_offset = offset;
+       }
+       shader->size = size;
+       shader->type = type;
+
+       vmw_resource_activate(res, vmw_hw_shader_destroy);
+       return 0;
+}
+
+/* Create the shader on the device, allocating a device id for it. */
+static int vmw_gb_shader_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_shader *shader = vmw_res_to_shader(res);
+       int ret;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBShader body;
+       } *cmd;
+
+       /* Already created on the device. */
+       if (likely(res->id != -1))
+               return 0;
+
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a shader id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.shid = res->id;
+       cmd->body.type = shader->type;
+       cmd->body.sizeInBytes = shader->size;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv, false);
+
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       return ret;
+}
+
+/* Bind the shader's backup MOB (byte code) to the device shader. */
+static int vmw_gb_shader_bind(struct vmw_resource *res,
+                             struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBShader body;
+       } *cmd;
+       struct ttm_buffer_object *bo = val_buf->bo;
+
+       /* The backup buffer must already be validated into MOB memory. */
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.shid = res->id;
+       cmd->body.mobid = bo->mem.start;
+       cmd->body.offsetInBytes = 0;
+       res->backup_dirty = false;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       return 0;
+}
+
+/* Unbind the shader from its MOB, fencing the backup buffer. */
+static int vmw_gb_shader_unbind(struct vmw_resource *res,
+                               bool readback,
+                               struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBShader body;
+       } *cmd;
+       struct vmw_fence_obj *fence;
+
+       BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       /* Binding to the invalid id detaches the MOB from the shader. */
+       cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.shid = res->id;
+       cmd->body.mobid = SVGA3D_INVALID_ID;
+       cmd->body.offsetInBytes = 0;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(val_buf->bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+/* Destroy the device shader and release its device id. */
+static int vmw_gb_shader_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyGBShader body;
+       } *cmd;
+
+       /* Never created on the device; nothing to destroy. */
+       if (likely(res->id == -1))
+               return 0;
+
+       /* Kill any context bindings referring to this shader first. */
+       mutex_lock(&dev_priv->binding_mutex);
+       vmw_context_binding_res_list_kill(&res->binding_head);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for shader "
+                         "destruction.\n");
+               mutex_unlock(&dev_priv->binding_mutex);
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.shid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       mutex_unlock(&dev_priv->binding_mutex);
+       vmw_resource_release_id(res);
+       vmw_3d_resource_dec(dev_priv, false);
+
+       return 0;
+}
+
+/**
+ * User-space shader management:
+ */
+
+static struct vmw_resource *
+vmw_user_shader_base_to_res(struct ttm_base_object *base)
+{
+       return &(container_of(base, struct vmw_user_shader, base)->
+                shader.res);
+}
+
+static void vmw_user_shader_free(struct vmw_resource *res)
+{
+       struct vmw_user_shader *ushader =
+               container_of(res, struct vmw_user_shader, shader.res);
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       ttm_base_object_kfree(ushader, base);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                           vmw_user_shader_size);
+}
+
+/**
+ * vmw_user_shader_base_release - Called when user space drops its last
+ * reference on the base object; releases the base-object's resource reference.
+ */
+
+static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
+{
+       struct ttm_base_object *base = *p_base;
+       struct vmw_resource *res = vmw_user_shader_base_to_res(base);
+
+       *p_base = NULL;
+       vmw_resource_unreference(&res);
+}
+
+int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_ref_object_base_unref(tfile, arg->handle,
+                                        TTM_REF_USAGE);
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_shader *ushader;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       struct drm_vmw_shader_create_arg *arg =
+               (struct drm_vmw_shader_create_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       struct vmw_dma_buffer *buffer = NULL;
+       SVGA3dShaderType shader_type;
+       int ret;
+
+       if (arg->buffer_handle != SVGA3D_INVALID_ID) {
+               ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+                                            &buffer);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Could not find buffer for shader "
+                                 "creation.\n");
+                       return ret;
+               }
+
+               if ((u64)buffer->base.num_pages * PAGE_SIZE <
+                   (u64)arg->size + (u64)arg->offset) {
+                       DRM_ERROR("Illegal buffer- or shader size.\n");
+                       ret = -EINVAL;
+                       goto out_bad_arg;
+               }
+       }
+
+       switch (arg->shader_type) {
+       case drm_vmw_shader_type_vs:
+               shader_type = SVGA3D_SHADERTYPE_VS;
+               break;
+       case drm_vmw_shader_type_ps:
+               shader_type = SVGA3D_SHADERTYPE_PS;
+               break;
+       case drm_vmw_shader_type_gs:
+               shader_type = SVGA3D_SHADERTYPE_GS;
+               break;
+       default:
+               DRM_ERROR("Illegal shader type.\n");
+               ret = -EINVAL;
+               goto out_bad_arg;
+       }
+
+       /*
+        * Approximate idr memory usage with 128 bytes. It will be limited
+        * by the maximum number of shaders anyway.
+        */
+
+       if (unlikely(vmw_user_shader_size == 0))
+               vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
+                       + 128;
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  vmw_user_shader_size,
+                                  false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for shader"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+       if (unlikely(ushader == NULL)) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                                   vmw_user_shader_size);
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+       res = &ushader->shader.res;
+       ushader->base.shareable = false;
+       ushader->base.tfile = NULL;
+
+       /*
+        * From here on, the destructor takes over resource freeing.
+        */
+
+       ret = vmw_gb_shader_init(dev_priv, res, arg->size,
+                                arg->offset, shader_type, buffer,
+                                vmw_user_shader_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       tmp = vmw_resource_reference(res);
+       ret = ttm_base_object_init(tfile, &ushader->base, false,
+                                  VMW_RES_SHADER,
+                                  &vmw_user_shader_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               goto out_err;
+       }
+
+       arg->shader_handle = ushader->base.hash.key;
+out_err:
+       vmw_resource_unreference(&res);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+       vmw_dmabuf_unreference(&buffer);
+
+       return ret;
+
+}
index 7de2ea8bd55357561913384bfa23196fe3479ec2..979da1c246a543a445257daee03787e794b266a1 100644 (file)
@@ -41,7 +41,6 @@ struct vmw_user_surface {
        struct ttm_prime_object prime;
        struct vmw_surface srf;
        uint32_t size;
-       uint32_t backup_handle;
 };
 
 /**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_legacy_srf_create(struct vmw_resource *res);
 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
 
 static const struct vmw_user_resource_conv user_surface_conv = {
        .object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
        .unbind = &vmw_legacy_srf_unbind
 };
 
+static const struct vmw_res_func vmw_gb_surface_func = {
+       .res_type = vmw_res_surface,
+       .needs_backup = true,
+       .may_evict = true,
+       .type_name = "guest backed surfaces",
+       .backup_placement = &vmw_mob_placement,
+       .create = vmw_gb_surface_create,
+       .destroy = vmw_gb_surface_destroy,
+       .bind = vmw_gb_surface_bind,
+       .unbind = vmw_gb_surface_unbind
+};
+
 /**
  * struct vmw_surface_dma - SVGA3D DMA command
  */
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
        struct vmw_surface *srf;
        void *cmd;
 
+       if (res->func->destroy == vmw_gb_surface_destroy) {
+               (void) vmw_gb_surface_destroy(res);
+               return;
+       }
+
        if (res->id != -1) {
 
                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
        struct vmw_resource *res = &srf->res;
 
        BUG_ON(res_free == NULL);
-       (void) vmw_3d_resource_inc(dev_priv, false);
+       if (!dev_priv->has_mob)
+               (void) vmw_3d_resource_inc(dev_priv, false);
        ret = vmw_resource_init(dev_priv, res, true, res_free,
+                               (dev_priv->has_mob) ? &vmw_gb_surface_func :
                                &vmw_legacy_surface_func);
 
        if (unlikely(ret != 0)) {
-               vmw_3d_resource_dec(dev_priv, false);
+               if (!dev_priv->has_mob)
+                       vmw_3d_resource_dec(dev_priv, false);
                res_free(res);
                return ret;
        }
@@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
        srf->base_size = *srf->sizes;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-       srf->multisample_count = 1;
+       srf->multisample_count = 0;
 
        cur_bo_offset = 0;
        cur_offset = srf->offsets;
@@ -843,6 +870,7 @@ out_unlock:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
+       struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +882,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
        struct ttm_base_object *base;
        int ret = -EINVAL;
 
-       base = ttm_base_object_lookup(tfile, req->sid);
+       base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
@@ -893,3 +921,436 @@ out_no_reference:
 
        return ret;
 }
+
+/**
+ * vmw_gb_surface_create - Encode and submit a define command for a
+ *                         guest-backed surface.
+ *
+ * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf = vmw_res_to_srf(res);
+       uint32_t cmd_len, submit_len;
+       int ret;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBSurface body;
+       } *cmd;
+
+       if (likely(res->id != -1))
+               return 0;
+
+       (void) vmw_3d_resource_inc(dev_priv, false);
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a surface id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       cmd_len = sizeof(cmd->body);
+       submit_len = sizeof(*cmd);
+       cmd = vmw_fifo_reserve(dev_priv, submit_len);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+       cmd->header.size = cmd_len;
+       cmd->body.sid = srf->res.id;
+       cmd->body.surfaceFlags = srf->flags;
+       cmd->body.format = cpu_to_le32(srf->format);
+       cmd->body.numMipLevels = srf->mip_levels[0];
+       cmd->body.multisampleCount = srf->multisample_count;
+       cmd->body.autogenFilter = srf->autogen_filter;
+       cmd->body.size.width = srf->base_size.width;
+       cmd->body.size.height = srf->base_size.height;
+       cmd->body.size.depth = srf->base_size.depth;
+       vmw_fifo_commit(dev_priv, submit_len);
+
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       vmw_3d_resource_dec(dev_priv, false);
+       return ret;
+}
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBSurface body;
+       } *cmd1;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdUpdateGBSurface body;
+       } *cmd2;
+       uint32_t submit_size;
+       struct ttm_buffer_object *bo = val_buf->bo;
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+       cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd1 == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "binding.\n");
+               return -ENOMEM;
+       }
+
+       cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+       cmd1->header.size = sizeof(cmd1->body);
+       cmd1->body.sid = res->id;
+       cmd1->body.mobid = bo->mem.start;
+       if (res->backup_dirty) {
+               cmd2 = (void *) &cmd1[1];
+               cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+               cmd2->header.size = sizeof(cmd2->body);
+               cmd2->body.sid = res->id;
+               res->backup_dirty = false;
+       }
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct ttm_buffer_object *bo = val_buf->bo;
+       struct vmw_fence_obj *fence;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdReadbackGBSurface body;
+       } *cmd1;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdInvalidateGBSurface body;
+       } *cmd2;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBindGBSurface body;
+       } *cmd3;
+       uint32_t submit_size;
+       uint8_t *cmd;
+
+
+       BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+       submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "unbinding.\n");
+               return -ENOMEM;
+       }
+
+       if (readback) {
+               cmd1 = (void *) cmd;
+               cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+               cmd1->header.size = sizeof(cmd1->body);
+               cmd1->body.sid = res->id;
+               cmd3 = (void *) &cmd1[1];
+       } else {
+               cmd2 = (void *) cmd;
+               cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+               cmd2->header.size = sizeof(cmd2->body);
+               cmd2->body.sid = res->id;
+               cmd3 = (void *) &cmd2[1];
+       }
+
+       cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+       cmd3->header.size = sizeof(cmd3->body);
+       cmd3->body.sid = res->id;
+       cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(val_buf->bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyGBSurface body;
+       } *cmd;
+
+       if (likely(res->id == -1))
+               return 0;
+
+       mutex_lock(&dev_priv->binding_mutex);
+       vmw_context_binding_res_list_kill(&res->binding_head);
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "destruction.\n");
+               mutex_unlock(&dev_priv->binding_mutex);
+               return -ENOMEM;
+       }
+
+       cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.sid = res->id;
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       mutex_unlock(&dev_priv->binding_mutex);
+       vmw_resource_release_id(res);
+       vmw_3d_resource_dec(dev_priv, false);
+
+       return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ *                               the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_surface *user_srf;
+       struct vmw_surface *srf;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       union drm_vmw_gb_surface_create_arg *arg =
+           (union drm_vmw_gb_surface_create_arg *)data;
+       struct drm_vmw_gb_surface_create_req *req = &arg->req;
+       struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       int ret;
+       uint32_t size;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       const struct svga3d_surface_desc *desc;
+       uint32_t backup_handle;
+
+       if (unlikely(vmw_user_surface_size == 0))
+               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+                       128;
+
+       size = vmw_user_surface_size + 128;
+
+       desc = svga3dsurface_get_desc(req->format);
+       if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+               DRM_ERROR("Invalid surface format for surface creation.\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  size, false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for surface"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+       if (unlikely(user_srf == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_user_srf;
+       }
+
+       srf = &user_srf->srf;
+       res = &srf->res;
+
+       srf->flags = req->svga3d_flags;
+       srf->format = req->format;
+       srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+       srf->mip_levels[0] = req->mip_levels;
+       srf->num_sizes = 1;
+       srf->sizes = NULL;
+       srf->offsets = NULL;
+       user_srf->size = size;
+       srf->base_size = req->base_size;
+       srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+       srf->multisample_count = req->multisample_count;
+       res->backup_size = svga3dsurface_get_serialized_size
+         (srf->format, srf->base_size, srf->mip_levels[0],
+          srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+       user_srf->prime.base.shareable = false;
+       user_srf->prime.base.tfile = NULL;
+
+       /**
+        * From this point, the generic resource management functions
+        * destroy the object on failure.
+        */
+
+       ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       if (req->buffer_handle != SVGA3D_INVALID_ID) {
+               ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+                                            &res->backup);
+       } else if (req->drm_surface_flags &
+                  drm_vmw_surface_flag_create_buffer)
+               ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+                                           res->backup_size,
+                                           req->drm_surface_flags &
+                                           drm_vmw_surface_flag_shareable,
+                                           &backup_handle,
+                                           &res->backup);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       tmp = vmw_resource_reference(&srf->res);
+       ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+                                   req->drm_surface_flags &
+                                   drm_vmw_surface_flag_shareable,
+                                   VMW_RES_SURFACE,
+                                   &vmw_user_surface_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       rep->handle = user_srf->prime.base.hash.key;
+       rep->backup_size = res->backup_size;
+       if (res->backup) {
+               rep->buffer_map_handle =
+                       drm_vma_node_offset_addr(&res->backup->base.vma_node);
+               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+               rep->buffer_handle = backup_handle;
+       } else {
+               rep->buffer_map_handle = 0;
+               rep->buffer_size = 0;
+               rep->buffer_handle = SVGA3D_INVALID_ID;
+       }
+
+       vmw_resource_unreference(&res);
+
+       ttm_read_unlock(&vmaster->lock);
+       return 0;
+out_no_user_srf:
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+}
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       union drm_vmw_gb_surface_reference_arg *arg =
+           (union drm_vmw_gb_surface_reference_arg *)data;
+       struct drm_vmw_surface_arg *req = &arg->req;
+       struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_surface *srf;
+       struct vmw_user_surface *user_srf;
+       struct ttm_base_object *base;
+       uint32_t backup_handle;
+       int ret = -EINVAL;
+
+       base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Could not find surface to reference.\n");
+               return -EINVAL;
+       }
+
+       if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+               goto out_bad_resource;
+
+       user_srf = container_of(base, struct vmw_user_surface, prime.base);
+       srf = &user_srf->srf;
+       if (srf->res.backup == NULL) {
+               DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+               goto out_bad_resource;
+       }
+
+       ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+                                TTM_REF_USAGE, NULL);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a GB surface.\n");
+               goto out_bad_resource;
+       }
+
+       mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+       ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+                                       &backup_handle);
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a GB surface "
+                         "backup buffer.\n");
+               (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                                req->sid,
+                                                TTM_REF_USAGE);
+               goto out_bad_resource;
+       }
+
+       rep->creq.svga3d_flags = srf->flags;
+       rep->creq.format = srf->format;
+       rep->creq.mip_levels = srf->mip_levels[0];
+       rep->creq.drm_surface_flags = 0;
+       rep->creq.multisample_count = srf->multisample_count;
+       rep->creq.autogen_filter = srf->autogen_filter;
+       rep->creq.buffer_handle = backup_handle;
+       rep->creq.base_size = srf->base_size;
+       rep->crep.handle = user_srf->prime.base.hash.key;
+       rep->crep.backup_size = srf->res.backup_size;
+       rep->crep.buffer_handle = backup_handle;
+       rep->crep.buffer_map_handle =
+               drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+       ttm_base_object_unref(&base);
+
+       return ret;
+}
index 7d6bed2225422fa2413130a606d2b20fd084cfd4..b2fd029d67b308ce60490d61cd908d743a9e520b 100644 (file)
@@ -1,6 +1,6 @@
 config TEGRA_HOST1X
        tristate "NVIDIA Tegra host1x driver"
-       depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+       depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
        help
          Driver for the NVIDIA Tegra host1x hardware.
 
index afa1e9e4e51265fbdb21698072315786a6b730b7..c1189f0044411810fb2e0e58203421d6ffa302d7 100644 (file)
@@ -7,7 +7,9 @@ host1x-y = \
        channel.o \
        job.o \
        debug.o \
+       mipi.o \
        hw/host1x01.o \
-       hw/host1x02.o
+       hw/host1x02.o \
+       hw/host1x04.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
index 6a929591aa73801df1126433bc514980e80d1465..ccdd2e6da5e3710a205e1b5dc3a9c667a9ed6bcf 100644 (file)
@@ -188,6 +188,7 @@ int host1x_device_init(struct host1x_device *device)
 
        return 0;
 }
+EXPORT_SYMBOL(host1x_device_init);
 
 int host1x_device_exit(struct host1x_device *device)
 {
@@ -213,6 +214,7 @@ int host1x_device_exit(struct host1x_device *device)
 
        return 0;
 }
+EXPORT_SYMBOL(host1x_device_exit);
 
 static int host1x_register_client(struct host1x *host1x,
                                  struct host1x_client *client)
index 83ea51b9f0fce44ea6a5ff96f78c8db49f6b8839..b4ae3affb987bb92ed7dcd1f3317714b260fd61d 100644 (file)
@@ -43,6 +43,7 @@ int host1x_job_submit(struct host1x_job *job)
 
        return host1x_hw_channel_submit(host, job);
 }
+EXPORT_SYMBOL(host1x_job_submit);
 
 struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
 {
@@ -60,6 +61,7 @@ struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
 
        return err ? NULL : channel;
 }
+EXPORT_SYMBOL(host1x_channel_get);
 
 void host1x_channel_put(struct host1x_channel *channel)
 {
@@ -76,6 +78,7 @@ void host1x_channel_put(struct host1x_channel *channel)
 
        mutex_unlock(&channel->reflock);
 }
+EXPORT_SYMBOL(host1x_channel_put);
 
 struct host1x_channel *host1x_channel_request(struct device *dev)
 {
@@ -115,6 +118,7 @@ fail:
        mutex_unlock(&host->chlist_mutex);
        return NULL;
 }
+EXPORT_SYMBOL(host1x_channel_request);
 
 void host1x_channel_free(struct host1x_channel *channel)
 {
@@ -124,3 +128,4 @@ void host1x_channel_free(struct host1x_channel *channel)
        list_del(&channel->list);
        kfree(channel);
 }
+EXPORT_SYMBOL(host1x_channel_free);
index 3ec7d77de24ddc846321f159229e3d592bf02a38..ee3d12b51c50a036b89c48c3e60576a2a14acf5b 100644 (file)
@@ -96,7 +96,6 @@ static void show_all(struct host1x *m, struct output *o)
                show_channels(ch, o, true);
 }
 
-#ifdef CONFIG_DEBUG_FS
 static void show_all_no_fifo(struct host1x *host1x, struct output *o)
 {
        struct host1x_channel *ch;
@@ -153,7 +152,7 @@ static const struct file_operations host1x_debug_fops = {
        .release        = single_release,
 };
 
-void host1x_debug_init(struct host1x *host1x)
+static void host1x_debugfs_init(struct host1x *host1x)
 {
        struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
 
@@ -180,18 +179,22 @@ void host1x_debug_init(struct host1x *host1x)
                           &host1x_debug_force_timeout_channel);
 }
 
-void host1x_debug_deinit(struct host1x *host1x)
+static void host1x_debugfs_exit(struct host1x *host1x)
 {
        debugfs_remove_recursive(host1x->debugfs);
 }
-#else
+
 void host1x_debug_init(struct host1x *host1x)
 {
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               host1x_debugfs_init(host1x);
 }
+
 void host1x_debug_deinit(struct host1x *host1x)
 {
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               host1x_debugfs_exit(host1x);
 }
-#endif
 
 void host1x_debug_dump(struct host1x *host1x)
 {
index 80da003d63de8d4b754cd37a607441c7c4901574..2529908d304bd851fdd9c2d0b9d71471e5d69b20 100644 (file)
@@ -34,6 +34,7 @@
 #include "debug.h"
 #include "hw/host1x01.h"
 #include "hw/host1x02.h"
+#include "hw/host1x04.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -77,7 +78,17 @@ static const struct host1x_info host1x02_info = {
        .sync_offset = 0x3000,
 };
 
+static const struct host1x_info host1x04_info = {
+       .nb_channels = 12,
+       .nb_pts = 192,
+       .nb_mlocks = 16,
+       .nb_bases = 64,
+       .init = host1x04_init,
+       .sync_offset = 0x2100,
+};
+
 static struct of_device_id host1x_of_match[] = {
+       { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
        { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
        { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
        { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
@@ -210,17 +221,26 @@ static int __init tegra_host1x_init(void)
                return err;
 
        err = platform_driver_register(&tegra_host1x_driver);
-       if (err < 0) {
-               host1x_bus_exit();
-               return err;
-       }
+       if (err < 0)
+               goto unregister_bus;
+
+       err = platform_driver_register(&tegra_mipi_driver);
+       if (err < 0)
+               goto unregister_host1x;
 
        return 0;
+
+unregister_host1x:
+       platform_driver_unregister(&tegra_host1x_driver);
+unregister_bus:
+       host1x_bus_exit();
+       return err;
 }
 module_init(tegra_host1x_init);
 
 static void __exit tegra_host1x_exit(void)
 {
+       platform_driver_unregister(&tegra_mipi_driver);
        platform_driver_unregister(&tegra_host1x_driver);
        host1x_bus_exit();
 }
index a61a976e7a421a405d7fe075272228f08e3e3e6d..0b6e8e9629c5330fbc7fadba1a1ae318ae07da12 100644 (file)
@@ -306,4 +306,6 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
        host->debug_op->show_mlocks(host, o);
 }
 
+extern struct platform_driver tegra_mipi_driver;
+
 #endif
index e98caca0ca42a55b120e1a29085c9d02fc49c046..928946c2144bd17eaa814e1ab9f5449cf17264d8 100644 (file)
@@ -17,8 +17,8 @@
  */
 
 /* include hw specification */
-#include "host1x01.h"
-#include "host1x01_hardware.h"
+#include "host1x02.h"
+#include "host1x02_hardware.h"
 
 /* include code */
 #include "cdma_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x02_hardware.h b/drivers/gpu/host1x/hw/host1x02_hardware.h
new file mode 100644 (file)
index 0000000..1549018
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra114
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X02_HARDWARE_H
+#define __HOST1X_HOST1X02_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x02_channel.h"
+#include "hw_host1x02_sync.h"
+#include "hw_host1x02_uclass.h"
+
/*
 * Build the payload of a WAIT_SYNCPT host-class method: syncpoint index
 * @indx in the high field, wait @threshold in the low 24 bits (see the
 * *_indx_f/*_thresh_f field helpers in hw_host1x02_uclass.h).
 */
static inline u32 host1x_class_host_wait_syncpt(
	unsigned indx, unsigned threshold)
{
	return host1x_uclass_wait_syncpt_indx_f(indx)
		| host1x_uclass_wait_syncpt_thresh_f(threshold);
}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+               | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+       unsigned indx, unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_wait_syncpt_base_indx_f(indx)
+               | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+       unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+       unsigned cond, unsigned indx)
+{
+       return host1x_uclass_incr_syncpt_cond_f(cond)
+               | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indbe_f(0xf)
+               | host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset);
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset)
+               | host1x_uclass_indoff_rwn_read_v();
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+       unsigned class_id, unsigned offset, unsigned mask)
+{
+       return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+       return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+       return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+       return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+       return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+       return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+               host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+       return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+       return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset,        unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c
new file mode 100644 (file)
index 0000000..8007c70
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x04.h"
+#include "host1x04_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
/*
 * host1x04_init() - wire up the hardware backend for host1x04 (Tegra124).
 * @host: host1x instance being initialised
 *
 * Points the generic host1x core at the op tables built into this
 * translation unit: the hw .c files #included above are compiled against
 * the host1x04 register definitions pulled in via host1x04_hardware.h.
 *
 * Return: always 0.
 */
int host1x04_init(struct host1x *host)
{
	host->channel_op = &host1x_channel_ops;
	host->cdma_op = &host1x_cdma_ops;
	host->cdma_pb_op = &host1x_pushbuffer_ops;
	host->syncpt_op = &host1x_syncpt_ops;
	host->intr_op = &host1x_intr_ops;
	host->debug_op = &host1x_debug_ops;

	return 0;
}
diff --git a/drivers/gpu/host1x/hw/host1x04.h b/drivers/gpu/host1x/hw/host1x04.h
new file mode 100644 (file)
index 0000000..a9ab749
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra124 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X04_H
+#define HOST1X_HOST1X04_H
+
+struct host1x;
+
+int host1x04_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x04_hardware.h b/drivers/gpu/host1x/hw/host1x04_hardware.h
new file mode 100644 (file)
index 0000000..de1a381
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra124
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X04_HARDWARE_H
+#define __HOST1X_HOST1X04_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x04_channel.h"
+#include "hw_host1x04_sync.h"
+#include "hw_host1x04_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_wait_syncpt_indx_f(indx)
+               | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+               | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+       unsigned indx, unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_wait_syncpt_base_indx_f(indx)
+               | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+       unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+       unsigned cond, unsigned indx)
+{
+       return host1x_uclass_incr_syncpt_cond_f(cond)
+               | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indbe_f(0xf)
+               | host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset);
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset)
+               | host1x_uclass_indoff_rwn_read_v();
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+       unsigned class_id, unsigned offset, unsigned mask)
+{
+       return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+       return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+       return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+       return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+       return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+       return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+               host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+       return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+       return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset,        unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
index a3b3c9874413c1b420632b74df3137da31eaf73c..028e49d9bac9837132f3ccbcb10067a8514dea58 100644 (file)
@@ -111,6 +111,12 @@ static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
 }
 #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
        host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+       return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+       host1x_uclass_load_syncpt_base_r()
 static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
 {
        return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
new file mode 100644 (file)
index 0000000..95e6f96
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X04_CHANNEL_H
+#define HOST1X_HW_HOST1X04_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+       host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+       return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+       host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+       return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+       host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+       return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+       host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+       return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+       host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+       return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+       host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+       return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+       host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+       return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+       host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+       return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+       host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+       return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+       host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+       return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+       host1x_channel_dmactrl_dmainitget()
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_sync.h b/drivers/gpu/host1x/hw/hw_host1x04_sync.h
new file mode 100644 (file)
index 0000000..ef2275b
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X04_SYNC_H
+#define HOST1X_HW_HOST1X04_SYNC_H
+
+#define REGISTER_STRIDE        4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+       return 0xf80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+       host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+       return 0xe80 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+       host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+       return 0xf00 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+       host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+       return 0xf20 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+       host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+       return 0xc00 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+       host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+       return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+       host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+       host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+       return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+       host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+       return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+       host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+       return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+       host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+       return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+       host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+       return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+       host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+       return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+       host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+       return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+       host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+       return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+       host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+       return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+       host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+       return 0x1380 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+       host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+       return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+       host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+       return 0xf60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+       host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+       return 0xc80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+       host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+       return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+       host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+       return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+       host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+       return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+       host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+       return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+       host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+       return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+       host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+       return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+       host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+       return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+       host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+       host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+       return 0xcc0 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+       host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+       return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+       host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+       host1x_sync_cbstat_cbclass_v(r)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_uclass.h b/drivers/gpu/host1x/hw/hw_host1x04_uclass.h
new file mode 100644 (file)
index 0000000..d1460e9
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X04_UCLASS_H
+#define HOST1X_HW_HOST1X04_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+       host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+       return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+       host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+       host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+       return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+       host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+       host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+       host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+       return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+       host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+       return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+       host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+       host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+       return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+       host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+       return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+       host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+       return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+       host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+       return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+       host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+       return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+       host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+       return 1;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+       host1x_uclass_indoff_indroffset_f(v)
+
+#endif
index b26dcc83bc1b373400d72c46aad21d04fb97f52f..db9017adfe2bb944f01558f49769f60715995c87 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/mach/irq.h>
 
 #include "../intr.h"
 #include "../dev.h"
index de5ec333ce1adc1974001d01bad56d2544d472d3..1146e3bba6e19bb69cd7f866c149b4aa7202ae46 100644 (file)
@@ -75,12 +75,14 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 
        return job;
 }
+EXPORT_SYMBOL(host1x_job_alloc);
 
 struct host1x_job *host1x_job_get(struct host1x_job *job)
 {
        kref_get(&job->ref);
        return job;
 }
+EXPORT_SYMBOL(host1x_job_get);
 
 static void job_free(struct kref *ref)
 {
@@ -93,6 +95,7 @@ void host1x_job_put(struct host1x_job *job)
 {
        kref_put(&job->ref, job_free);
 }
+EXPORT_SYMBOL(host1x_job_put);
 
 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           u32 words, u32 offset)
@@ -104,6 +107,7 @@ void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
        cur_gather->offset = offset;
        job->num_gathers++;
 }
+EXPORT_SYMBOL(host1x_job_add_gather);
 
 /*
  * NULL an already satisfied WAIT_SYNCPT host method, by patching its
@@ -560,6 +564,7 @@ out:
 
        return err;
 }
+EXPORT_SYMBOL(host1x_job_pin);
 
 void host1x_job_unpin(struct host1x_job *job)
 {
@@ -577,6 +582,7 @@ void host1x_job_unpin(struct host1x_job *job)
                                      job->gather_copy_mapped,
                                      job->gather_copy);
 }
+EXPORT_SYMBOL(host1x_job_unpin);
 
 /*
  * Debug routine used to dump job entries
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
new file mode 100644 (file)
index 0000000..9882ea1
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+#define MIPI_CAL_CTRL                  0x00
+#define MIPI_CAL_CTRL_START            (1 << 0)
+
+#define MIPI_CAL_AUTOCAL_CTRL          0x01
+
+#define MIPI_CAL_STATUS                        0x02
+#define MIPI_CAL_STATUS_DONE           (1 << 16)
+#define MIPI_CAL_STATUS_ACTIVE         (1 <<  0)
+
+#define MIPI_CAL_CONFIG_CSIA           0x05
+#define MIPI_CAL_CONFIG_CSIB           0x06
+#define MIPI_CAL_CONFIG_CSIC           0x07
+#define MIPI_CAL_CONFIG_CSID           0x08
+#define MIPI_CAL_CONFIG_CSIE           0x09
+#define MIPI_CAL_CONFIG_DSIA           0x0e
+#define MIPI_CAL_CONFIG_DSIB           0x0f
+#define MIPI_CAL_CONFIG_DSIC           0x10
+#define MIPI_CAL_CONFIG_DSID           0x11
+
+#define MIPI_CAL_CONFIG_SELECT         (1 << 21)
+#define MIPI_CAL_CONFIG_HSPDOS(x)      (((x) & 0x1f) << 16)
+#define MIPI_CAL_CONFIG_HSPUOS(x)      (((x) & 0x1f) <<  8)
+#define MIPI_CAL_CONFIG_TERMOS(x)      (((x) & 0x1f) <<  0)
+
+#define MIPI_CAL_BIAS_PAD_CFG0         0x16
+#define MIPI_CAL_BIAS_PAD_PDVCLAMP     (1 << 1)
+#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG1         0x17
+
+#define MIPI_CAL_BIAS_PAD_CFG2         0x18
+#define MIPI_CAL_BIAS_PAD_PDVREG       (1 << 1)
+
+static const struct module {
+       unsigned long reg;
+} modules[] = {
+       { .reg = MIPI_CAL_CONFIG_CSIA },
+       { .reg = MIPI_CAL_CONFIG_CSIB },
+       { .reg = MIPI_CAL_CONFIG_CSIC },
+       { .reg = MIPI_CAL_CONFIG_CSID },
+       { .reg = MIPI_CAL_CONFIG_CSIE },
+       { .reg = MIPI_CAL_CONFIG_DSIA },
+       { .reg = MIPI_CAL_CONFIG_DSIB },
+       { .reg = MIPI_CAL_CONFIG_DSIC },
+       { .reg = MIPI_CAL_CONFIG_DSID },
+};
+
+struct tegra_mipi {
+       void __iomem *regs;
+       struct mutex lock;
+       struct clk *clk;
+};
+
+struct tegra_mipi_device {
+       struct platform_device *pdev;
+       struct tegra_mipi *mipi;
+       struct device *device;
+       unsigned long pads;
+};
+
+static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
+                                            unsigned long reg)
+{
+       return readl(mipi->regs + (reg << 2));
+}
+
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
+                                    unsigned long value, unsigned long reg)
+{
+       writel(value, mipi->regs + (reg << 2));
+}
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+{
+       struct device_node *np = device->of_node;
+       struct tegra_mipi_device *dev;
+       struct of_phandle_args args;
+       int err;
+
+       err = of_parse_phandle_with_args(np, "nvidia,mipi-calibrate",
+                                        "#nvidia,mipi-calibrate-cells", 0,
+                                        &args);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               of_node_put(args.np);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       dev->pdev = of_find_device_by_node(args.np);
+       if (!dev->pdev) {
+               of_node_put(args.np);
+               err = -ENODEV;
+               goto free;
+       }
+
+       of_node_put(args.np);
+
+       dev->mipi = platform_get_drvdata(dev->pdev);
+       if (!dev->mipi) {
+               err = -EPROBE_DEFER;
+               goto pdev_put;
+       }
+
+       dev->pads = args.args[0];
+       dev->device = device;
+
+       return dev;
+
+pdev_put:
+       platform_device_put(dev->pdev);
+free:
+       kfree(dev);
+out:
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(tegra_mipi_request);
+
+void tegra_mipi_free(struct tegra_mipi_device *device)
+{
+       platform_device_put(device->pdev);
+       kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
+
+static int tegra_mipi_wait(struct tegra_mipi *mipi)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(250);
+       unsigned long value;
+
+       while (time_before(jiffies, timeout)) {
+               value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
+               if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
+                   (value & MIPI_CAL_STATUS_DONE) != 0)
+                       return 0;
+
+               usleep_range(10, 50);
+       }
+
+       return -ETIMEDOUT;
+}
+
+int tegra_mipi_calibrate(struct tegra_mipi_device *device)
+{
+       unsigned long value;
+       unsigned int i;
+       int err;
+
+       err = clk_enable(device->mipi->clk);
+       if (err < 0)
+               return err;
+
+       mutex_lock(&device->mipi->lock);
+
+       value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
+       value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+       value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+       tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+       value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
+       value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+       tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+       for (i = 0; i < ARRAY_SIZE(modules); i++) {
+               if (device->pads & BIT(i))
+                       value = MIPI_CAL_CONFIG_SELECT |
+                               MIPI_CAL_CONFIG_HSPDOS(0) |
+                               MIPI_CAL_CONFIG_HSPUOS(4) |
+                               MIPI_CAL_CONFIG_TERMOS(5);
+               else
+                       value = 0;
+
+               tegra_mipi_writel(device->mipi, value, modules[i].reg);
+       }
+
+       tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+
+       err = tegra_mipi_wait(device->mipi);
+
+       mutex_unlock(&device->mipi->lock);
+       clk_disable(device->mipi->clk);
+
+       return err;
+}
+EXPORT_SYMBOL(tegra_mipi_calibrate);
+
+static int tegra_mipi_probe(struct platform_device *pdev)
+{
+       struct tegra_mipi *mipi;
+       struct resource *res;
+       int err;
+
+       mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
+       if (!mipi)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mipi->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mipi->regs))
+               return PTR_ERR(mipi->regs);
+
+       mutex_init(&mipi->lock);
+
+       mipi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(mipi->clk)) {
+               dev_err(&pdev->dev, "failed to get clock\n");
+               return PTR_ERR(mipi->clk);
+       }
+
+       err = clk_prepare(mipi->clk);
+       if (err < 0)
+               return err;
+
+       platform_set_drvdata(pdev, mipi);
+
+       return 0;
+}
+
+static int tegra_mipi_remove(struct platform_device *pdev)
+{
+       struct tegra_mipi *mipi = platform_get_drvdata(pdev);
+
+       clk_unprepare(mipi->clk);
+
+       return 0;
+}
+
+static const struct of_device_id tegra_mipi_of_match[] = {
+       { .compatible = "nvidia,tegra114-mipi", },
+       { },
+};
+
+struct platform_driver tegra_mipi_driver = {
+       .driver = {
+               .name = "tegra-mipi",
+               .of_match_table = tegra_mipi_of_match,
+       },
+       .probe = tegra_mipi_probe,
+       .remove = tegra_mipi_remove,
+};
index 159c479829c959d0bd898742f9bcfec54e74020e..bfb09d802abdfe0c9bcba38d879477610d44bbd8 100644 (file)
@@ -93,6 +93,7 @@ u32 host1x_syncpt_id(struct host1x_syncpt *sp)
 {
        return sp->id;
 }
+EXPORT_SYMBOL(host1x_syncpt_id);
 
 /*
  * Updates the value sent to hardware.
@@ -168,6 +169,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp)
 {
        return host1x_hw_syncpt_cpu_incr(sp->host, sp);
 }
+EXPORT_SYMBOL(host1x_syncpt_incr);
 
 /*
  * Updated sync point form hardware, and returns true if syncpoint is expired,
@@ -377,6 +379,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
        struct host1x *host = dev_get_drvdata(dev->parent);
        return host1x_syncpt_alloc(host, dev, flags);
 }
+EXPORT_SYMBOL(host1x_syncpt_request);
 
 void host1x_syncpt_free(struct host1x_syncpt *sp)
 {
@@ -390,6 +393,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
        sp->name = NULL;
        sp->client_managed = false;
 }
+EXPORT_SYMBOL(host1x_syncpt_free);
 
 void host1x_syncpt_deinit(struct host1x *host)
 {
@@ -408,6 +412,7 @@ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
        smp_rmb();
        return (u32)atomic_read(&sp->max_val);
 }
+EXPORT_SYMBOL(host1x_syncpt_read_max);
 
 /*
  * Read min, which is a shadow of the current sync point value in hardware.
@@ -417,6 +422,7 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
        smp_rmb();
        return (u32)atomic_read(&sp->min_val);
 }
+EXPORT_SYMBOL(host1x_syncpt_read_min);
 
 int host1x_syncpt_nb_pts(struct host1x *host)
 {
@@ -439,13 +445,16 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
                return NULL;
        return host->syncpt + id;
 }
+EXPORT_SYMBOL(host1x_syncpt_get);
 
 struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
 {
        return sp ? sp->base : NULL;
 }
+EXPORT_SYMBOL(host1x_syncpt_get_base);
 
 u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
 {
        return base->id;
 }
+EXPORT_SYMBOL(host1x_syncpt_base_id);
index cea623c36ae23cc007138371b14ef05e770fe6d0..69ea36f07b4d6b47c2030fe9c82bb8bb3fe866d0 100644 (file)
@@ -209,7 +209,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 {
        int i;
        int pagecount;
-       unsigned long long pfn;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
@@ -219,7 +218,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 
        pagecount = size >> PAGE_SHIFT;
-       pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
 
        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -248,7 +246,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
-                       gpadl_header->range[0].pfn_array[i] = pfn+i;
+                       gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+                               kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
                *msginfo = msgheader;
                *messagecount = 1;
 
@@ -301,7 +300,9 @@ static int create_gpadl_header(void *kbuffer, u32 size,
                         * so the hypervisor gurantees that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
-                               gpadl_body->pfn[i] = pfn + pfnsum + i;
+                               gpadl_body->pfn[i] = slow_virt_to_phys(
+                                       kbuffer + PAGE_SIZE * (pfnsum + i)) >>
+                                       PAGE_SHIFT;
 
                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
@@ -327,7 +328,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
-                       gpadl_header->range[0].pfn_array[i] = pfn+i;
+                       gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+                               kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
 
                *msginfo = msgheader;
                *messagecount = 1;
@@ -344,7 +346,7 @@ nomem:
  * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
  *
  * @channel: a channel
- * @kbuffer: from kmalloc
+ * @kbuffer: from kmalloc or vmalloc
  * @size: page-size multiple
  * @gpadl_handle: some funky thing
  */
index 52d548f1dc1ddbccceedab0c7f2bf9e1d53766f2..f6ca3b21aebd989bcdadeb749e4250134dfbe16e 100644 (file)
@@ -573,8 +573,8 @@ config SENSORS_IT87
        help
          If you say yes here you get support for ITE IT8705F, IT8712F,
          IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
-         IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
-         SiS950 clone.
+         IT8771E, IT8772E, IT8782F, IT8783E/F and IT8603E sensor chips,
+         and the SiS950 clone.
 
          This driver can also be built as a module.  If so, the module
          will be called it87.
index 7e16e5d07bc6868c86514dd4ac5c1343c2e4d99b..9ffc4c8ca8b5e293098cf719ffa3c7ce4334aa49 100644 (file)
@@ -2,7 +2,7 @@
  * adm1025.c
  *
  * Copyright (C) 2000       Chen-Yuan Wu <gwu@esoft.com>
- * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009  Jean Delvare <jdelvare@suse.de>
  *
  * The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6
  * voltages (including its own power source) and up to two temperatures
@@ -615,6 +615,6 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
 
 module_i2c_driver(adm1025_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("ADM1025 driver");
 MODULE_LICENSE("GPL");
index 9ee5e066423bee462f17dd6fc81979b618b25e20..d19c790e410a8d7a4354cf7a230e56f9bfeadf6d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2006 Corentin LABBE <corentin.labbe@geomatys.fr>
  *
- * Based on LM83 Driver by Jean Delvare <khali@linux-fr.org>
+ * Based on LM83 Driver by Jean Delvare <jdelvare@suse.de>
  *
  * Give only processor, motherboard temperatures and fan tachs
  * Very rare chip please let me know if you use it
index 253ea396106db45e18ae6b1a54022029d5b7afc9..a8a540ca8c3495c93dd3ac7af1af15e6d9dac914 100644 (file)
@@ -4,7 +4,7 @@
  * Based on lm75.c and lm85.c
  * Supports adm1030 / adm1031
  * Copyright (C) 2004 Alexandre d'Alton <alex@alexdalton.org>
- * Reworked by Jean Delvare <khali@linux-fr.org>
+ * Reworked by Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 22d008bbdc1011d16919e70170f2f9d09b394235..3cefd1aeb24f4a6c73f4bffd64fe7fc5b1edea18 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
  * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
  * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
- * Copyright (C) 2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2009 Jean Delvare <jdelvare@suse.de>
  *
  * Derived from the lm83 driver by Jean Delvare
  *
index 872d76744e30d1a7d603cc6ca332ab2aa76a965f..fc6f5d54e7f755282025045f843ba62dcff68fa9 100644 (file)
@@ -4,7 +4,7 @@
  * Christian W. Zuckschwerdt  <zany@triq.net>  2000-11-23
  * based on lm75.c by Frodo Looijaard <frodol@dds.nl>
  * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
  *
  * The DS1621 device is a digital temperature/thermometer with 9-bit
  * resolution, a thermal alarm output (Tout), and user-defined minimum
index 82e661e8241b7a15cf8c32ce57a048429704a2cd..f76a74cb6dc4136e683559a17f0633f0f03aa251 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201
- * Copyright (C) 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2011  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -548,6 +548,6 @@ static struct i2c_driver emc6w201_driver = {
 
 module_i2c_driver(emc6w201_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver");
 MODULE_LICENSE("GPL");
index 15b7f5281def34947ee72afb7c3e170573d5693e..1a8aa1265262e392dd246a8aecfd39d9e997e75b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * f71805f.c - driver for the Fintek F71805F/FG and F71872F/FG Super-I/O
  *             chips integrated hardware monitoring features
- * Copyright (C) 2005-2006  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2006  Jean Delvare <jdelvare@suse.de>
  *
  * The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates
  * complete hardware monitoring features: voltage, fan and temperature
index 95257a5621d8f4da50f3175de073fed6d84f1456..1e9830513045e5bb0ecf145170904a5b24352e0c 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
  * Kyosti Malkki <kmalkki@cc.hut.fi>
  * Copyright (C) 2004 Hong-Gunn Chew <hglinux@gunnet.org> and
- * Jean Delvare <khali@linux-fr.org>
+ * Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 29ffa27c60b89b60bf84db948cf56a890772599e..70749fc15a4f4fdf07155a995513b7872b3bc4e6 100644 (file)
@@ -10,7 +10,8 @@
  *  This driver supports only the Environment Controller in the IT8705F and
  *  similar parts.  The other devices are supported by different drivers.
  *
- *  Supports: IT8705F  Super I/O chip w/LPC interface
+ *  Supports: IT8603E  Super I/O chip w/LPC interface
+ *            IT8705F  Super I/O chip w/LPC interface
  *            IT8712F  Super I/O chip w/LPC interface
  *            IT8716F  Super I/O chip w/LPC interface
  *            IT8718F  Super I/O chip w/LPC interface
@@ -26,7 +27,7 @@
  *            Sis950   A clone of the IT8705F
  *
  *  Copyright (C) 2001 Chris Gauthron
- *  Copyright (C) 2005-2010 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2005-2010 Jean Delvare <jdelvare@suse.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -64,7 +65,7 @@
 #define DRVNAME "it87"
 
 enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
-            it8772, it8782, it8783 };
+            it8772, it8782, it8783, it8603 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -146,6 +147,7 @@ static inline void superio_exit(void)
 #define IT8772E_DEVID 0x8772
 #define IT8782F_DEVID 0x8782
 #define IT8783E_DEVID 0x8783
+#define IT8306E_DEVID 0x8603 /* FIXME: chip is the IT8603E; rename to IT8603E_DEVID (and its use) */
 #define IT87_ACT_REG  0x30
 #define IT87_BASE_REG 0x60
 
@@ -315,6 +317,12 @@ static const struct it87_devices it87_devices[] = {
                  | FEAT_TEMP_OLD_PECI,
                .old_peci_mask = 0x4,
        },
+       [it8603] = {
+               .name = "it8603",
+               .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+                 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+               .peci_mask = 0x07,
+       },
 };
 
 #define has_16bit_fans(data)   ((data)->features & FEAT_16BIT_FANS)
@@ -361,7 +369,7 @@ struct it87_data {
        unsigned long last_updated;     /* In jiffies */
 
        u16 in_scaled;          /* Internal voltage sensors are scaled */
-       u8 in[9][3];            /* [nr][0]=in, [1]=min, [2]=max */
+       u8 in[10][3];           /* [nr][0]=in, [1]=min, [2]=max */
        u8 has_fan;             /* Bitfield, fans enabled */
        u16 fan[5][2];          /* Register values, [nr][0]=fan, [1]=min */
        u8 has_temp;            /* Bitfield, temp sensors enabled */
@@ -578,6 +586,7 @@ static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
                            7, 2);
 
 static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
+static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
 
 /* 3 temperatures */
 static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
@@ -734,7 +743,7 @@ static int pwm_mode(const struct it87_data *data, int nr)
 {
        int ctrl = data->fan_main_ctrl & (1 << nr);
 
-       if (ctrl == 0)                                  /* Full speed */
+       if (ctrl == 0 && data->type != it8603)          /* Full speed */
                return 0;
        if (data->pwm_ctrl[nr] & 0x80)                  /* Automatic mode */
                return 2;
@@ -929,6 +938,10 @@ static ssize_t set_pwm_enable(struct device *dev,
                        return -EINVAL;
        }
 
+       /* IT8603E does not have on/off mode */
+       if (val == 0 && data->type == it8603)
+               return -EINVAL;
+
        mutex_lock(&data->update_lock);
 
        if (val == 0) {
@@ -948,10 +961,13 @@ static ssize_t set_pwm_enable(struct device *dev,
                else                                    /* Automatic mode */
                        data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
                it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
-               /* set SmartGuardian mode */
-               data->fan_main_ctrl |= (1 << nr);
-               it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
-                                data->fan_main_ctrl);
+
+               if (data->type != it8603) {
+                       /* set SmartGuardian mode */
+                       data->fan_main_ctrl |= (1 << nr);
+                       it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+                                        data->fan_main_ctrl);
+               }
        }
 
        mutex_unlock(&data->update_lock);
@@ -1415,6 +1431,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
 static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
 static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
+/* special AVCC3 IT8603E in9 */
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
 
 static ssize_t show_name(struct device *dev, struct device_attribute
                         *devattr, char *buf)
@@ -1424,7 +1442,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute
 }
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
-static struct attribute *it87_attributes_in[9][5] = {
+static struct attribute *it87_attributes_in[10][5] = {
 {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1476,9 +1494,12 @@ static struct attribute *it87_attributes_in[9][5] = {
 }, {
        &sensor_dev_attr_in8_input.dev_attr.attr,
        NULL
+}, {
+       &sensor_dev_attr_in9_input.dev_attr.attr,
+       NULL
 } };
 
-static const struct attribute_group it87_group_in[9] = {
+static const struct attribute_group it87_group_in[10] = {
        { .attrs = it87_attributes_in[0] },
        { .attrs = it87_attributes_in[1] },
        { .attrs = it87_attributes_in[2] },
@@ -1488,6 +1509,7 @@ static const struct attribute_group it87_group_in[9] = {
        { .attrs = it87_attributes_in[6] },
        { .attrs = it87_attributes_in[7] },
        { .attrs = it87_attributes_in[8] },
+       { .attrs = it87_attributes_in[9] },
 };
 
 static struct attribute *it87_attributes_temp[3][6] = {
@@ -1546,7 +1568,8 @@ static struct attribute *it87_attributes_in_beep[] = {
        &sensor_dev_attr_in5_beep.dev_attr.attr,
        &sensor_dev_attr_in6_beep.dev_attr.attr,
        &sensor_dev_attr_in7_beep.dev_attr.attr,
-       NULL
+       NULL,
+       NULL,
 };
 
 static struct attribute *it87_attributes_temp_beep[] = {
@@ -1685,6 +1708,7 @@ static struct attribute *it87_attributes_label[] = {
        &sensor_dev_attr_in3_label.dev_attr.attr,
        &sensor_dev_attr_in7_label.dev_attr.attr,
        &sensor_dev_attr_in8_label.dev_attr.attr,
+       &sensor_dev_attr_in9_label.dev_attr.attr,
        NULL
 };
 
@@ -1742,6 +1766,9 @@ static int __init it87_find(unsigned short *address,
        case IT8783E_DEVID:
                sio_data->type = it8783;
                break;
+       case IT8306E_DEVID:
+               sio_data->type = it8603;
+               break;
        case 0xffff:    /* No device at all */
                goto exit;
        default:
@@ -1763,11 +1790,16 @@ static int __init it87_find(unsigned short *address,
 
        err = 0;
        sio_data->revision = superio_inb(DEVREV) & 0x0f;
-       pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
-               chip_type, *address, sio_data->revision);
+       pr_info("Found IT%04x%c chip at 0x%x, revision %d\n", chip_type,
+               chip_type == 0x8771 || chip_type == 0x8772 ||
+               chip_type == 0x8603 ? 'E' : 'F', *address,
+               sio_data->revision);
 
        /* in8 (Vbat) is always internal */
        sio_data->internal = (1 << 2);
+       /* Only the IT8603E has in9 */
+       if (sio_data->type != it8603)
+               sio_data->skip_in |= (1 << 9);
 
        /* Read GPIO config and VID value from LDN 7 (GPIO) */
        if (sio_data->type == it87) {
@@ -1844,7 +1876,38 @@ static int __init it87_find(unsigned short *address,
                        sio_data->internal |= (1 << 1);
 
                sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+       } else if (sio_data->type == it8603) {
+               int reg27, reg29;
+
+               sio_data->skip_vid = 1; /* No VID */
+               superio_select(GPIO);
 
+               reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+
+               /* Check if fan3 is there or not */
+               if (reg27 & (1 << 6))
+                       sio_data->skip_pwm |= (1 << 2);
+               if (reg27 & (1 << 7))
+                       sio_data->skip_fan |= (1 << 2);
+
+               /* Check if fan2 is there or not */
+               reg29 = superio_inb(IT87_SIO_GPIO5_REG);
+               if (reg29 & (1 << 1))
+                       sio_data->skip_pwm |= (1 << 1);
+               if (reg29 & (1 << 2))
+                       sio_data->skip_fan |= (1 << 1);
+
+               sio_data->skip_in |= (1 << 5); /* No VIN5 */
+               sio_data->skip_in |= (1 << 6); /* No VIN6 */
+
+               /* no fan4 */
+               sio_data->skip_pwm |= (1 << 3);
+               sio_data->skip_fan |= (1 << 3);
+
+               sio_data->internal |= (1 << 1); /* in7 is VSB */
+               sio_data->internal |= (1 << 3); /* in9 is AVCC */
+
+               sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
        } else {
                int reg;
                bool uart6;
@@ -1966,7 +2029,7 @@ static void it87_remove_files(struct device *dev)
        int i;
 
        sysfs_remove_group(&dev->kobj, &it87_group);
-       for (i = 0; i < 9; i++) {
+       for (i = 0; i < 10; i++) {
                if (sio_data->skip_in & (1 << i))
                        continue;
                sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
@@ -2080,6 +2143,8 @@ static int it87_probe(struct platform_device *pdev)
                        data->in_scaled |= (1 << 7);    /* in7 is VSB */
                if (sio_data->internal & (1 << 2))
                        data->in_scaled |= (1 << 8);    /* in8 is Vbat */
+               if (sio_data->internal & (1 << 3))
+                       data->in_scaled |= (1 << 9);    /* in9 is AVCC */
        } else if (sio_data->type == it8782 || sio_data->type == it8783) {
                if (sio_data->internal & (1 << 0))
                        data->in_scaled |= (1 << 3);    /* in3 is VCC5V */
@@ -2102,7 +2167,7 @@ static int it87_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       for (i = 0; i < 9; i++) {
+       for (i = 0; i < 10; i++) {
                if (sio_data->skip_in & (1 << i))
                        continue;
                err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
@@ -2202,7 +2267,7 @@ static int it87_probe(struct platform_device *pdev)
        }
 
        /* Export labels for internal sensors */
-       for (i = 0; i < 3; i++) {
+       for (i = 0; i < 4; i++) {
                if (!(sio_data->internal & (1 << i)))
                        continue;
                err = sysfs_create_file(&dev->kobj,
@@ -2383,8 +2448,9 @@ static void it87_init_device(struct platform_device *pdev)
        }
        data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
 
-       /* Set tachometers to 16-bit mode if needed */
-       if (has_16bit_fans(data)) {
+       /* Set tachometers to 16-bit mode if needed, IT8603E (and IT8728F?)
+        * has it by default */
+       if (has_16bit_fans(data) && data->type != it8603) {
                tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
                if (~tmp & 0x07 & data->has_fan) {
                        dev_dbg(&pdev->dev,
@@ -2464,6 +2530,8 @@ static struct it87_data *it87_update_device(struct device *dev)
                }
                /* in8 (battery) has no limit registers */
                data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
+               if (data->type == it8603)
+                       data->in[9][0] = it87_read_value(data, 0x2f);
 
                for (i = 0; i < 5; i++) {
                        /* Skip disabled fans */
@@ -2620,7 +2688,7 @@ static void __exit sm_it87_exit(void)
 }
 
 
-MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
 module_param(update_vbat, bool, 0);
 MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
index d0def50ea8605218632593c62411aafd976b73b2..b4ad598feb6c8ee10b7eb3b05ddfef38cf75fc9a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * lm63.c - driver for the National Semiconductor LM63 temperature sensor
  *          with integrated fan control
- * Copyright (C) 2004-2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008  Jean Delvare <jdelvare@suse.de>
  * Based on the lm90 driver.
  *
  * The LM63 is a sensor chip made by National Semiconductor. It measures
@@ -1202,6 +1202,6 @@ static struct i2c_driver lm63_driver = {
 
 module_i2c_driver(lm63_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM63 driver");
 MODULE_LICENSE("GPL");
index a2f3b4a365e4bbafa17385df4fdca298a7ece0e5..9efadfc851bc99e97451963705d738be33591885 100644 (file)
@@ -2,7 +2,7 @@
  * lm78.c - Part of lm_sensors, Linux kernel modules for hardware
  *         monitoring
  * Copyright (c) 1998, 1999  Frodo Looijaard <frodol@dds.nl>
- * Copyright (c) 2007, 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007, 2011  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1108,7 +1108,7 @@ static void __exit sm_lm78_exit(void)
        i2c_del_driver(&lm78_driver);
 }
 
-MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM78/LM79 driver");
 MODULE_LICENSE("GPL");
 
index e998034f1f11e35df0c5848309860fe2e9f0fd66..abd270243ba701d2332977eea1e79f485353a4c0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * lm83.c - Part of lm_sensors, Linux kernel modules for hardware
  *          monitoring
- * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009  Jean Delvare <jdelvare@suse.de>
  *
  * Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
  * a sensor chip made by National Semiconductor. It reports up to four
@@ -427,6 +427,6 @@ static struct lm83_data *lm83_update_device(struct device *dev)
 
 module_i2c_driver(lm83_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM83 driver");
 MODULE_LICENSE("GPL");
index 3894c408fda3cedc1230742cc5c0a9a653227cd5..bed4af358308def4085d603665641b744a17b0f5 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (c) 2002, 2003  Philip Pokorny <ppokorny@penguincomputing.com>
  * Copyright (c) 2003        Margit Schubert-While <margitsw@t-online.de>
  * Copyright (c) 2004        Justin Thiessen <jthiessen@penguincomputing.com>
- * Copyright (C) 2007--2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007--2009  Jean Delvare <jdelvare@suse.de>
  *
  * Chip details at           <http://www.national.com/ds/LM/LM85.pdf>
  *
index 333092ce2465e234c15d415d4a99bb381100f2a3..4c5f20231c1a6a71e0daa66116340dfb701ff8b5 100644 (file)
@@ -5,7 +5,7 @@
  *                          Philip Edelbrock <phil@netroedge.com>
  *                          Stephen Rousset <stephen.rousset@rocketlogix.com>
  *                          Dan Eaton <dan.eaton@rocketlogix.com>
- * Copyright (C) 2004-2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2008  Jean Delvare <jdelvare@suse.de>
  *
  * Original port to Linux 2.6 by Jeff Oliver.
  *
@@ -1011,6 +1011,6 @@ static struct i2c_driver lm87_driver = {
 
 module_i2c_driver(lm87_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org> and others");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de> and others");
 MODULE_DESCRIPTION("LM87 driver");
 MODULE_LICENSE("GPL");
index 8b8f3aa49726873b89267ebe45f4e145eab2c7bb..701e952ae52378477d34c84afd99ce4ebb0e1df1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * lm90.c - Part of lm_sensors, Linux kernel modules for hardware
  *          monitoring
- * Copyright (C) 2003-2010  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2010  Jean Delvare <jdelvare@suse.de>
  *
  * Based on the lm83 driver. The LM90 is a sensor chip made by National
  * Semiconductor. It reports up to two temperatures (its own plus up to
@@ -1679,6 +1679,6 @@ static struct i2c_driver lm90_driver = {
 
 module_i2c_driver(lm90_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM90/ADM1032 driver");
 MODULE_LICENSE("GPL");
index 71626f3c874239eba685e098f65d324707e6f946..9d0e87a4f0cbc1226cbf8efa47d99cf62dbaa4b8 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * lm92 - Hardware monitoring driver
- * Copyright (C) 2005-2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2008  Jean Delvare <jdelvare@suse.de>
  *
  * Based on the lm90 driver, with some ideas taken from the lm_sensors
  * lm92 driver as well.
@@ -440,6 +440,6 @@ static struct i2c_driver lm92_driver = {
 
 module_i2c_driver(lm92_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("LM92/MAX6635 driver");
 MODULE_LICENSE("GPL");
index a6f46058b1be3994d03cc04217f4789a4e1a7633..6f1c6c0dbaf56c1d019375292dfbfeefb4aef25a 100644 (file)
@@ -12,7 +12,7 @@
  *     Copyright (c) 2003       Margit Schubert-While <margitsw@t-online.de>
  *
  * derived in part from w83l785ts.c:
- *     Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ *     Copyright (c) 2003-2004 Jean Delvare <jdelvare@suse.de>
  *
  * Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com>
  *     Copyright (c) 2005 Aspen Systems, Inc.
index 445e5d40ac82c5760c90dfa7fbcf3f32893ac52c..6638e997f83fbd165ebcf019aba2b2da930b715b 100644 (file)
@@ -2,7 +2,7 @@
  * max1619.c - Part of lm_sensors, Linux kernel modules for hardware
  *             monitoring
  * Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- *                         Jean Delvare <khali@linux-fr.org>
+ *                         Jean Delvare <jdelvare@suse.de>
  *
  * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
  * It reports up to two temperatures (its own plus up to
@@ -357,7 +357,6 @@ static struct max1619_data *max1619_update_device(struct device *dev)
 
 module_i2c_driver(max1619_driver);
 
-MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net> and "
-       "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Oleksij Rempel <bug-track@fisher-privat.net>, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("MAX1619 sensor driver");
 MODULE_LICENSE("GPL");
index 8326fbd601508d354f5a037aabdc5420b1018670..6520bc51d02a0f71faeb2456025f656bdc817b11 100644 (file)
@@ -8,7 +8,7 @@
  *
  *  Based on the max1619 driver.
  *  Copyright (C) 2003-2004 Oleksij Rempel <bug-track@fisher-privat.net>
- *                          Jean Delvare <khali@linux-fr.org>
+ *                          Jean Delvare <jdelvare@suse.de>
  *
  * The MAX6642 is a sensor chip made by Maxim.
  * It reports up to two temperatures (its own plus up to
index 8686e966fa28e20d267ddf597580ff62e4faf231..38d5a63340535567719feb35338712570a2c88d2 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (C) 2012  Guenter Roeck <linux@roeck-us.net>
  *
  * Derived from w83627ehf driver
- * Copyright (C) 2005-2012  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012  Jean Delvare <jdelvare@suse.de>
  * Copyright (C) 2006  Yuan Mu (Winbond),
  *                    Rudolf Marek <r.marek@assembler.cz>
  *                    David Hubbard <david.c.hubbard@gmail.com>
index aa615ba73d4bf70f9f627a29a7279d545dae22bb..330fe117e219ff1af15a59d5d2fc4ed11bd79cbc 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  pc87360.c - Part of lm_sensors, Linux kernel modules
  *              for hardware monitoring
- *  Copyright (C) 2004, 2007 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2004, 2007 Jean Delvare <jdelvare@suse.de>
  *
  *  Copied from smsc47m1.c:
  *  Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -1808,7 +1808,7 @@ static void __exit pc87360_exit(void)
 }
 
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("PC8736x hardware monitor");
 MODULE_LICENSE("GPL");
 
index 6e6ea4437bb6947692ebd5b4fcc55e698a675b3f..d847e0a084e0f605ed4f02cc7091a4caf4631617 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  pc87427.c - hardware monitoring driver for the
  *              National Semiconductor PC87427 Super-I/O chip
- *  Copyright (C) 2006, 2008, 2010  Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2006, 2008, 2010  Jean Delvare <jdelvare@suse.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
@@ -1347,7 +1347,7 @@ static void __exit pc87427_exit(void)
        platform_driver_unregister(&pc87427_driver);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("PC87427 hardware monitoring driver");
 MODULE_LICENSE("GPL");
 
index 825883d29002ef1b1f212a3dc02210b9511dd352..5740888c62426b3a45b8b8181a78253123944779 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001-2004 Aurelien Jarno <aurelien@aurel32.net>
  * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index e74bd7e620e8140a44a3917ef701ff22223b8a62..3532026e25dafbf1006bff72559f6e0eba00070e 100644 (file)
@@ -6,7 +6,7 @@
  *                          Kyösti Mälkki <kmalkki@cc.hut.fi>, and
  *                          Mark D. Studebaker <mdsxyz123@yahoo.com>
  * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with
- * the help of Jean Delvare <khali@linux-fr.org>
+ * the help of Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 81348fadf3b67b2deb3ef89a8a1294d344276239..bd89e87bd6ae3297b655609e92f6e1f62c98afbf 100644 (file)
@@ -9,7 +9,7 @@
  *
  * derived in part from smsc47m1.c:
  * Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 05cb814539cb609d42e7bd9bc84abe178cb6b7a7..23a22c4eee51bb142d858c42e97ddb6b52ed1af8 100644 (file)
@@ -7,7 +7,7 @@
  * Super-I/O chips.
  *
  * Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
- * Copyright (C) 2004-2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004-2007 Jean Delvare <jdelvare@suse.de>
  * Ported to Linux 2.6 by Gabriele Gorla <gorlik@yahoo.com>
  *                     and Jean Delvare
  *
index 23ff210513d314da9fa09e944e30c4338f9d1313..f0ab61db7a0d4f536c95ef0196674afc97cc70ac 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  w83627ehf - Driver for the hardware monitoring functionality of
  *             the Winbond W83627EHF Super-I/O chip
- *  Copyright (C) 2005-2012  Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2005-2012  Jean Delvare <jdelvare@suse.de>
  *  Copyright (C) 2006  Yuan Mu (Winbond),
  *                     Rudolf Marek <r.marek@assembler.cz>
  *                     David Hubbard <david.c.hubbard@gmail.com>
@@ -2889,7 +2889,7 @@ static void __exit sensors_w83627ehf_exit(void)
        platform_driver_unregister(&w83627ehf_driver);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("W83627EHF driver");
 MODULE_LICENSE("GPL");
 
index cb9cd326ecb564147ec64474c14a296b1be764dd..c1726be3654c156ede9bdb965bb04e1d2ad6c926 100644 (file)
@@ -5,7 +5,7 @@
  *                           Philip Edelbrock <phil@netroedge.com>,
  *                           and Mark Studebaker <mdsxyz123@yahoo.com>
  * Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 - 1012  Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 1012  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index f9d513949a38a7a47dcb9317c529ef7b5f93a38a..84911616d8c03e872879be61b56a01115cd05713 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (c) 1998 - 2001  Frodo Looijaard <frodol@dds.nl>,
  *                           Philip Edelbrock <phil@netroedge.com>,
  *                           and Mark Studebaker <mdsxyz123@yahoo.com>
- * Copyright (c) 2007 - 2008  Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2008  Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 908209d24664e5fd206c0bfd53738a2030f1b1b6..21894131190f4b9a3d18f29e619f352d9fcdb279 100644 (file)
@@ -2,7 +2,7 @@
  *  w83795.c - Linux kernel driver for hardware monitoring
  *  Copyright (C) 2008 Nuvoton Technology Corp.
  *                Wei Song
- *  Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *  Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -2282,6 +2282,6 @@ static struct i2c_driver w83795_driver = {
 
 module_i2c_driver(w83795_driver);
 
-MODULE_AUTHOR("Wei Song, Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Wei Song, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver");
 MODULE_LICENSE("GPL");
index 39dbe990dc102e5638b157cecb1fd3e4a3720a09..6384b268f59008406a3cd2789fd851978e35acd7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * w83l785ts.c - Part of lm_sensors, Linux kernel modules for hardware
  *               monitoring
- * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2009  Jean Delvare <jdelvare@suse.de>
  *
  * Inspired from the lm83 driver. The W83L785TS-S is a sensor chip made
  * by Winbond. It reports a single external temperature with a 1 deg
@@ -10,7 +10,7 @@
  *   http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
  *
  * Ported to Linux 2.6 by Wolfgang Ziegler <nuppla@gmx.at> and Jean Delvare
- * <khali@linux-fr.org>.
+ * <jdelvare@suse.de>.
  *
  * Thanks to James Bolt <james@evilpenguin.com> for benchmarking the read
  * error handling mechanism.
@@ -299,6 +299,6 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev)
 
 module_i2c_driver(w83l785ts_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("W83L785TS-S driver");
 MODULE_LICENSE("GPL");
index fad22b0bb5b06fff58eb7f5fd49533fd841a7464..65ef9664d5da884cdec666a81228408e5240b381 100644 (file)
  * ------------------------------------------------------------------------- */
 
 /* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
-   <kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */
+   <kmalkki@cc.hut.fi> and Jean Delvare <jdelvare@suse.de> */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
index f892a424009b8123c3cb0ceb3dae75359cceaef9..8b10f88b13d9bd8c908997d3ebdde7de84718732 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/moduleparam.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pca.h>
index 5c2379522aa9fb8d9e8c0e1a6635e4429443289f..34370090b753f616ff56e8a67e8172210b05c2cd 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-pcf.h>
index 6bcdea5856afa7f84e9d2c0534bcdbfa7a696dd1..f5ed03164d86c314942453d244f103d6e9276070 100644 (file)
@@ -152,6 +152,7 @@ config I2C_PIIX4
            ATI SB700/SP5100
            ATI SB800
            AMD Hudson-2
+           AMD ML
            AMD CZ
            Serverworks OSB4
            Serverworks CSB5
index ed9f48d566dba7d4dab71d1217ce792c7dc3005a..9d7be5af2bf2a037ffb93a1e2ca60bc8f0299583 100644 (file)
@@ -12,7 +12,7 @@
  *  On Acorn machines, the following i2c devices are on the bus:
  *     - PCF8583 real time clock & static RAM
  */
-#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
index 3f491815e2c4759a5d0b7d850de20abeafe42381..7d60d3a1f621cf9fa44d66daedd4af0c7c1e0c30 100644 (file)
@@ -58,7 +58,6 @@
 #include <linux/delay.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
index 84ccd9496a5e91934e197f49192cf5a54fc492ef..4611e4754a67071e35275a4411c327cbe59af0ce 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 
 #define ALI1563_MAX_TIMEOUT    500
index 26bcc6127cee62c1b12749944af43af76ae3b908..4823206a4870d51331ed74ab4b61b3630e12c9f7 100644 (file)
@@ -65,7 +65,6 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
index 07f01ac853ff6343c6e3ca51e8066770aa189b09..41fc6837fb8b5393f2193873c765d07d87a2e955 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
  *
- * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2008 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -250,7 +250,7 @@ static void __exit amd756_s4882_exit(void)
                       "Physical bus restoration failed\n");
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("S4882 SMBus multiplexing");
 MODULE_LICENSE("GPL");
 
index e13e2aa2d05d9fb379299fdd84635742cf37efba..819d3c1062a75878a4bd56c8762190e7a662e131 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
index a44e6e77c5a1d1e55dde1a326c67c6ad1cf39dd2..f3d4d79855b5fdc8fb29dc4524422cb1056c96cb 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/acpi.h>
index b5b89239d622d015698d638239fe50bd9562f6f9..8762458ca7da1f09d5094f5f0490bd2bb22c3c2b 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
index ce7ffba2b0208f24fa7324c5c7ad283346e2c693..bdf040fd8675c268510d496cc7d89bbbe1a0d8ae 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/io.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
index 3e5ea2c87a6e19b747f34a80613d892866fba77d..be7f0a20d634d1107a7bb3147b32edf0256546ed 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/errno.h>
 #include <linux/stddef.h>
index ff15ae90aaf54bb5c52585e087954338c0e40a31..e08e458bab0247161e89c9b68c10b8efd3fe255f 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
 #include <linux/fs.h>
index 044f85b01d062f4485cba00a76f0fe4d8540230c..9fd711c03dd2bd2ff2de7fef34db4da569a1fbdd 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/time.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
index 436b0f254916dbe8d1a8c4e3c32e0c13e1362536..512fcfabc18e34470e81735d83dc91c2145629e4 100644 (file)
@@ -12,7 +12,6 @@
  * of this archive for more details.
  */
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
index 79c3d9069a487328edc82c33228c84908b401222..e248257fe517a5f62fc909826640d82d4e15a66f 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/types.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <asm/hydra.h>
 
index 737e298668878f6b0c7d7421374248b596839668..349c2d35e792b81c278edd59832a24c66d5b0caf 100644 (file)
@@ -2,7 +2,7 @@
     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol@dds.nl>,
     Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
     <mdsxyz123@yahoo.com>
-    Copyright (C) 2007 - 2012  Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2007 - 2012  Jean Delvare <jdelvare@suse.de>
     Copyright (C) 2010         Intel Corporation,
                                David Woodhouse <dwmw2@infradead.org>
 
@@ -1312,8 +1312,7 @@ static void __exit i2c_i801_exit(void)
        pci_unregister_driver(&i801_driver);
 }
 
-MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, "
-             "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I801 SMBus driver");
 MODULE_LICENSE("GPL");
 
index f7444100f397cc41baeeb117d790e449e567ed6f..274312c96b12e0eecc46b6fb13e484316e5e3c87 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <asm/irq.h>
 #include <linux/io.h>
index dd24aa0424a9c0cc3df4588a32743171146ac812..3d16c2f60a5e17d6681dc5d3fd3a46bd0543269e 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
index af213045ab7e21d52570c93fd91060d4d330d847..cf99dbf21fd100a5002aac1a6d98ddb20ab847c9 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/acpi.h>
 
index bb132ea7d2b49e1b0050ff998d0f893b6cdb80af..8ce4f517fc56eedcb01072c01adba455e8dd0a4a 100644 (file)
@@ -62,7 +62,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
index b6a741caf4f6566fcfe0c36be0189bbe185272aa..f5391633b53ac69647f4db90cb1379a19a78a01d 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
index 2ca268d6140b54bdfe15eb121b6b3080a2d407a0..b170bdffb5de3aa273d27b97b567b13b27cff278 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard
  *
- * Copyright (C) 2008 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2008 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -245,7 +245,7 @@ static void __exit nforce2_s4985_exit(void)
                       "Physical bus restoration failed\n");
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("S4985 SMBus multiplexing");
 MODULE_LICENSE("GPL");
 
index ac88f4000cc23acfb8fbcdc8bbb63290aa85bb79..0038c451095c6e86d614c13fea0bca8ee298a3fb 100644 (file)
@@ -51,7 +51,6 @@
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
index c61f37a10a074e30f2c3c1e368a602a85e7301b9..80e06fa45720e59235ece70499637eceed2a530f 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
index b929ba271b4705714f846fa07aae3d9116fcada3..81042b08a947669390d61715f285f8702c888f25 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/of.h>
index aa9577881925f3d9244cd13508dfb64a733a4657..62f55fe624cb3be72228decd9f03631f85d2e483 100644 (file)
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport-light.c I2C bus over parallel port                           *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
 
    Based on older i2c-velleman.c driver
    Copyright (C) 1995-2000 Simon G. Vogl
@@ -273,7 +273,7 @@ static void __exit i2c_parport_exit(void)
        release_region(base, 3);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I2C bus over parallel port (light)");
 MODULE_LICENSE("GPL");
 
index 81d887869620b40fafd3b21c76d85643a6247d0d..a27aae2d6757195039a69cd192dda8774081b194 100644 (file)
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport.c I2C bus over parallel port                                 *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2011 Jean Delvare <jdelvare@suse.de>
 
    Based on older i2c-philips-par.c driver
    Copyright (C) 1995-2000 Simon G. Vogl
@@ -298,7 +298,7 @@ static void __exit i2c_parport_exit(void)
        parport_unregister_driver(&i2c_parport_driver);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I2C bus over parallel port");
 MODULE_LICENSE("GPL");
 
index 3fe652302ea7d2286bad2f5d9f0d696395d8a55c..e572f3aac0f79863a98d7c9c134ab6b038c36eec 100644 (file)
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport.h I2C bus over parallel port                                 *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2010 Jean Delvare <jdelvare@suse.de>
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
index 39e2755e3f257c09bd1edd76371130fd8df0ceda..845f12598e7914bf5bf5c8f49036f6acacb017bb 100644 (file)
@@ -12,7 +12,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
index a028617b8f13c37a4a8371264a149ba7f6d44540..39dd8ec60dfda4170664c009726aa46d8c5eae42 100644 (file)
@@ -22,7 +22,7 @@
        Intel PIIX4, 440MX
        Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
        ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
-       AMD Hudson-2, CZ
+       AMD Hudson-2, ML, CZ
        SMSC Victory66
 
    Note: we assume there can only be one device, with one or more
@@ -38,7 +38,6 @@
 #include <linux/ioport.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/dmi.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -208,16 +207,16 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
                                   "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
                } else {
                        dev_err(&PIIX4_dev->dev,
-                               "Host SMBus controller not enabled!\n");
+                               "SMBus Host Controller not enabled!\n");
                        release_region(piix4_smba, SMBIOSIZE);
                        return -ENODEV;
                }
        }
 
        if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
-               dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
+               dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
        else if ((temp & 0x0E) == 0)
-               dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
+               dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
        else
                dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
                        "(or code out of date)!\n");
@@ -235,7 +234,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 {
        unsigned short piix4_smba;
        unsigned short smba_idx = 0xcd6;
-       u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
+       u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
+       u8 i2ccfg, i2ccfg_offset = 0x10;
 
        /* SB800 and later SMBus does not support forcing address */
        if (force || force_addr) {
@@ -245,7 +245,15 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        }
 
        /* Determine the address of the SMBus areas */
-       smb_en = (aux) ? 0x28 : 0x2c;
+       if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+            PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+            PIIX4_dev->revision >= 0x41) ||
+           (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+            PIIX4_dev->device == 0x790b &&
+            PIIX4_dev->revision >= 0x49))
+               smb_en = 0x00;
+       else
+               smb_en = (aux) ? 0x28 : 0x2c;
 
        if (!request_region(smba_idx, 2, "smba_idx")) {
                dev_err(&PIIX4_dev->dev, "SMBus base address index region "
@@ -258,13 +266,22 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        smba_en_hi = inb_p(smba_idx + 1);
        release_region(smba_idx, 2);
 
-       if ((smba_en_lo & 1) == 0) {
+       if (!smb_en) {
+               smb_en_status = smba_en_lo & 0x10;
+               piix4_smba = smba_en_hi << 8;
+               if (aux)
+                       piix4_smba |= 0x20;
+       } else {
+               smb_en_status = smba_en_lo & 0x01;
+               piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+       }
+
+       if (!smb_en_status) {
                dev_err(&PIIX4_dev->dev,
-                       "Host SMBus controller not enabled!\n");
+                       "SMBus Host Controller not enabled!\n");
                return -ENODEV;
        }
 
-       piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
        if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
                return -ENODEV;
 
@@ -277,7 +294,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        /* Aux SMBus does not support IRQ information */
        if (aux) {
                dev_info(&PIIX4_dev->dev,
-                        "SMBus Host Controller at 0x%x\n", piix4_smba);
+                        "Auxiliary SMBus Host Controller at 0x%x\n",
+                        piix4_smba);
                return piix4_smba;
        }
 
@@ -292,9 +310,9 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        release_region(piix4_smba + i2ccfg_offset, 1);
 
        if (i2ccfg & 1)
-               dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n");
+               dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus\n");
        else
-               dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n");
+               dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus\n");
 
        dev_info(&PIIX4_dev->dev,
                 "SMBus Host Controller at 0x%x, revision %d\n",
index f6389e2c9d02d704163cd7b2f0fc6c23bda1f8ba..8564768fee32eb24d7bfd92aec29d362ffe6cbdc 100644 (file)
@@ -26,7 +26,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
index 8c87f4a9793be22ef28b8de3c8c7ae3766b25bde..01e967763c2a70ed92e15eb4e134ab84f621d42c 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/of_irq.h>
index ac80199885bef383c1a394ffbdbff335ab529b5d..c83fc3ccdd2b2c5677a977d00462c65d6c334489 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/types.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
index 2c2fd7c2b116624f737352fb6ea5dee55ff3cad3..0282d4d42805fae9c2a6d68c3213bf0cab18f6f6 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
@@ -111,6 +110,7 @@ struct rcar_i2c_priv {
        void __iomem *io;
        struct i2c_adapter adap;
        struct i2c_msg  *msg;
+       struct clk *clk;
 
        spinlock_t lock;
        wait_queue_head_t wait;
@@ -227,18 +227,12 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
                                    u32 bus_speed,
                                    struct device *dev)
 {
-       struct clk *clkp = clk_get(dev, NULL);
        u32 scgd, cdf;
        u32 round, ick;
        u32 scl;
        u32 cdf_width;
        unsigned long rate;
 
-       if (IS_ERR(clkp)) {
-               dev_err(dev, "couldn't get clock\n");
-               return PTR_ERR(clkp);
-       }
-
        switch (priv->devtype) {
        case I2C_RCAR_GEN1:
                cdf_width = 2;
@@ -266,7 +260,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
         * clkp : peripheral_clk
         * F[]  : integer up-valuation
         */
-       rate = clk_get_rate(clkp);
+       rate = clk_get_rate(priv->clk);
        cdf = rate / 20000000;
        if (cdf >= 1 << cdf_width) {
                dev_err(dev, "Input clock %lu too high\n", rate);
@@ -308,7 +302,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
 
 scgd_find:
        dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
-               scl, bus_speed, clk_get_rate(clkp), round, cdf, scgd);
+               scl, bus_speed, clk_get_rate(priv->clk), round, cdf, scgd);
 
        /*
         * keep icccr value
@@ -604,7 +598,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
                 * error handling
                 */
                if (rcar_i2c_flags_has(priv, ID_NACK)) {
-                       ret = -EREMOTEIO;
+                       ret = -ENXIO;
                        break;
                }
 
@@ -623,7 +617,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        pm_runtime_put(dev);
 
-       if (ret < 0)
+       if (ret < 0 && ret != -ENXIO)
                dev_err(dev, "error %d : %x\n", ret, priv->flags);
 
        return ret;
@@ -664,6 +658,12 @@ static int rcar_i2c_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       priv->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               dev_err(dev, "cannot get clock\n");
+               return PTR_ERR(priv->clk);
+       }
+
        bus_speed = 100000; /* default 100 kHz */
        ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
        if (ret < 0 && pdata && pdata->bus_speed)
index 599235514138564ba2db1befc6b711655420cf16..dfc98df7b1b6a72ccc96e1e1b242bcab9770002e 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
 
index 5e8f136e233f79d5205a5ef65038474721154b16..d76f3d9737ec920564e72184598010e5f581e7a2 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
index 4fc87e7c94c9472b1df8b44f25e10dd605593805..294c80f21d65163b86e17d27a0101a3b3b921b50 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 36a9556d7cfa8e287b4e8a2f819dd7b2768842c9..19b8505d0cdd0b02ebb437dc7ac859fc91d20df1 100644 (file)
@@ -45,7 +45,6 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
index b9faf9b6002bce7c09286229fd1680a212f2d11c..f8aa0c29f02b2cdbd8f852da3aa707058e4413ec 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 
index 6ffa56e0851777981af053420d69df68fc7c9d1b..057602683553dc132d01e191af875378573909b1 100644 (file)
@@ -3,7 +3,7 @@
  * These devices include an I2C master which can be controlled over the
  * serial port.
  *
- * Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2007 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -321,7 +321,7 @@ static void __exit taos_exit(void)
        serio_unregister_driver(&taos_drv);
 }
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("TAOS evaluation module driver");
 MODULE_LICENSE("GPL");
 
index be662511c58bf931aef8e57de31905d710952363..49d7f14b9d275074fc3181b7cacabc49fb521055 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
index b2d90e105f41c93b6a6013e6e20adcfbb6927864..40d36df678dec750175a4be7e4e424e4d25e9a13 100644 (file)
@@ -2,7 +2,7 @@
     Copyright (c) 1998 - 2002  Frodo Looijaard <frodol@dds.nl>,
     Philip Edelbrock <phil@netroedge.com>, Kyösti Mälkki <kmalkki@cc.hut.fi>,
     Mark D. Studebaker <mdsxyz123@yahoo.com>
-    Copyright (C) 2005 - 2008  Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2005 - 2008  Jean Delvare <jdelvare@suse.de>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -503,7 +503,7 @@ static void __exit i2c_vt596_exit(void)
 
 MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
              "Mark D. Studebaker <mdsxyz123@yahoo.com> and "
-             "Jean Delvare <khali@linux-fr.org>");
+             "Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("vt82c596 SMBus driver");
 MODULE_LICENSE("GPL");
 
index 6f9918f37b91c23f0464d3a7ed4d70ccc6ae4e93..28107502517f19005860861ca9ff17f785295575 100644 (file)
@@ -30,7 +30,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/delay.h>
index 7945b05d3ea0415c2fe86c5f578fa90787e20ef1..17f7352eca6bf7fe37ef1bb2c10f2ada6e4aa984 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
index ae1258b95d6041cdfbba0194cbe0de0bf1a8d529..8eadf0f47ad7a1a295a9b4486c3bd870ed756e38 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
index c4c5588ec0fbe3dcafe23ff2af00a813721fb5c5..5fb80b8962a2ad7d8e78dcee01df68cb639d3602 100644 (file)
@@ -21,7 +21,7 @@
 /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
    All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
    SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
-   Jean Delvare <khali@linux-fr.org>
+   Jean Delvare <jdelvare@suse.de>
    Mux support by Rodolfo Giometti <giometti@enneenne.com> and
    Michael Lawnick <michael.lawnick.ext@nsn.com>
    OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
@@ -261,10 +261,9 @@ static int i2c_device_probe(struct device *dev)
 
        acpi_dev_pm_attach(&client->dev, true);
        status = driver->probe(client, i2c_match_id(driver->id_table, client));
-       if (status) {
-               i2c_set_clientdata(client, NULL);
+       if (status)
                acpi_dev_pm_detach(&client->dev, true);
-       }
+
        return status;
 }
 
@@ -272,7 +271,7 @@ static int i2c_device_remove(struct device *dev)
 {
        struct i2c_client       *client = i2c_verify_client(dev);
        struct i2c_driver       *driver;
-       int                     status;
+       int status = 0;
 
        if (!client || !dev->driver)
                return 0;
@@ -281,12 +280,8 @@ static int i2c_device_remove(struct device *dev)
        if (driver->remove) {
                dev_dbg(dev, "remove\n");
                status = driver->remove(client);
-       } else {
-               dev->driver = NULL;
-               status = 0;
        }
-       if (status == 0)
-               i2c_set_clientdata(client, NULL);
+
        acpi_dev_pm_detach(&client->dev, true);
        return status;
 }
index c99b229873665b56656a7cabd95c7dfd6acb3fb1..fc99f0d6b4a5b0d1f11d886e5bdb5abf23073b0f 100644 (file)
@@ -2,7 +2,7 @@
  * i2c-smbus.c - SMBus extensions to the I2C protocol
  *
  * Copyright (C) 2008 David Brownell
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -246,6 +246,6 @@ EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
 
 module_i2c_driver(smbalert_driver);
 
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("SMBus protocol extensions support");
 MODULE_LICENSE("GPL");
index d0a9c590c3cd4708937c924afe9f6759602948f0..77e4849d2f2ac348ba06dbbc88f6196856758a7f 100644 (file)
@@ -2,7 +2,7 @@
     i2c-stub.c - I2C/SMBus chip emulator
 
     Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
-    Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
index c58e093b6032480a316c1725db5f6879842cd2ef..69afffa8f427a0b055c3a13ae1dd9423b27e76da 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
index 8a8c56f4b026d6a22e54941e9959ea633b42c321..d8989c823f50d0baa7c73c09a64e9016821b7f74 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c-mux-gpio.h>
 #include <linux/platform_device.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
index c4f08ad311832ed79da7d02d6606342e696b72c1..cb772775da431e78ba5ecf1a3da9c45fba135a03 100644 (file)
@@ -17,7 +17,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
index e835304e7b5a0c1305a5997b38dd3c805fd0134d..550bd36aa5d651b7324879df550563ca0c85d53e 100644 (file)
@@ -28,7 +28,7 @@
  * Based on:
  *     i2c-virtual_cb.c from Brian Kuschak <bkuschak@yahoo.com>
  * and
- *     pca9540.c from Jean Delvare <khali@linux-fr.org>.
+ *     pca9540.c from Jean Delvare <jdelvare@suse.de>.
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -40,7 +40,6 @@
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
index d7978dc4ad0b075a03b842bc93b52a61c952ae70..4ff0ef3e07a6b9839c4bcb9e48618f859bc7e10d 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/i2c-mux-pinctrl.h>
index 6490a2dea96b01526657615ceb7139d9b1603a55..f079ca2f260b605ae5f20b7f1010d653baad22e2 100644 (file)
@@ -9,7 +9,9 @@
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
+#include <linux/ide.h>
 #include <scsi/scsi.h>
+#include "ide-cd.h"
 
 #ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS
 void ide_cd_log_error(const char *name, struct request *failed_command,
index a8c2c8f8660a6a312c7d6767f9a0fee14abf79e1..40e683a84ff91b61ebd7487cd6e074528c7d3c67 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/string.h>
+#include <linux/ide.h>
 
 static struct ide_pio_info {
        const char      *name;
index 3e7fdbb4916b1c810314acb77fe9f3b0a2c7cdf1..79bbc21c1d01671b6c7d289c3c1ff44db449e4ec 100644 (file)
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
        bool "IOMMU for Renesas IPMMU/IPMMUI"
        default n
        depends on ARM
+       depends on SH_MOBILE || COMPILE_TEST
        select IOMMU_API
        select ARM_DMA_USE_IOMMU
        select SHMOBILE_IPMMU
index 72531f008a5e34ea871e7d3d3569a724af695d86..faf0da4bb3a2f84bfd69c911d8de100028b25b7e 100644 (file)
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
        if (!dev || !dev->dma_mask)
                return false;
 
-       /* No device or no PCI device */
-       if (dev->bus != &pci_bus_type)
+       /* No PCI device */
+       if (!dev_is_pci(dev))
                return false;
 
        devid = get_device_id(dev);
index e46a88700b6824c735967118281c9f7feb6e41b0..8911850c94445fa5adf3b41de168293e5adf7721 100644 (file)
@@ -24,7 +24,7 @@
  *     - v7/v8 long-descriptor format
  *     - Non-secure access to the SMMU
  *     - 4k and 64k pages, with contiguous pte hints.
- *     - Up to 39-bit addressing
+ *     - Up to 42-bit addressing (dependent on VA_BITS)
  *     - Context fault reporting
  */
 
 #define ARM_SMMU_GR1(smmu)             ((smmu)->base + (smmu)->pagesize)
 
 /* Page table bits */
-#define ARM_SMMU_PTE_PAGE              (((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_XN                        (((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT              (((pteval_t)1) << 52)
 #define ARM_SMMU_PTE_AF                        (((pteval_t)1) << 10)
 #define ARM_SMMU_PTE_SH_NS             (((pteval_t)0) << 8)
 #define ARM_SMMU_PTE_SH_OS             (((pteval_t)2) << 8)
 #define ARM_SMMU_PTE_SH_IS             (((pteval_t)3) << 8)
+#define ARM_SMMU_PTE_PAGE              (((pteval_t)3) << 0)
 
 #if PAGE_SIZE == SZ_4K
 #define ARM_SMMU_PTE_CONT_ENTRIES      16
@@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                                   unsigned long pfn, int flags, int stage)
 {
        pte_t *pte, *start;
-       pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+       pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
 
        if (pmd_none(*pmd)) {
                /* Allocate a new set of tables */
@@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
        }
 
        /* If no access, create a faulting entry to avoid TLB fills */
-       if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+       if (flags & IOMMU_EXEC)
+               pteval &= ~ARM_SMMU_PTE_XN;
+       else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
                pteval &= ~ARM_SMMU_PTE_PAGE;
 
        pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1494,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev)
 {
        struct arm_smmu_device *child, *parent, *smmu;
        struct arm_smmu_master *master = NULL;
+       struct iommu_group *group;
+       int ret;
+
+       if (dev->archdata.iommu) {
+               dev_warn(dev, "IOMMU driver already assigned to device\n");
+               return -EINVAL;
+       }
 
        spin_lock(&arm_smmu_devices_lock);
        list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev)
        if (!master)
                return -ENODEV;
 
+       group = iommu_group_alloc();
+       if (IS_ERR(group)) {
+               dev_err(dev, "Failed to allocate IOMMU group\n");
+               return PTR_ERR(group);
+       }
+
+       ret = iommu_group_add_device(group, dev);
+       iommu_group_put(group);
        dev->archdata.iommu = smmu;
-       return 0;
+
+       return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
        dev->archdata.iommu = NULL;
+       iommu_group_remove_device(dev);
 }
 
 static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * allocation (PTRS_PER_PGD).
         */
 #ifdef CONFIG_64BIT
-       /* Current maximum output size of 39 bits */
        smmu->s1_output_size = min(39UL, size);
 #else
        smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        } else {
 #ifdef CONFIG_64BIT
                size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
-               size = min(39, arm_smmu_id_size_to_bits(size));
+               size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
 #else
                size = 32;
 #endif
index 8b452c9676d968ca5650b1ad8ce2128571014ba8..1581565434106027dca83e1e7bae4c640b3febe9 100644 (file)
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
 
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
        /*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
        if (!pdev) {
                pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
                        segment, scope->bus, path->device, path->function);
-               *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
-                               kfree(*devices);
+                               dmar_free_dev_scope(devices, cnt);
                                return ret;
                        }
                        index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
        return 0;
 }
 
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+       if (*devices && *cnt) {
+               while (--*cnt >= 0)
+                       pci_dev_put((*devices)[*cnt]);
+               kfree(*devices);
+               *devices = NULL;
+               *cnt = 0;
+       }
+}
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
        return 0;
 }
 
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+       if (dmaru->devices && dmaru->devices_cnt)
+               dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+       if (dmaru->iommu)
+               free_iommu(dmaru->iommu);
+       kfree(dmaru);
+}
+
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
        struct acpi_dmar_hardware_unit *drhd;
-       int ret = 0;
 
        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
        if (dmaru->include_all)
                return 0;
 
-       ret = dmar_parse_dev_scope((void *)(drhd + 1),
-                               ((void *)drhd) + drhd->header.length,
-                               &dmaru->devices_cnt, &dmaru->devices,
-                               drhd->segment);
-       if (ret) {
-               list_del(&dmaru->list);
-               kfree(dmaru);
-       }
-       return ret;
+       return dmar_parse_dev_scope((void *)(drhd + 1),
+                                   ((void *)drhd) + drhd->header.length,
+                                   &dmaru->devices_cnt, &dmaru->devices,
+                                   drhd->segment);
 }
 
 #ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 int __init dmar_dev_scope_init(void)
 {
        static int dmar_dev_scope_initialized;
-       struct dmar_drhd_unit *drhd, *drhd_n;
+       struct dmar_drhd_unit *drhd;
        int ret = -ENODEV;
 
        if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
        if (list_empty(&dmar_drhd_units))
                goto fail;
 
-       list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+       list_for_each_entry(drhd, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
        static int dmar_table_initialized;
        int ret;
 
-       if (dmar_table_initialized)
-               return 0;
-
-       dmar_table_initialized = 1;
-
-       ret = parse_dmar_table();
-       if (ret) {
-               if (ret != -ENODEV)
-                       pr_info("parse DMAR table failure.\n");
-               return ret;
-       }
+       if (dmar_table_initialized == 0) {
+               ret = parse_dmar_table();
+               if (ret < 0) {
+                       if (ret != -ENODEV)
+                               pr_info("parse DMAR table failure.\n");
+               } else  if (list_empty(&dmar_drhd_units)) {
+                       pr_info("No DMAR devices found\n");
+                       ret = -ENODEV;
+               }
 
-       if (list_empty(&dmar_drhd_units)) {
-               pr_info("No DMAR devices found\n");
-               return -ENODEV;
+               if (ret < 0)
+                       dmar_table_initialized = ret;
+               else
+                       dmar_table_initialized = 1;
        }
 
-       return 0;
+       return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
 }
 
 static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
                dmi_get_system_info(DMI_PRODUCT_VERSION));
 }
 
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
 {
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
        if (ret)
                ret = check_zero_address();
        {
-               struct acpi_table_dmar *dmar;
-
-               dmar = (struct acpi_table_dmar *) dmar_tbl;
-
-               if (ret && irq_remapping_enabled && cpu_has_x2apic &&
-                   dmar->flags & 0x1)
-                       pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
                if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                        iommu_detected = 1;
                        /* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
                        x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
        }
-       early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+       early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
 
        return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ out:
        return err;
 }
 
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
        struct intel_iommu *iommu;
        u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
        return err;
 }
 
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
 {
-       if (!iommu)
-               return;
+       if (iommu->irq) {
+               free_irq(iommu->irq, iommu);
+               irq_set_handler_data(iommu->irq, NULL);
+               destroy_irq(iommu->irq);
+       }
 
-       free_dmar_iommu(iommu);
+       if (iommu->qi) {
+               free_page((unsigned long)iommu->qi->desc);
+               kfree(iommu->qi->desc_status);
+               kfree(iommu->qi);
+       }
 
        if (iommu->reg)
                unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
-               iommu->qi = 0;
+               iommu->qi = NULL;
                return -ENOMEM;
        }
 
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
-               iommu->qi = 0;
+               iommu->qi = NULL;
                return -ENOMEM;
        }
 
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
        "Blocked an interrupt request due to source-id verification failure",
 };
 
-#define MAX_FAULT_REASON_IDX   (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 {
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                        ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 int __init enable_drhd_fault_handling(void)
 {
        struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
 
        /*
         * Enable fault control interrupt.
         */
-       for_each_drhd_unit(drhd) {
-               int ret;
-               struct intel_iommu *iommu = drhd->iommu;
+       for_each_iommu(iommu, drhd) {
                u32 fault_status;
-               ret = dmar_set_interrupt(iommu);
+               int ret = dmar_set_interrupt(iommu);
 
                if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
                return 0;
        return dmar->flags & 0x1;
 }
+
+static int __init dmar_free_unused_resources(void)
+{
+       struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+       /* DMAR units are in use */
+       if (irq_remapping_enabled || intel_iommu_enabled)
+               return 0;
+
+       list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+               list_del(&dmaru->list);
+               dmar_free_drhd(dmaru);
+       }
+
+       return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
 IOMMU_INIT_POST(detect_intel_iommu);
index c857c30da9791e9a01f9630f293e5a0d1315dc47..93072ba44b1d179dff9a486cd728cf16ea645691 100644 (file)
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
         * Use LIODN of the PCI controller while attaching a
         * PCI device.
         */
-       if (dev->bus == &pci_bus_type) {
+       if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                pci_ctl = pci_bus_to_host(pdev->bus);
                /*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
         * Use LIODN of the PCI controller while detaching a
         * PCI device.
         */
-       if (dev->bus == &pci_bus_type) {
+       if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                pci_ctl = pci_bus_to_host(pdev->bus);
                /*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
         * For platform devices we allocate a separate group for
         * each of the devices.
         */
-       if (dev->bus == &pci_bus_type) {
+       if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                /* Don't create device groups for virtual PCI bridges */
                if (pdev->subordinate)
index 59779e19315e1c55eef91bdc0dbe067c1ff701c0..a22c86c867faee78544b99b8528e1f4b9fc183c8 100644 (file)
@@ -63,6 +63,7 @@
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
 #define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH     (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
 
 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)
 
 static inline int agaw_to_width(int agaw)
 {
-       return 30 + agaw * LEVEL_STRIDE;
+       return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
 }
 
 static inline int width_to_agaw(int width)
 {
-       return (width - 30) / LEVEL_STRIDE;
+       return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
 }
 
 static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
 
 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 {
-       return  1 << ((lvl - 1) * LEVEL_STRIDE);
+       return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
        pte->val = 0;
 }
 
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
-       pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
-       pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
-       pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
-       pte->val = (pte->val & ~3) | (prot & 3);
-}
-
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
 #endif
 }
 
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
-       pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
 static inline bool dma_pte_present(struct dma_pte *pte)
 {
        return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@ struct device_domain_info {
 
 static void flush_unmaps_timeout(unsigned long data);
 
-DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
 
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
@@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
        struct dmar_drhd_unit *drhd = NULL;
        int i;
 
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
+       for_each_active_drhd_unit(drhd) {
                if (segment != drhd->segment)
                        continue;
 
@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;
-       int order;
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
 
        } while (start_pfn && start_pfn <= last_pfn);
 
-       order = (large_page - 1) * 9;
-       return order;
+       return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        unsigned long nlongs;
 
        ndomains = cap_ndoms(iommu->cap);
-       pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
-                       ndomains);
+       pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+                iommu->seq_id, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
        spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
-               printk(KERN_ERR "Allocating domain id array failed\n");
+               pr_err("IOMMU%d: allocating domain id array failed\n",
+                      iommu->seq_id);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                        GFP_KERNEL);
        if (!iommu->domains) {
-               printk(KERN_ERR "Allocating domain array failed\n");
+               pr_err("IOMMU%d: allocating domain array failed\n",
+                      iommu->seq_id);
+               kfree(iommu->domain_ids);
+               iommu->domain_ids = NULL;
                return -ENOMEM;
        }
 
@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 static void domain_exit(struct dmar_domain *domain);
 static void vm_domain_exit(struct dmar_domain *domain);
 
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
-       int i;
+       int i, count;
        unsigned long flags;
 
        if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
                        clear_bit(i, iommu->domain_ids);
 
                        spin_lock_irqsave(&domain->iommu_lock, flags);
-                       if (--domain->iommu_count == 0) {
+                       count = --domain->iommu_count;
+                       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+                       if (count == 0) {
                                if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                        vm_domain_exit(domain);
                                else
                                        domain_exit(domain);
                        }
-                       spin_unlock_irqrestore(&domain->iommu_lock, flags);
                }
        }
 
        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
 
-       if (iommu->irq) {
-               irq_set_handler_data(iommu->irq, NULL);
-               /* This will mask the irq */
-               free_irq(iommu->irq, iommu);
-               destroy_irq(iommu->irq);
-       }
-
        kfree(iommu->domains);
        kfree(iommu->domain_ids);
+       iommu->domains = NULL;
+       iommu->domain_ids = NULL;
 
        g_iommus[iommu->seq_id] = NULL;
 
@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
        if (!si_domain)
                return -EFAULT;
 
-       pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
        for_each_active_iommu(iommu, drhd) {
                ret = iommu_attach_domain(si_domain, iommu);
                if (ret) {
@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
        }
 
        si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+       pr_debug("IOMMU: identity mapping domain is domain %d\n",
+                si_domain->id);
 
        if (hw)
                return 0;
@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
                goto error;
        }
 
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
+       for_each_active_iommu(iommu, drhd) {
                g_iommus[iommu->seq_id] = iommu;
 
                ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
        /*
         * Start from the sane iommu hardware state.
         */
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
-
+       for_each_active_iommu(iommu, drhd) {
                /*
                 * If the queued invalidation is already initialized by us
                 * (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
                dmar_disable_qi(iommu);
        }
 
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
-
+       for_each_active_iommu(iommu, drhd) {
                if (dmar_enable_qi(iommu)) {
                        /*
                         * Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
         *   global invalidate iotlb
         *   enable translation
         */
-       for_each_drhd_unit(drhd) {
+       for_each_iommu(iommu, drhd) {
                if (drhd->ignored) {
                        /*
                         * we always have to disable PMRs or DMA may fail on
                         * this device
                         */
                        if (force_on)
-                               iommu_disable_protect_mem_regions(drhd->iommu);
+                               iommu_disable_protect_mem_regions(iommu);
                        continue;
                }
-               iommu = drhd->iommu;
 
                iommu_flush_write_buffer(iommu);
 
@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)
 
        return 0;
 error:
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-               iommu = drhd->iommu;
-               free_iommu(iommu);
-       }
+       for_each_active_iommu(iommu, drhd)
+               free_dmar_iommu(iommu);
+       kfree(deferred_flush);
        kfree(g_iommus);
        return ret;
 }
@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
        struct pci_dev *pdev;
        int found;
 
-       if (unlikely(dev->bus != &pci_bus_type))
+       if (unlikely(!dev_is_pci(dev)))
                return 1;
 
        pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
                }
        }
 
-       for_each_drhd_unit(drhd) {
+       for_each_active_drhd_unit(drhd) {
                int i;
-               if (drhd->ignored || drhd->include_all)
+               if (drhd->include_all)
                        continue;
 
                for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@ static int __init
 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
 {
        struct acpi_dmar_reserved_memory *rmrr;
-       int ret;
 
        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
-       ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-               ((void *)rmrr) + rmrr->header.length,
-               &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
-       if (ret || (rmrru->devices_cnt == 0)) {
-               list_del(&rmrru->list);
-               kfree(rmrru);
-       }
-       return ret;
+       return dmar_parse_dev_scope((void *)(rmrr + 1),
+                                   ((void *)rmrr) + rmrr->header.length,
+                                   &rmrru->devices_cnt, &rmrru->devices,
+                                   rmrr->segment);
 }
 
 static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
 
 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
 {
-       int rc;
        struct acpi_dmar_atsr *atsr;
 
        if (atsru->include_all)
                return 0;
 
        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-       rc = dmar_parse_dev_scope((void *)(atsr + 1),
-                               (void *)atsr + atsr->header.length,
-                               &atsru->devices_cnt, &atsru->devices,
-                               atsr->segment);
-       if (rc || !atsru->devices_cnt) {
-               list_del(&atsru->list);
-               kfree(atsru);
+       return dmar_parse_dev_scope((void *)(atsr + 1),
+                                   (void *)atsr + atsr->header.length,
+                                   &atsru->devices_cnt, &atsru->devices,
+                                   atsr->segment);
+}
+
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
+{
+       dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+       kfree(atsru);
+}
+
+static void intel_iommu_free_dmars(void)
+{
+       struct dmar_rmrr_unit *rmrru, *rmrr_n;
+       struct dmar_atsr_unit *atsru, *atsr_n;
+
+       list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+               list_del(&rmrru->list);
+               dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+               kfree(rmrru);
        }
 
-       return rc;
+       list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+               list_del(&atsru->list);
+               intel_iommu_free_atsr(atsru);
+       }
 }
 
 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@ found:
 
 int __init dmar_parse_rmrr_atsr_dev(void)
 {
-       struct dmar_rmrr_unit *rmrr, *rmrr_n;
-       struct dmar_atsr_unit *atsr, *atsr_n;
+       struct dmar_rmrr_unit *rmrr;
+       struct dmar_atsr_unit *atsr;
        int ret = 0;
 
-       list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+       list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
                ret = rmrr_parse_dev(rmrr);
                if (ret)
                        return ret;
        }
 
-       list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+       list_for_each_entry(atsr, &dmar_atsr_units, list) {
                ret = atsr_parse_dev(atsr);
                if (ret)
                        return ret;
@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {
 
 int __init intel_iommu_init(void)
 {
-       int ret = 0;
+       int ret = -ENODEV;
        struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
 
        /* VT-d is required for a TXT/tboot launch, so enforce that */
        force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
        if (dmar_table_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR table\n");
-               return  -ENODEV;
+               goto out_free_dmar;
        }
 
        /*
         * Disable translation if already enabled prior to OS handover.
         */
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu;
-
-               if (drhd->ignored)
-                       continue;
-
-               iommu = drhd->iommu;
+       for_each_active_iommu(iommu, drhd)
                if (iommu->gcmd & DMA_GCMD_TE)
                        iommu_disable_translation(iommu);
-       }
 
        if (dmar_dev_scope_init() < 0) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR device scope\n");
-               return  -ENODEV;
+               goto out_free_dmar;
        }
 
        if (no_iommu || dmar_disabled)
-               return -ENODEV;
+               goto out_free_dmar;
 
        if (iommu_init_mempool()) {
                if (force_on)
                        panic("tboot: Failed to initialize iommu memory\n");
-               return  -ENODEV;
+               goto out_free_dmar;
        }
 
        if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
        if (dmar_init_reserved_ranges()) {
                if (force_on)
                        panic("tboot: Failed to reserve iommu ranges\n");
-               return  -ENODEV;
+               goto out_free_mempool;
        }
 
        init_no_remapping_devices();
@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
                printk(KERN_ERR "IOMMU: dmar init failed\n");
-               put_iova_domain(&reserved_iova_list);
-               iommu_exit_mempool();
-               return ret;
+               goto out_free_reserved_range;
        }
        printk(KERN_INFO
        "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
        intel_iommu_enabled = 1;
 
        return 0;
+
+out_free_reserved_range:
+       put_iova_domain(&reserved_iova_list);
+out_free_mempool:
+       iommu_exit_mempool();
+out_free_dmar:
+       intel_iommu_free_dmars();
+       return ret;
 }
 
 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 }
 
 /* domain id for virtual machine, it won't be set in context */
-static unsigned long vm_domid;
+static atomic_t vm_domid = ATOMIC_INIT(0);
 
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
        if (!domain)
                return NULL;
 
-       domain->id = vm_domid++;
+       domain->id = atomic_inc_return(&vm_domid);
        domain->nid = -1;
        memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
        domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
        unsigned long i;
        unsigned long ndomains;
 
-       for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
-               iommu = drhd->iommu;
-
+       for_each_active_iommu(iommu, drhd) {
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(i, iommu->domain_ids, ndomains) {
                        if (iommu->domains[i] == domain) {
index 0cb7528b30a134416b4343b08a458f80dcb6aecf..ef5f65dbafe92d81c9d6fefa9990b76662347c50 100644 (file)
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
 
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
+static int __init parse_ioapics_under_ir(void);
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
        struct irq_cfg *cfg = irq_get_chip_data(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
 {
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct irq_cfg *cfg = irq_get_chip_data(irq);
-       u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
-       int i;
+       int index;
 
        if (!count || !irq_iommu)
                return -1;
 
-       /*
-        * start the IRTE search from index 0.
-        */
-       index = start_index = 0;
-
        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        }
 
        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-       do {
-               for (i = index; i < index + count; i++)
-                       if  (table->base[i].present)
-                               break;
-               /* empty index found */
-               if (i == index + count)
-                       break;
-
-               index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-               if (index == start_index) {
-                       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-                       printk(KERN_ERR "can't allocate an IRTE\n");
-                       return -1;
-               }
-       } while (1);
-
-       for (i = index; i < index + count; i++)
-               table->base[i].present = 1;
-
-       cfg->remapped = 1;
-       irq_iommu->iommu = iommu;
-       irq_iommu->irte_index =  index;
-       irq_iommu->sub_handle = 0;
-       irq_iommu->irte_mask = mask;
-
+       index = bitmap_find_free_region(table->bitmap,
+                                       INTR_REMAP_TABLE_ENTRIES, mask);
+       if (index < 0) {
+               pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+       } else {
+               cfg->remapped = 1;
+               irq_iommu->iommu = iommu;
+               irq_iommu->irte_index =  index;
+               irq_iommu->sub_handle = 0;
+               irq_iommu->irte_mask = mask;
+       }
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return index;
@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }
+       bitmap_release_region(iommu->ir_table->bitmap, index,
+                             irq_iommu->irte_mask);
 
        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
                return -1;
        }
 
-       set_irte_sid(irte, 1, 0, sid);
+       set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
 
        return 0;
 }
@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 {
        struct ir_table *ir_table;
        struct page *pages;
+       unsigned long *bitmap;
 
        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);
@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
                                 INTR_REMAP_PAGE_ORDER);
 
        if (!pages) {
-               printk(KERN_ERR "failed to allocate pages of order %d\n",
-                      INTR_REMAP_PAGE_ORDER);
+               pr_err("IR%d: failed to allocate pages of order %d\n",
+                      iommu->seq_id, INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }
 
+       bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+                        sizeof(long), GFP_ATOMIC);
+       if (bitmap == NULL) {
+               pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+               __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+               kfree(ir_table);
+               return -ENOMEM;
+       }
+
        ir_table->base = page_address(pages);
+       ir_table->bitmap = bitmap;
 
        iommu_set_irq_remapping(iommu, mode);
        return 0;
@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
 static int __init intel_irq_remapping_supported(void)
 {
        struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
 
        if (disable_irq_remap)
                return 0;
@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
        if (!dmar_ir_support())
                return 0;
 
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
+       for_each_iommu(iommu, drhd)
                if (!ecap_ir_support(iommu->ecap))
                        return 0;
-       }
 
        return 1;
 }
@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
 static int __init intel_enable_irq_remapping(void)
 {
        struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
        bool x2apic_present;
        int setup = 0;
        int eim = 0;
@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
        }
 
        if (x2apic_present) {
+               pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
                eim = !dmar_x2apic_optout();
                if (!eim)
                        printk(KERN_WARNING
@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
                                "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
        }
 
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
+       for_each_iommu(iommu, drhd) {
                /*
                 * If the queued invalidation is already initialized,
                 * shouldn't disable it.
@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
        /*
         * check for the Interrupt-remapping support
         */
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
+       for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;
 
@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
        /*
         * Enable queued invalidation for all the DRHD's.
         */
-       for_each_drhd_unit(drhd) {
-               int ret;
-               struct intel_iommu *iommu = drhd->iommu;
-               ret = dmar_enable_qi(iommu);
+       for_each_iommu(iommu, drhd) {
+               int ret = dmar_enable_qi(iommu);
 
                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
+       for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;
 
@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
  * Finds the assocaition between IOAPIC's and its Interrupt-remapping
  * hardware unit.
  */
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
 {
        struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
        int ir_supported = 0;
        int ioapic_idx;
 
-       for_each_drhd_unit(drhd) {
-               struct intel_iommu *iommu = drhd->iommu;
-
+       for_each_iommu(iommu, drhd)
                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;
 
                        ir_supported = 1;
                }
-       }
 
        if (!ir_supported)
                return 0;
@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
        return 1;
 }
 
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
 {
        if (!irq_remapping_enabled)
                return 0;
index 39f81aeefcd698f6389f0cf12799491be8e2867d..228632c99adbae9080f9abea301c1c7335121f48 100644 (file)
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
                return do_setup_msix_irqs(dev, nvec);
 }
 
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
 {
        /*
         * Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
                                             vector, attr);
 }
 
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
-                             bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+                                    const struct cpumask *mask, bool force)
 {
        if (!config_enabled(CONFIG_SMP) || !remap_ops ||
            !remap_ops->set_affinity)
index ee249bc959f84bb2bc50eb652ea1b34d449db77a..e550ccb7634e91d312cd9377ff499499d37e38ed 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/export.h>
 #include <linux/limits.h>
 #include <linux/of.h>
+#include <linux/of_iommu.h>
 
 /**
  * of_get_dma_window - Parse *dma-window property and returns 0 if found.
index d572863dfccd47d767aed86cfe9a04e155c0233a..7a3b928fad1c81eadb788d45a281bacaca3f8949 100644 (file)
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
                kmem_cache_destroy(l1cache);
                return -ENOMEM;
        }
-       archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+       archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
        if (!archdata) {
                kmem_cache_destroy(l1cache);
                kmem_cache_destroy(l2cache);
                return -ENOMEM;
        }
        spin_lock_init(&archdata->attach_lock);
-       archdata->attached = NULL;
        archdata->ipmmu = ipmmu;
        ipmmu_archdata = archdata;
        bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
index 8321f89596c493909ac36c3ce24a7b2bc245884e..e3bc2e19b6dd8f2cd7bd91b30120ecbba477abe3 100644 (file)
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
        if (!ipmmu)
                return;
 
-       mutex_lock(&ipmmu->flush_lock);
+       spin_lock(&ipmmu->flush_lock);
        if (ipmmu->tlb_enabled)
                ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
        else
                ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
-       mutex_unlock(&ipmmu->flush_lock);
+       spin_unlock(&ipmmu->flush_lock);
 }
 
 void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
        if (!ipmmu)
                return;
 
-       mutex_lock(&ipmmu->flush_lock);
+       spin_lock(&ipmmu->flush_lock);
        switch (size) {
        default:
                ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
        }
        ipmmu_reg_write(ipmmu, IMTTBR, phys);
        ipmmu_reg_write(ipmmu, IMASID, asid);
-       mutex_unlock(&ipmmu->flush_lock);
+       spin_unlock(&ipmmu->flush_lock);
 }
 
 static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "cannot allocate device data\n");
                return -ENOMEM;
        }
-       mutex_init(&ipmmu->flush_lock);
+       spin_lock_init(&ipmmu->flush_lock);
        ipmmu->dev = &pdev->dev;
        ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
                                                resource_size(res));
index 4d53684673e1ff1649c0bc1f44a0d819975d9cc6..9524743ca1fba781a695b4a15c950a12fcd87b18 100644 (file)
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
        struct device *dev;
        void __iomem *ipmmu_base;
        int tlb_enabled;
-       struct mutex flush_lock;
+       spinlock_t flush_lock;
        const char * const *dev_names;
        unsigned int num_dev_names;
 };
index 3c972b2f989335e4df54aa8436cbd99a7f19ba09..e387f41a9cb7667f3542f510b4a954d299e1d316 100644 (file)
@@ -242,18 +242,14 @@ EXPORT_SYMBOL_GPL(led_trigger_unregister);
 void led_trigger_event(struct led_trigger *trig,
                        enum led_brightness brightness)
 {
-       struct list_head *entry;
+       struct led_classdev *led_cdev;
 
        if (!trig)
                return;
 
        read_lock(&trig->leddev_list_lock);
-       list_for_each(entry, &trig->led_cdevs) {
-               struct led_classdev *led_cdev;
-
-               led_cdev = list_entry(entry, struct led_classdev, trig_list);
+       list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
                led_set_brightness(led_cdev, brightness);
-       }
        read_unlock(&trig->leddev_list_lock);
 }
 EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -264,16 +260,13 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
                             int oneshot,
                             int invert)
 {
-       struct list_head *entry;
+       struct led_classdev *led_cdev;
 
        if (!trig)
                return;
 
        read_lock(&trig->leddev_list_lock);
-       list_for_each(entry, &trig->led_cdevs) {
-               struct led_classdev *led_cdev;
-
-               led_cdev = list_entry(entry, struct led_classdev, trig_list);
+       list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
                if (oneshot)
                        led_blink_set_oneshot(led_cdev, delay_on, delay_off,
                                              invert);
index a97263e902ffc6b927db6b935cb290902909f21e..2ec34cfcedcee613aebd97bae8c06966b826f404 100644 (file)
@@ -152,12 +152,26 @@ static void lp5521_load_engine(struct lp55xx_chip *chip)
        lp5521_wait_opmode_done();
 }
 
-static void lp5521_stop_engine(struct lp55xx_chip *chip)
+static void lp5521_stop_all_engines(struct lp55xx_chip *chip)
 {
        lp55xx_write(chip, LP5521_REG_OP_MODE, 0);
        lp5521_wait_opmode_done();
 }
 
+static void lp5521_stop_engine(struct lp55xx_chip *chip)
+{
+       enum lp55xx_engine_index idx = chip->engine_idx;
+       u8 mask[] = {
+               [LP55XX_ENGINE_1] = LP5521_MODE_R_M,
+               [LP55XX_ENGINE_2] = LP5521_MODE_G_M,
+               [LP55XX_ENGINE_3] = LP5521_MODE_B_M,
+       };
+
+       lp55xx_update_bits(chip, LP5521_REG_OP_MODE, mask[idx], 0);
+
+       lp5521_wait_opmode_done();
+}
+
 static void lp5521_run_engine(struct lp55xx_chip *chip, bool start)
 {
        int ret;
@@ -564,7 +578,7 @@ static int lp5521_remove(struct i2c_client *client)
        struct lp55xx_led *led = i2c_get_clientdata(client);
        struct lp55xx_chip *chip = led->chip;
 
-       lp5521_stop_engine(chip);
+       lp5521_stop_all_engines(chip);
        lp55xx_unregister_sysfs(chip);
        lp55xx_unregister_leds(led, chip);
        lp55xx_deinit_device(chip);
index 3a0bc886a87a2c88782c9b075f0eba935271ebc2..4ade66a2d9d4758b314c45ed52a222ff0cb0aeca 100644 (file)
@@ -195,12 +195,26 @@ static void lp5523_load_engine_and_select_page(struct lp55xx_chip *chip)
        lp55xx_write(chip, LP5523_REG_PROG_PAGE_SEL, page_sel[idx]);
 }
 
-static void lp5523_stop_engine(struct lp55xx_chip *chip)
+static void lp5523_stop_all_engines(struct lp55xx_chip *chip)
 {
        lp55xx_write(chip, LP5523_REG_OP_MODE, 0);
        lp5523_wait_opmode_done();
 }
 
+static void lp5523_stop_engine(struct lp55xx_chip *chip)
+{
+       enum lp55xx_engine_index idx = chip->engine_idx;
+       u8 mask[] = {
+               [LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
+               [LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
+               [LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
+       };
+
+       lp55xx_update_bits(chip, LP5523_REG_OP_MODE, mask[idx], 0);
+
+       lp5523_wait_opmode_done();
+}
+
 static void lp5523_turn_off_channels(struct lp55xx_chip *chip)
 {
        int i;
@@ -311,7 +325,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
        }
 
 out:
-       lp5523_stop_engine(chip);
+       lp5523_stop_all_engines(chip);
        return ret;
 }
 
@@ -782,7 +796,7 @@ static int lp5523_remove(struct i2c_client *client)
        struct lp55xx_led *led = i2c_get_clientdata(client);
        struct lp55xx_chip *chip = led->chip;
 
-       lp5523_stop_engine(chip);
+       lp5523_stop_all_engines(chip);
        lp55xx_unregister_sysfs(chip);
        lp55xx_unregister_leds(led, chip);
        lp55xx_deinit_device(chip);
index 9acc6bb7deef01f153e5c71044ac93be7e983ba9..88317b4f7bf3abf70e4337a12aa682646e8a2e56 100644 (file)
@@ -210,6 +210,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
 {
        struct lp55xx_chip *chip = context;
        struct device *dev = &chip->cl->dev;
+       enum lp55xx_engine_index idx = chip->engine_idx;
 
        if (!fw) {
                dev_err(dev, "firmware request failed\n");
@@ -219,6 +220,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
        /* handling firmware data is chip dependent */
        mutex_lock(&chip->lock);
 
+       chip->engines[idx - 1].mode = LP55XX_ENGINE_LOAD;
        chip->fw = fw;
        if (chip->cfg->firmware_cb)
                chip->cfg->firmware_cb(chip);
index fa9b439323bd06d0d126a48a7828e4b47665c6b5..ca87a1b4a0db228896ca19dc410885eaa53eb7f3 100644 (file)
@@ -117,9 +117,7 @@ static void mc13xxx_led_work(struct work_struct *work)
                BUG();
        }
 
-       mc13xxx_lock(led->master);
        mc13xxx_reg_rmw(led->master, reg, mask << shift, value << shift);
-       mc13xxx_unlock(led->master);
 }
 
 static void mc13xxx_led_set(struct led_classdev *led_cdev,
@@ -132,75 +130,6 @@ static void mc13xxx_led_set(struct led_classdev *led_cdev,
        schedule_work(&led->work);
 }
 
-static int __init mc13xxx_led_setup(struct mc13xxx_led *led, int max_current)
-{
-       int shift, mask, reg, ret, bank;
-
-       switch (led->id) {
-       case MC13783_LED_MD:
-               reg = MC13XXX_REG_LED_CONTROL(2);
-               shift = 0;
-               mask = 0x07;
-               break;
-       case MC13783_LED_AD:
-               reg = MC13XXX_REG_LED_CONTROL(2);
-               shift = 3;
-               mask = 0x07;
-               break;
-       case MC13783_LED_KP:
-               reg = MC13XXX_REG_LED_CONTROL(2);
-               shift = 6;
-               mask = 0x07;
-               break;
-       case MC13783_LED_R1:
-       case MC13783_LED_G1:
-       case MC13783_LED_B1:
-       case MC13783_LED_R2:
-       case MC13783_LED_G2:
-       case MC13783_LED_B2:
-       case MC13783_LED_R3:
-       case MC13783_LED_G3:
-       case MC13783_LED_B3:
-               bank = (led->id - MC13783_LED_R1) / 3;
-               reg = MC13XXX_REG_LED_CONTROL(3) + bank;
-               shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
-               mask = 0x03;
-               break;
-       case MC13892_LED_MD:
-               reg = MC13XXX_REG_LED_CONTROL(0);
-               shift = 9;
-               mask = 0x07;
-               break;
-       case MC13892_LED_AD:
-               reg = MC13XXX_REG_LED_CONTROL(0);
-               shift = 21;
-               mask = 0x07;
-               break;
-       case MC13892_LED_KP:
-               reg = MC13XXX_REG_LED_CONTROL(1);
-               shift = 9;
-               mask = 0x07;
-               break;
-       case MC13892_LED_R:
-       case MC13892_LED_G:
-       case MC13892_LED_B:
-               bank = (led->id - MC13892_LED_R) / 2;
-               reg = MC13XXX_REG_LED_CONTROL(2) + bank;
-               shift = ((led->id - MC13892_LED_R) - bank * 2) * 12 + 9;
-               mask = 0x07;
-               break;
-       default:
-               BUG();
-       }
-
-       mc13xxx_lock(led->master);
-       ret = mc13xxx_reg_rmw(led->master, reg, mask << shift,
-                             max_current << shift);
-       mc13xxx_unlock(led->master);
-
-       return ret;
-}
-
 static int __init mc13xxx_led_probe(struct platform_device *pdev)
 {
        struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -233,31 +162,22 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
        leds->num_leds = num_leds;
        platform_set_drvdata(pdev, leds);
 
-       mc13xxx_lock(mcdev);
        for (i = 0; i < devtype->num_regs; i++) {
                reg = pdata->led_control[i];
                WARN_ON(reg >= (1 << 24));
                ret = mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), reg);
                if (ret)
-                       break;
-       }
-       mc13xxx_unlock(mcdev);
-
-       if (ret) {
-               dev_err(&pdev->dev, "Unable to init LED driver\n");
-               return ret;
+                       return ret;
        }
 
        for (i = 0; i < num_leds; i++) {
                const char *name, *trig;
-               char max_current;
 
                ret = -EINVAL;
 
                id = pdata->led[i].id;
                name = pdata->led[i].name;
                trig = pdata->led[i].default_trigger;
-               max_current = pdata->led[i].max_current;
 
                if ((id > devtype->led_max) || (id < devtype->led_min)) {
                        dev_err(&pdev->dev, "Invalid ID %i\n", id);
@@ -280,11 +200,6 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
 
                INIT_WORK(&leds->led[i].work, mc13xxx_led_work);
 
-               ret = mc13xxx_led_setup(&leds->led[i], max_current);
-               if (ret) {
-                       dev_err(&pdev->dev, "Unable to setup LED %i\n", id);
-                       break;
-               }
                ret = led_classdev_register(pdev->dev.parent,
                                            &leds->led[i].cdev);
                if (ret) {
@@ -313,10 +228,8 @@ static int mc13xxx_led_remove(struct platform_device *pdev)
                cancel_work_sync(&leds->led[i].work);
        }
 
-       mc13xxx_lock(mcdev);
        for (i = 0; i < leds->devtype->num_regs; i++)
                mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), 0);
-       mc13xxx_unlock(mcdev);
 
        return 0;
 }
index b31d8e99c41992c77610fe5ca1694211035ea592..605047428b5ad75e2d67add0edaecb912d35581a 100644 (file)
@@ -66,9 +66,11 @@ static void led_pwm_set(struct led_classdev *led_cdev,
        struct led_pwm_data *led_dat =
                container_of(led_cdev, struct led_pwm_data, cdev);
        unsigned int max = led_dat->cdev.max_brightness;
-       unsigned int period =  led_dat->period;
+       unsigned long long duty =  led_dat->period;
 
-       led_dat->duty = brightness * period / max;
+       duty *= brightness;
+       do_div(duty, max);
+       led_dat->duty = duty;
 
        if (led_dat->can_sleep)
                schedule_work(&led_dat->work);
@@ -85,11 +87,10 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
 static int led_pwm_create_of(struct platform_device *pdev,
                             struct led_pwm_priv *priv)
 {
-       struct device_node *node = pdev->dev.of_node;
        struct device_node *child;
        int ret;
 
-       for_each_child_of_node(node, child) {
+       for_each_child_of_node(pdev->dev.of_node, child) {
                struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
 
                led_dat->cdev.name = of_get_property(child, "label",
index 87cf215af798c97076df857b0609292c1f752cfe..98174e7240ee9f41b8a4d0cddae494ebff99da65 100644 (file)
 #include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/platform_data/leds-s3c24xx.h>
 
-#include <mach/hardware.h>
 #include <mach/regs-gpio.h>
 #include <plat/gpio-cfg.h>
-#include <linux/platform_data/leds-s3c24xx.h>
 
 /* our context */
 
index 8cc304f36728a4e217cbf6716f893a3e6f5aed0c..3d9e267a56c428580bfaa074ce08053bd86ae34a 100644 (file)
@@ -4,77 +4,87 @@
  * The TCA6507 is a programmable LED controller that can drive 7
  * separate lines either by holding them low, or by pulsing them
  * with modulated width.
- * The modulation can be varied in a simple pattern to produce a blink or
- * double-blink.
+ * The modulation can be varied in a simple pattern to produce a
+ * blink or double-blink.
  *
- * This driver can configure each line either as a 'GPIO' which is out-only
- * (no pull-up) or as an LED with variable brightness and hardware-assisted
- * blinking.
+ * This driver can configure each line either as a 'GPIO' which is
+ * out-only (pull-up resistor required) or as an LED with variable
+ * brightness and hardware-assisted blinking.
  *
- * Apart from OFF and ON there are three programmable brightness levels which
- * can be programmed from 0 to 15 and indicate how many 500usec intervals in
- * each 8msec that the led is 'on'.  The levels are named MASTER, BANK0 and
- * BANK1.
+ * Apart from OFF and ON there are three programmable brightness
+ * levels which can be programmed from 0 to 15 and indicate how many
+ * 500usec intervals in each 8msec that the led is 'on'.  The levels
+ * are named MASTER, BANK0 and BANK1.
  *
- * There are two different blink rates that can be programmed, each with
- * separate time for rise, on, fall, off and second-off.  Thus if 3 or more
- * different non-trivial rates are required, software must be used for the extra
- * rates. The two different blink rates must align with the two levels BANK0 and
- * BANK1.
- * This driver does not support double-blink so 'second-off' always matches
- * 'off'.
+ * There are two different blink rates that can be programmed, each
+ * with separate time for rise, on, fall, off and second-off.  Thus if
+ * 3 or more different non-trivial rates are required, software must
+ * be used for the extra rates. The two different blink rates must
+ * align with the two levels BANK0 and BANK1.  This driver does not
+ * support double-blink so 'second-off' always matches 'off'.
  *
- * Only 16 different times can be programmed in a roughly logarithmic scale from
- * 64ms to 16320ms.  To be precise the possible times are:
+ * Only 16 different times can be programmed in a roughly logarithmic
+ * scale from 64ms to 16320ms.  To be precise the possible times are:
  *    0, 64, 128, 192, 256, 384, 512, 768,
  *    1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
  *
- * Times that cannot be closely matched with these must be
- * handled in software.  This driver allows 12.5% error in matching.
+ * Times that cannot be closely matched with these must be handled in
+ * software.  This driver allows 12.5% error in matching.
  *
- * This driver does not allow rise/fall rates to be set explicitly.  When trying
- * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
- * 'hold' times are chosen to get a close match.  If the target delay is even,
- * the 'change' number will be the smaller; if odd, the 'hold' number will be
- * the smaller.
-
- * Choosing pairs of delays with 12.5% errors allows us to match delays in the
- * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
- * 26% of the achievable sums can be matched by multiple pairings. For example
- * 1536 == 1536+0, 1024+512, or 768+768.  This driver will always choose the
- * pairing with the least maximum - 768+768 in this case.  Other pairings are
- * not available.
+ * This driver does not allow rise/fall rates to be set explicitly.
+ * When trying to match a given 'on' or 'off' period, an appropriate
+ * pair of 'change' and 'hold' times are chosen to get a close match.
+ * If the target delay is even, the 'change' number will be the
+ * smaller; if odd, the 'hold' number will be the smaller.
+ *
+ * Choosing pairs of delays with 12.5% errors allows us to match
+ * delays in the ranges: 56-72, 112-144, 168-216, 224-27504,
+ * 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings.
+ * For example 1536 == 1536+0, 1024+512, or 768+768.
+ * This driver will always choose the pairing with the least
+ * maximum - 768+768 in this case.  Other pairings are not available.
  *
- * Access to the 3 levels and 2 blinks are on a first-come, first-served basis.
- * Access can be shared by multiple leds if they have the same level and
- * either same blink rates, or some don't blink.
- * When a led changes, it relinquishes access and tries again, so it might
- * lose access to hardware blink.
- * If a blink engine cannot be allocated, software blink is used.
- * If the desired brightness cannot be allocated, the closest available non-zero
- * brightness is used.  As 'full' is always available, the worst case would be
- * to have two different blink rates at '1', with Max at '2', then other leds
- * will have to choose between '2' and '16'.  Hopefully this is not likely.
+ * Access to the 3 levels and 2 blinks are on a first-come,
+ * first-served basis.  Access can be shared by multiple leds if they
+ * have the same level and either same blink rates, or some don't
+ * blink.  When a led changes, it relinquishes access and tries again,
+ * so it might lose access to hardware blink.
  *
- * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
- * and LEDs using the blink.  It can only be reprogrammed when the appropriate
- * counter is zero.  The MASTER level has a single usage count.
+ * If a blink engine cannot be allocated, software blink is used.  If
+ * the desired brightness cannot be allocated, the closest available
+ * non-zero brightness is used.  As 'full' is always available, the
+ * worst case would be to have two different blink rates at '1', with
+ * Max at '2', then other leds will have to choose between '2' and
+ * '16'.  Hopefully this is not likely.
  *
- * Each Led has programmable 'on' and 'off' time as milliseconds.  With each
- * there is a flag saying if it was explicitly requested or defaulted.
- * Similarly the banks know if each time was explicit or a default.  Defaults
- * are permitted to be changed freely - they are not recognised when matching.
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the
+ * brightness and LEDs using the blink.  It can only be reprogrammed
+ * when the appropriate counter is zero.  The MASTER level has a
+ * single usage count.
  *
+ * Each LED has programmable 'on' and 'off' time as milliseconds.
+ * With each there is a flag saying if it was explicitly requested or
+ * defaulted.  Similarly the banks know if each time was explicit or a
+ * default.  Defaults are permitted to be changed freely - they are
+ * not recognised when matching.
  *
- * An led-tca6507 device must be provided with platform data.  This data
- * lists for each output: the name, default trigger, and whether the signal
- * is being used as a GPiO rather than an led.  'struct led_plaform_data'
- * is used for this.  If 'name' is NULL, the output isn't used.  If 'flags'
- * is TCA6507_MAKE_CPIO, the output is a GPO.
- * The "struct led_platform_data" can be embedded in a
- * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPiOs,
- * and a 'setup' callback which is called once the GPiOs are available.
  *
+ * An led-tca6507 device must be provided with platform data or
+ * configured via devicetree.
+ *
+ * The platform-data lists for each output: the name, default trigger,
+ * and whether the signal is being used as a GPIO rather than an LED.
+ * 'struct led_platform_data' is used for this.  If 'name' is NULL, the
+ * output isn't used.  If 'flags' is TCA6507_MAKE_GPIO, the output is
+ * a GPO.  The "struct led_platform_data" can be embedded in a "struct
+ * tca6507_platform_data" which adds a 'gpio_base' for the GPIOs, and
+ * a 'setup' callback which is called once the GPIOs are available.
+ *
+ * When configured via devicetree there is one child for each output.
+ * The "reg" determines the output number and "compatible" determines
+ * whether it is an LED or a GPIO.  "linux,default-trigger" can set a
+ * default trigger.
  */
 
 #include <linux/module.h>
@@ -192,17 +202,18 @@ MODULE_DEVICE_TABLE(i2c, tca6507_id);
 static int choose_times(int msec, int *c1p, int *c2p)
 {
        /*
-        * Choose two timecodes which add to 'msec' as near as possible.
-        * The first returned is the 'on' or 'off' time.  The second is to be
-        * used as a 'fade-on' or 'fade-off' time.  If 'msec' is even,
-        * the first will not be smaller than the second.  If 'msec' is odd,
-        * the first will not be larger than the second.
-        * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
-        * otherwise return the sum that was achieved, plus 1 if the first is
-        * smaller.
-        * If two possibilities are equally good (e.g. 512+0, 256+256), choose
-        * the first pair so there is more change-time visible (i.e. it is
-        * softer).
+        * Choose two timecodes which add to 'msec' as near as
+        * possible.  The first returned is the 'on' or 'off' time.
+        * The second is to be used as a 'fade-on' or 'fade-off' time.
+        * If 'msec' is even, the first will not be smaller than the
+        * second.  If 'msec' is odd, the first will not be larger
+        * than the second.
+        * If we cannot get a sum within 1/8 of 'msec' fail with
+        * -EINVAL, otherwise return the sum that was achieved, plus 1
+        * if the first is smaller.
+        * If two possibilities are equally good (e.g. 512+0,
+        * 256+256), choose the first pair so there is more
+        * change-time visible (i.e. it is softer).
         */
        int c1, c2;
        int tmax = msec * 9 / 8;
@@ -255,8 +266,8 @@ static int choose_times(int msec, int *c1p, int *c2p)
 }
 
 /*
- * Update the register file with the appropriate 3-bit state for
- * the given led.
+ * Update the register file with the appropriate 3-bit state for the
+ * given led.
  */
 static void set_select(struct tca6507_chip *tca, int led, int val)
 {
@@ -274,9 +285,9 @@ static void set_select(struct tca6507_chip *tca, int led, int val)
        }
 }
 
-/* Update the register file with the appropriate 4-bit code for
- * one bank or other.  This can be used for timers, for levels, or
- * for initialisation.
+/* Update the register file with the appropriate 4-bit code for one
+ * bank or other.  This can be used for timers, for levels, or for
+ * initialization.
  */
 static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
 {
@@ -309,7 +320,7 @@ static void set_level(struct tca6507_chip *tca, int bank, int level)
        tca->bank[bank].level = level;
 }
 
-/* Record all relevant time code for a given bank */
+/* Record all relevant time codes for a given bank */
 static void set_times(struct tca6507_chip *tca, int bank)
 {
        int c1, c2;
@@ -317,7 +328,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
 
        result = choose_times(tca->bank[bank].ontime, &c1, &c2);
        dev_dbg(&tca->client->dev,
-               "Chose on  times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+               "Chose on  times %d(%d) %d(%d) for %dms\n",
+               c1, time_codes[c1],
                c2, time_codes[c2], tca->bank[bank].ontime);
        set_code(tca, TCA6507_FADE_ON, bank, c2);
        set_code(tca, TCA6507_FULL_ON, bank, c1);
@@ -325,7 +337,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
 
        result = choose_times(tca->bank[bank].offtime, &c1, &c2);
        dev_dbg(&tca->client->dev,
-               "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+               "Chose off times %d(%d) %d(%d) for %dms\n",
+               c1, time_codes[c1],
                c2, time_codes[c2], tca->bank[bank].offtime);
        set_code(tca, TCA6507_FADE_OFF, bank, c2);
        set_code(tca, TCA6507_FIRST_OFF, bank, c1);
@@ -373,7 +386,8 @@ static void led_release(struct tca6507_led *led)
 
 static int led_prepare(struct tca6507_led *led)
 {
-       /* Assign this led to a bank, configuring that bank if necessary. */
+       /* Assign this led to a bank, configuring that bank if
+        * necessary. */
        int level = TO_LEVEL(led->led_cdev.brightness);
        struct tca6507_chip *tca = led->chip;
        int c1, c2;
@@ -389,10 +403,10 @@ static int led_prepare(struct tca6507_led *led)
 
        if (led->ontime == 0 || led->offtime == 0) {
                /*
-                * Just set the brightness, choosing first usable bank.
-                * If none perfect, choose best.
-                * Count backwards so we check MASTER bank first
-                * to avoid wasting a timer.
+                * Just set the brightness, choosing first usable
+                * bank.  If none perfect, choose best.  Count
+                * backwards so we check MASTER bank first to avoid
+                * wasting a timer.
                 */
                int best = -1;/* full-on */
                int diff = 15-level;
@@ -433,9 +447,9 @@ static int led_prepare(struct tca6507_led *led)
        }
 
        /*
-        * We have on/off time so we need to try to allocate a timing bank.
-        * First check if times are compatible with hardware and give up if
-        * not.
+        * We have on/off time so we need to try to allocate a timing
+        * bank.  First check if times are compatible with hardware
+        * and give up if not.
         */
        if (choose_times(led->ontime, &c1, &c2) < 0)
                return -EINVAL;
@@ -523,8 +537,8 @@ static int led_assign(struct tca6507_led *led)
        err = led_prepare(led);
        if (err) {
                /*
-                * Can only fail on timer setup.  In that case we need to
-                * re-establish as steady level.
+                * Can only fail on timer setup.  In that case we need
+                * to re-establish as steady level.
                 */
                led->ontime = 0;
                led->offtime = 0;
@@ -594,8 +608,8 @@ static void tca6507_gpio_set_value(struct gpio_chip *gc,
 
        spin_lock_irqsave(&tca->lock, flags);
        /*
-        * 'OFF' is floating high, and 'ON' is pulled down, so it has the
-        * inverse sense of 'val'.
+        * 'OFF' is floating high, and 'ON' is pulled down, so it has
+        * the inverse sense of 'val'.
         */
        set_select(tca, tca->gpio_map[offset],
                   val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
@@ -638,6 +652,9 @@ static int tca6507_probe_gpios(struct i2c_client *client,
        tca->gpio.direction_output = tca6507_gpio_direction_output;
        tca->gpio.set = tca6507_gpio_set_value;
        tca->gpio.dev = &client->dev;
+#ifdef CONFIG_OF_GPIO
+       tca->gpio.of_node = of_node_get(client->dev.of_node);
+#endif
        err = gpiochip_add(&tca->gpio);
        if (err) {
                tca->gpio.ngpio = 0;
@@ -682,7 +699,7 @@ tca6507_led_dt_init(struct i2c_client *client)
                return ERR_PTR(-ENODEV);
 
        tca_leds = devm_kzalloc(&client->dev,
-                       sizeof(struct led_info) * count, GFP_KERNEL);
+                       sizeof(struct led_info) * NUM_LEDS, GFP_KERNEL);
        if (!tca_leds)
                return ERR_PTR(-ENOMEM);
 
@@ -695,9 +712,11 @@ tca6507_led_dt_init(struct i2c_client *client)
                        of_get_property(child, "label", NULL) ? : child->name;
                led.default_trigger =
                        of_get_property(child, "linux,default-trigger", NULL);
-
+               led.flags = 0;
+               if (of_property_match_string(child, "compatible", "gpio") >= 0)
+                       led.flags |= TCA6507_MAKE_GPIO;
                ret = of_property_read_u32(child, "reg", &reg);
-               if (ret != 0)
+               if (ret != 0 || reg < 0 || reg >= NUM_LEDS)
                        continue;
 
                tca_leds[reg] = led;
@@ -708,8 +727,10 @@ tca6507_led_dt_init(struct i2c_client *client)
                return ERR_PTR(-ENOMEM);
 
        pdata->leds.leds = tca_leds;
-       pdata->leds.num_leds = count;
-
+       pdata->leds.num_leds = NUM_LEDS;
+#ifdef CONFIG_GPIOLIB
+       pdata->gpio_base = -1;
+#endif
        return pdata;
 }
 
index 0e9c82523be678eb1604d18a33aa196b724b0ce7..c488b846f831b9605789779310c90380fa3eb039 100644 (file)
@@ -1,7 +1,8 @@
 
 obj-$(CONFIG_BCACHE)   += bcache.o
 
-bcache-y               := alloc.o btree.o bset.o io.o journal.o writeback.o\
-       movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+bcache-y               := alloc.o bset.o btree.o closure.o debug.o extents.o\
+       io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+       util.o writeback.o
 
 CFLAGS_request.o       += -Iblock
index 4c9852d92b0a909d5b103510ae77d55f4bb05ad0..c0d37d0824439f2daff4c049ab82dc6b3c6e646e 100644 (file)
@@ -132,10 +132,16 @@ bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
 {
        BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
 
-       if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
-           CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
-               return false;
+       if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
+               unsigned i;
+
+               for (i = 0; i < RESERVE_NONE; i++)
+                       if (!fifo_full(&ca->free[i]))
+                               goto add;
 
+               return false;
+       }
+add:
        b->prio = 0;
 
        if (can_inc_bucket_gen(b) &&
@@ -162,8 +168,21 @@ static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
        fifo_push(&ca->free_inc, b - ca->buckets);
 }
 
-#define bucket_prio(b)                         \
-       (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+/*
+ * Determines what order we're going to reuse buckets, smallest bucket_prio()
+ * first: we also take into account the number of sectors of live data in that
+ * bucket, and in order for that multiply to make sense we have to scale bucket priorities.
+ *
+ * Thus, we scale the bucket priorities so that the bucket with the smallest
+ * prio is worth 1/8th of what INITIAL_PRIO is worth.
+ */
+
+#define bucket_prio(b)                                                 \
+({                                                                     \
+       unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;     \
+                                                                       \
+       (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);  \
+})
 
 #define bucket_max_cmp(l, r)   (bucket_prio(l) < bucket_prio(r))
 #define bucket_min_cmp(l, r)   (bucket_prio(l) > bucket_prio(r))
@@ -304,6 +323,21 @@ do {                                                                       \
        __set_current_state(TASK_RUNNING);                              \
 } while (0)
 
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+       unsigned i;
+
+       /* Prios/gens are actually the most important reserve */
+       if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+               return true;
+
+       for (i = 0; i < RESERVE_NR; i++)
+               if (fifo_push(&ca->free[i], bucket))
+                       return true;
+
+       return false;
+}
+
 static int bch_allocator_thread(void *arg)
 {
        struct cache *ca = arg;
@@ -336,9 +370,7 @@ static int bch_allocator_thread(void *arg)
                                mutex_lock(&ca->set->bucket_lock);
                        }
 
-                       allocator_wait(ca, !fifo_full(&ca->free));
-
-                       fifo_push(&ca->free, bucket);
+                       allocator_wait(ca, bch_allocator_push(ca, bucket));
                        wake_up(&ca->set->bucket_wait);
                }
 
@@ -365,34 +397,29 @@ static int bch_allocator_thread(void *arg)
        }
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 {
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;
 
        /* fastpath */
-       if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-               fifo_pop(&ca->free, r);
+       if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+           fifo_pop(&ca->free[reserve], r))
                goto out;
-       }
 
        if (!wait)
                return -1;
 
-       while (1) {
-               if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-                       fifo_pop(&ca->free, r);
-                       break;
-               }
-
+       do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);
 
                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
-       }
+       } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+                !fifo_pop(&ca->free[reserve], r));
 
        finish_wait(&ca->set->bucket_wait, &w);
 out:
@@ -401,12 +428,14 @@ out:
        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
+               unsigned j;
 
                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
 
-               fifo_for_each(i, &ca->free, iter)
-                       BUG_ON(i == r);
+               for (j = 0; j < RESERVE_NR; j++)
+                       fifo_for_each(i, &ca->free[j], iter)
+                               BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
                fifo_for_each(i, &ca->unused, iter)
@@ -419,7 +448,7 @@ out:
 
        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
 
-       if (watermark <= WATERMARK_METADATA) {
+       if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
@@ -445,7 +474,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
        }
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                           struct bkey *k, int n, bool wait)
 {
        int i;
@@ -459,7 +488,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
-               long b = bch_bucket_alloc(ca, watermark, wait);
+               long b = bch_bucket_alloc(ca, reserve, wait);
 
                if (b == -1)
                        goto err;
@@ -478,12 +507,12 @@ err:
        return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                         struct bkey *k, int n, bool wait)
 {
        int ret;
        mutex_lock(&c->bucket_lock);
-       ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
+       ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
 }
@@ -573,8 +602,8 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
 
        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned watermark = write_prio
-                       ? WATERMARK_MOVINGGC
-                       : WATERMARK_NONE;
+                       ? RESERVE_MOVINGGC
+                       : RESERVE_NONE;
 
                spin_unlock(&c->data_bucket_lock);
 
@@ -689,7 +718,7 @@ int bch_cache_allocator_init(struct cache *ca)
         * Then 8 for btree allocations
         * Then half for the moving garbage collector
         */
-
+#if 0
        ca->watermark[WATERMARK_PRIO] = 0;
 
        ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
@@ -699,6 +728,6 @@ int bch_cache_allocator_init(struct cache *ca)
 
        ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
                ca->watermark[WATERMARK_MOVINGGC];
-
+#endif
        return 0;
 }
index 754f4317748322e7450d69da9591b6b72aff6dc4..0c707e4f4eafc32adcdb3fec5539b2dbe0998a7a 100644 (file)
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
+#include "bset.h"
 #include "util.h"
 #include "closure.h"
 
@@ -280,7 +281,6 @@ struct bcache_device {
        unsigned long           sectors_dirty_last;
        long                    sectors_dirty_derivative;
 
-       mempool_t               *unaligned_bvec;
        struct bio_set          *bio_split;
 
        unsigned                data_csum:1;
@@ -310,7 +310,8 @@ struct cached_dev {
        struct cache_sb         sb;
        struct bio              sb_bio;
        struct bio_vec          sb_bv[1];
-       struct closure_with_waitlist sb_write;
+       struct closure          sb_write;
+       struct semaphore        sb_write_mutex;
 
        /* Refcount on the cache set. Always nonzero when we're caching. */
        atomic_t                count;
@@ -383,12 +384,12 @@ struct cached_dev {
        unsigned                writeback_rate_p_term_inverse;
 };
 
-enum alloc_watermarks {
-       WATERMARK_PRIO,
-       WATERMARK_METADATA,
-       WATERMARK_MOVINGGC,
-       WATERMARK_NONE,
-       WATERMARK_MAX
+enum alloc_reserve {
+       RESERVE_BTREE,
+       RESERVE_PRIO,
+       RESERVE_MOVINGGC,
+       RESERVE_NONE,
+       RESERVE_NR,
 };
 
 struct cache {
@@ -400,8 +401,6 @@ struct cache {
        struct kobject          kobj;
        struct block_device     *bdev;
 
-       unsigned                watermark[WATERMARK_MAX];
-
        struct task_struct      *alloc_thread;
 
        struct closure          prio;
@@ -430,7 +429,7 @@ struct cache {
         * because all the data they contained was overwritten), so we only
         * need to discard them before they can be moved to the free list.
         */
-       DECLARE_FIFO(long, free);
+       DECLARE_FIFO(long, free)[RESERVE_NR];
        DECLARE_FIFO(long, free_inc);
        DECLARE_FIFO(long, unused);
 
@@ -515,7 +514,8 @@ struct cache_set {
        uint64_t                cached_dev_sectors;
        struct closure          caching;
 
-       struct closure_with_waitlist sb_write;
+       struct closure          sb_write;
+       struct semaphore        sb_write_mutex;
 
        mempool_t               *search;
        mempool_t               *bio_meta;
@@ -630,13 +630,15 @@ struct cache_set {
 
 #ifdef CONFIG_BCACHE_DEBUG
        struct btree            *verify_data;
+       struct bset             *verify_ondisk;
        struct mutex            verify_lock;
 #endif
 
        unsigned                nr_uuids;
        struct uuid_entry       *uuids;
        BKEY_PADDED(uuid_bucket);
-       struct closure_with_waitlist uuid_write;
+       struct closure          uuid_write;
+       struct semaphore        uuid_write_mutex;
 
        /*
         * A btree node on disk could have too many bsets for an iterator to fit
@@ -644,13 +646,7 @@ struct cache_set {
         */
        mempool_t               *fill_iter;
 
-       /*
-        * btree_sort() is a merge sort and requires temporary space - single
-        * element mempool
-        */
-       struct mutex            sort_lock;
-       struct bset             *sort;
-       unsigned                sort_crit_factor;
+       struct bset_sort_state  sort;
 
        /* List of buckets we're currently writing data to */
        struct list_head        data_buckets;
@@ -666,7 +662,6 @@ struct cache_set {
        unsigned                congested_read_threshold_us;
        unsigned                congested_write_threshold_us;
 
-       struct time_stats       sort_time;
        struct time_stats       btree_gc_time;
        struct time_stats       btree_split_time;
        struct time_stats       btree_read_time;
@@ -684,9 +679,9 @@ struct cache_set {
        unsigned                error_decay;
 
        unsigned short          journal_delay_ms;
+       bool                    expensive_debug_checks;
        unsigned                verify:1;
        unsigned                key_merging_disabled:1;
-       unsigned                expensive_debug_checks:1;
        unsigned                gc_always_rewrite:1;
        unsigned                shrinker_disabled:1;
        unsigned                copy_gc_enabled:1;
@@ -708,13 +703,8 @@ struct bbio {
        struct bio              bio;
 };
 
-static inline unsigned local_clock_us(void)
-{
-       return local_clock() >> 10;
-}
-
 #define BTREE_PRIO             USHRT_MAX
-#define INITIAL_PRIO           32768
+#define INITIAL_PRIO           32768U
 
 #define btree_bytes(c)         ((c)->btree_pages * PAGE_SIZE)
 #define btree_blocks(b)                                                        \
@@ -727,21 +717,6 @@ static inline unsigned local_clock_us(void)
 #define bucket_bytes(c)                ((c)->sb.bucket_size << 9)
 #define block_bytes(c)         ((c)->sb.block_size << 9)
 
-#define __set_bytes(i, k)      (sizeof(*(i)) + (k) * sizeof(uint64_t))
-#define set_bytes(i)           __set_bytes(i, i->keys)
-
-#define __set_blocks(i, k, c)  DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
-#define set_blocks(i, c)       __set_blocks(i, (i)->keys, c)
-
-#define node(i, j)             ((struct bkey *) ((i)->d + (j)))
-#define end(i)                 node(i, (i)->keys)
-
-#define index(i, b)                                                    \
-       ((size_t) (((void *) i - (void *) (b)->sets[0].data) /          \
-                  block_bytes(b->c)))
-
-#define btree_data_space(b)    (PAGE_SIZE << (b)->page_order)
-
 #define prios_per_bucket(c)                            \
        ((bucket_bytes(c) - sizeof(struct prio_set)) /  \
         sizeof(struct bucket_disk))
@@ -784,20 +759,34 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
        return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 }
 
-/* Btree key macros */
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+       uint8_t r = a - b;
+       return r > 128U ? 0 : r;
+}
 
-static inline void bkey_init(struct bkey *k)
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+                               unsigned i)
 {
-       *k = ZERO_KEY;
+       return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 }
 
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+                                unsigned i)
+{
+       return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+}
+
+/* Btree key macros */
+
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i)                                                    \
        bch_crc64(((void *) (i)) + sizeof(uint64_t),                    \
-             ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+                 ((void *) bset_bkey_last(i)) -                        \
+                 (((void *) (i)) + sizeof(uint64_t)))
 
 /* Error handling macros */
 
@@ -902,7 +891,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
index 7d388b8bb50e35cf154d7c082ea492a5de08235d..4f6b5940e609b4f53c248bf65762ef8f99c7258c 100644 (file)
  * Copyright 2012 Google, Inc.
  */
 
-#include "bcache.h"
-#include "btree.h"
-#include "debug.h"
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
 #include <linux/random.h>
 #include <linux/prefetch.h>
 
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+{
+       struct bkey *k, *next;
+
+       for (k = i->start; k < bset_bkey_last(i); k = next) {
+               next = bkey_next(k);
+
+               printk(KERN_ERR "block %u key %zi/%u: ", set,
+                      (uint64_t *) k - i->d, i->keys);
+
+               if (b->ops->key_dump)
+                       b->ops->key_dump(b, k);
+               else
+                       printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+               if (next < bset_bkey_last(i) &&
+                   bkey_cmp(k, b->ops->is_extents ?
+                            &START_KEY(next) : next) > 0)
+                       printk(KERN_ERR "Key skipped backwards\n");
+       }
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+       unsigned i;
+
+       console_lock();
+       for (i = 0; i <= b->nsets; i++)
+               bch_dump_bset(b, b->set[i].data,
+                             bset_sector_offset(b, b->set[i].data));
+       console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+       unsigned ret = 0;
+       struct btree_iter iter;
+       struct bkey *k;
+
+       if (b->ops->is_extents)
+               for_each_key(b, k, &iter)
+                       ret += KEY_SIZE(k);
+       return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+       va_list args;
+       struct bkey *k, *p = NULL;
+       struct btree_iter iter;
+       const char *err;
+
+       for_each_key(b, k, &iter) {
+               if (b->ops->is_extents) {
+                       err = "Keys out of order";
+                       if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+                               goto bug;
+
+                       if (bch_ptr_invalid(b, k))
+                               continue;
+
+                       err =  "Overlapping keys";
+                       if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+                               goto bug;
+               } else {
+                       if (bch_ptr_bad(b, k))
+                               continue;
+
+                       err = "Duplicate keys";
+                       if (p && !bkey_cmp(p, k))
+                               goto bug;
+               }
+               p = k;
+       }
+#if 0
+       err = "Key larger than btree node key";
+       if (p && bkey_cmp(p, &b->key) > 0)
+               goto bug;
+#endif
+       return;
+bug:
+       bch_dump_bucket(b);
+
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+
+       panic("bch_check_keys error:  %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+       struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+       if (next < iter->data->end &&
+           bkey_cmp(k, iter->b->ops->is_extents ?
+                    &START_KEY(next) : next) > 0) {
+               bch_dump_bucket(iter->b);
+               panic("Key skipped backwards\n");
+       }
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
 /* Keylists */
 
-int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
 {
        size_t oldsize = bch_keylist_nkeys(l);
-       size_t newsize = oldsize + 2 + nptrs;
+       size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;
 
-       /* The journalling code doesn't handle the case where the keys to insert
-        * is bigger than an empty write: If we just return -ENOMEM here,
-        * bio_insert() and bio_invalidate() will insert the keys created so far
-        * and finish the rest when the keylist is empty.
-        */
-       if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
-               return -ENOMEM;
-
        newsize = roundup_pow_of_two(newsize);
 
        if (newsize <= KEYLIST_INLINE ||
@@ -71,136 +175,6 @@ void bch_keylist_pop_front(struct keylist *l)
                bch_keylist_bytes(l));
 }
 
-/* Pointer validation */
-
-static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-       unsigned i;
-
-       for (i = 0; i < KEY_PTRS(k); i++)
-               if (ptr_available(c, k, i)) {
-                       struct cache *ca = PTR_CACHE(c, k, i);
-                       size_t bucket = PTR_BUCKET_NR(c, k, i);
-                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-                       if (KEY_SIZE(k) + r > c->sb.bucket_size ||
-                           bucket <  ca->sb.first_bucket ||
-                           bucket >= ca->sb.nbuckets)
-                               return true;
-               }
-
-       return false;
-}
-
-bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-       char buf[80];
-
-       if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
-               goto bad;
-
-       if (__ptr_invalid(c, k))
-               goto bad;
-
-       return false;
-bad:
-       bch_bkey_to_text(buf, sizeof(buf), k);
-       cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
-       return true;
-}
-
-bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
-{
-       char buf[80];
-
-       if (!KEY_SIZE(k))
-               return true;
-
-       if (KEY_SIZE(k) > KEY_OFFSET(k))
-               goto bad;
-
-       if (__ptr_invalid(c, k))
-               goto bad;
-
-       return false;
-bad:
-       bch_bkey_to_text(buf, sizeof(buf), k);
-       cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
-       return true;
-}
-
-static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
-                                    unsigned ptr)
-{
-       struct bucket *g = PTR_BUCKET(b->c, k, ptr);
-       char buf[80];
-
-       if (mutex_trylock(&b->c->bucket_lock)) {
-               if (b->level) {
-                       if (KEY_DIRTY(k) ||
-                           g->prio != BTREE_PRIO ||
-                           (b->c->gc_mark_valid &&
-                            GC_MARK(g) != GC_MARK_METADATA))
-                               goto err;
-
-               } else {
-                       if (g->prio == BTREE_PRIO)
-                               goto err;
-
-                       if (KEY_DIRTY(k) &&
-                           b->c->gc_mark_valid &&
-                           GC_MARK(g) != GC_MARK_DIRTY)
-                               goto err;
-               }
-               mutex_unlock(&b->c->bucket_lock);
-       }
-
-       return false;
-err:
-       mutex_unlock(&b->c->bucket_lock);
-       bch_bkey_to_text(buf, sizeof(buf), k);
-       btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-                 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
-                 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-       return true;
-}
-
-bool bch_ptr_bad(struct btree *b, const struct bkey *k)
-{
-       struct bucket *g;
-       unsigned i, stale;
-
-       if (!bkey_cmp(k, &ZERO_KEY) ||
-           !KEY_PTRS(k) ||
-           bch_ptr_invalid(b, k))
-               return true;
-
-       for (i = 0; i < KEY_PTRS(k); i++) {
-               if (!ptr_available(b->c, k, i))
-                       return true;
-
-               g = PTR_BUCKET(b->c, k, i);
-               stale = ptr_stale(b->c, k, i);
-
-               btree_bug_on(stale > 96, b,
-                            "key too stale: %i, need_gc %u",
-                            stale, b->c->need_gc);
-
-               btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
-                            b, "stale dirty pointer");
-
-               if (stale)
-                       return true;
-
-               if (expensive_debug_checks(b->c) &&
-                   ptr_bad_expensive_checks(b, k, i))
-                       return true;
-       }
-
-       return false;
-}
-
 /* Key/pointer manipulation */
 
 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
@@ -255,56 +229,138 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
        return true;
 }
 
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS          3
+#define BKEY_EXPONENT_BITS     7
+#define BKEY_MANTISSA_BITS     (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK     ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+       unsigned        exponent:BKEY_EXPONENT_BITS;
+       unsigned        m:BKEY_MID_BITS;
+       unsigned        mantissa:BKEY_MANTISSA_BITS;
+} __packed;
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bset_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE         128
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
 {
-       return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
-               ~((uint64_t)1 << 63);
+       return PAGE_SIZE << b->page_order;
 }
 
-/* Tries to merge l and r: l should be lower than r
- * Returns true if we were able to merge. If we did merge, l will be the merged
- * key, r will be untouched.
- */
-bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
 {
-       unsigned i;
+       return btree_keys_bytes(b) / BSET_CACHELINE;
+}
 
-       if (key_merging_disabled(b->c))
-               return false;
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+       return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
 
-       if (KEY_PTRS(l) != KEY_PTRS(r) ||
-           KEY_DIRTY(l) != KEY_DIRTY(r) ||
-           bkey_cmp(l, &START_KEY(r)))
-               return false;
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+       return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
 
-       for (i = 0; i < KEY_PTRS(l); i++)
-               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
-                   PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
-                       return false;
+/* Memory allocation */
 
-       /* Keys with no pointers aren't restricted to one bucket and could
-        * overflow KEY_SIZE
-        */
-       if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
-               SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
-               SET_KEY_SIZE(l, USHRT_MAX);
+void bch_btree_keys_free(struct btree_keys *b)
+{
+       struct bset_tree *t = b->set;
 
-               bch_cut_front(l, r);
-               return false;
-       }
+       if (bset_prev_bytes(b) < PAGE_SIZE)
+               kfree(t->prev);
+       else
+               free_pages((unsigned long) t->prev,
+                          get_order(bset_prev_bytes(b)));
 
-       if (KEY_CSUM(l)) {
-               if (KEY_CSUM(r))
-                       l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
-               else
-                       SET_KEY_CSUM(l, 0);
-       }
+       if (bset_tree_bytes(b) < PAGE_SIZE)
+               kfree(t->tree);
+       else
+               free_pages((unsigned long) t->tree,
+                          get_order(bset_tree_bytes(b)));
 
-       SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
-       SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+       free_pages((unsigned long) t->data, b->page_order);
 
-       return true;
+       t->prev = NULL;
+       t->tree = NULL;
+       t->data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_free);
+
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
+{
+       struct bset_tree *t = b->set;
+
+       BUG_ON(t->data);
+
+       b->page_order = page_order;
+
+       t->data = (void *) __get_free_pages(gfp, b->page_order);
+       if (!t->data)
+               goto err;
+
+       t->tree = bset_tree_bytes(b) < PAGE_SIZE
+               ? kmalloc(bset_tree_bytes(b), gfp)
+               : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+       if (!t->tree)
+               goto err;
+
+       t->prev = bset_prev_bytes(b) < PAGE_SIZE
+               ? kmalloc(bset_prev_bytes(b), gfp)
+               : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+       if (!t->prev)
+               goto err;
+
+       return 0;
+err:
+       bch_btree_keys_free(b);
+       return -ENOMEM;
 }
+EXPORT_SYMBOL(bch_btree_keys_alloc);
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+                        bool *expensive_debug_checks)
+{
+       unsigned i;
+
+       b->ops = ops;
+       b->expensive_debug_checks = expensive_debug_checks;
+       b->nsets = 0;
+       b->last_set_unwritten = 0;
+
+       /* XXX: shouldn't be needed */
+       for (i = 0; i < MAX_BSETS; i++)
+               b->set[i].size = 0;
+       /*
+        * Second loop starts at 1 because b->keys[0]->data is the memory we
+        * allocated
+        */
+       for (i = 1; i < MAX_BSETS; i++)
+               b->set[i].data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_init);
 
 /* Binary tree stuff for auxiliary search trees */
 
@@ -455,9 +511,11 @@ static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
 }
 
-static unsigned bkey_to_cacheline_offset(struct bkey *k)
+static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
+                                        unsigned cacheline,
+                                        struct bkey *k)
 {
-       return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+       return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
 }
 
 static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
@@ -504,7 +562,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
                : tree_to_prev_bkey(t, j >> ffs(j));
 
        struct bkey *r = is_power_of_2(j + 1)
-               ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+               ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));
 
        BUG_ON(m < l || m > r);
@@ -528,9 +586,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
                f->exponent = 127;
 }
 
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 {
-       if (t != b->sets) {
+       if (t != b->set) {
                unsigned j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));
 
@@ -538,33 +596,54 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
                t->prev = t[-1].prev + j;
        }
 
-       while (t < b->sets + MAX_BSETS)
+       while (t < b->set + MAX_BSETS)
                t++->size = 0;
 }
 
-static void bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
 {
-       struct bset_tree *t = b->sets + b->nsets;
+       struct bset_tree *t = bset_tree_last(b);
+
+       BUG_ON(b->last_set_unwritten);
+       b->last_set_unwritten = 1;
 
        bset_alloc_tree(b, t);
 
-       if (t->tree != b->sets->tree + bset_tree_space(b)) {
-               t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+       if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+               t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
 }
 
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
+{
+       if (i != b->set->data) {
+               b->set[++b->nsets].data = i;
+               i->seq = b->set->data->seq;
+       } else
+               get_random_bytes(&i->seq, sizeof(uint64_t));
+
+       i->magic        = magic;
+       i->version      = 0;
+       i->keys         = 0;
+
+       bch_bset_build_unwritten_tree(b);
+}
+EXPORT_SYMBOL(bch_bset_init_next);
+
+void bch_bset_build_written_tree(struct btree_keys *b)
 {
-       struct bset_tree *t = b->sets + b->nsets;
-       struct bkey *k = t->data->start;
+       struct bset_tree *t = bset_tree_last(b);
+       struct bkey *prev = NULL, *k = t->data->start;
        unsigned j, cacheline = 1;
 
+       b->last_set_unwritten = 0;
+
        bset_alloc_tree(b, t);
 
        t->size = min_t(unsigned,
-                       bkey_to_cacheline(t, end(t->data)),
-                       b->sets->tree + bset_tree_space(b) - t->tree);
+                       bkey_to_cacheline(t, bset_bkey_last(t->data)),
+                       b->set->tree + btree_keys_cachelines(b) - t->tree);
 
        if (t->size < 2) {
                t->size = 0;
@@ -577,16 +656,14 @@ static void bset_build_written_tree(struct btree *b)
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
-               while (bkey_to_cacheline(t, k) != cacheline)
-                       k = bkey_next(k);
+               while (bkey_to_cacheline(t, k) < cacheline)
+                       prev = k, k = bkey_next(k);
 
-               t->prev[j] = bkey_u64s(k);
-               k = bkey_next(k);
-               cacheline++;
-               t->tree[j].m = bkey_to_cacheline_offset(k);
+               t->prev[j] = bkey_u64s(prev);
+               t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }
 
-       while (bkey_next(k) != end(t->data))
+       while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);
 
        t->end = *k;
@@ -597,14 +674,17 @@ static void bset_build_written_tree(struct btree *b)
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
 }
+EXPORT_SYMBOL(bch_bset_build_written_tree);
 
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
        struct bset_tree *t;
        unsigned inorder, j = 1;
 
-       for (t = b->sets; t <= &b->sets[b->nsets]; t++)
-               if (k < end(t->data))
+       for (t = b->set; t <= bset_tree_last(b); t++)
+               if (k < bset_bkey_last(t->data))
                        goto found_set;
 
        BUG();
@@ -617,7 +697,7 @@ found_set:
        if (k == t->data->start)
                goto fix_left;
 
-       if (bkey_next(k) == end(t->data)) {
+       if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }
@@ -642,10 +722,12 @@ fix_right:        do {
                        j = j * 2 + 1;
                } while (j < t->size);
 }
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
 
-void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+                                     struct bset_tree *t,
+                                     struct bkey *k)
 {
-       struct bset_tree *t = &b->sets[b->nsets];
        unsigned shift = bkey_u64s(k);
        unsigned j = bkey_to_cacheline(t, k);
 
@@ -657,8 +739,8 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
-       if (j < t->size &&
-           table_to_bkey(t, j) <= k)
+       while (j < t->size &&
+              table_to_bkey(t, j) <= k)
                j++;
 
        /* Adjust all the lookup table entries, and find a new key for any that
@@ -673,54 +755,124 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);
 
-                       t->prev[j] = bkey_to_cacheline_offset(k);
+                       t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }
 
-       if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+       if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;
 
        /* Possibly add a new entry to the end of the lookup table */
 
        for (k = table_to_bkey(t, t->size - 1);
-            k != end(t->data);
+            k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
-                       t->prev[t->size] = bkey_to_cacheline_offset(k);
+                       t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
 }
 
-void bch_bset_init_next(struct btree *b)
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
 {
-       struct bset *i = write_block(b);
+       if (!b->ops->key_merge)
+               return false;
 
-       if (i != b->sets[0].data) {
-               b->sets[++b->nsets].data = i;
-               i->seq = b->sets[0].data->seq;
-       } else
-               get_random_bytes(&i->seq, sizeof(uint64_t));
+       /*
+        * Generic header checks
+        * Assumes left and right are in order
+        * Left and right must be exactly aligned
+        */
+       if (!bch_bkey_equal_header(l, r) ||
+            bkey_cmp(l, &START_KEY(r)))
+               return false;
 
-       i->magic        = bset_magic(&b->c->sb);
-       i->version      = 0;
-       i->keys         = 0;
+       return b->ops->key_merge(b, l, r);
+}
+EXPORT_SYMBOL(bch_bkey_try_merge);
 
-       bset_build_unwritten_tree(b);
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+                    struct bkey *insert)
+{
+       struct bset_tree *t = bset_tree_last(b);
+
+       BUG_ON(!b->last_set_unwritten);
+       BUG_ON(bset_byte_offset(b, t->data) +
+              __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+              PAGE_SIZE << b->page_order);
+
+       memmove((uint64_t *) where + bkey_u64s(insert),
+               where,
+               (void *) bset_bkey_last(t->data) - (void *) where);
+
+       t->data->keys += bkey_u64s(insert);
+       bkey_copy(where, insert);
+       bch_bset_fix_lookup_table(b, t, where);
 }
+EXPORT_SYMBOL(bch_bset_insert);
+
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+                             struct bkey *replace_key)
+{
+       unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+       struct bset *i = bset_tree_last(b)->data;
+       struct bkey *m, *prev = NULL;
+       struct btree_iter iter;
+
+       BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+       m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+                               ? PRECEDING_KEY(&START_KEY(k))
+                               : PRECEDING_KEY(k));
+
+       if (b->ops->insert_fixup(b, k, &iter, replace_key))
+               return status;
+
+       status = BTREE_INSERT_STATUS_INSERT;
+
+       while (m != bset_bkey_last(i) &&
+              bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+               prev = m, m = bkey_next(m);
+
+       /* prev is in the tree, if we merge we're done */
+       status = BTREE_INSERT_STATUS_BACK_MERGE;
+       if (prev &&
+           bch_bkey_try_merge(b, prev, k))
+               goto merged;
+#if 0
+       status = BTREE_INSERT_STATUS_OVERWROTE;
+       if (m != bset_bkey_last(i) &&
+           KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+               goto copy;
+#endif
+       status = BTREE_INSERT_STATUS_FRONT_MERGE;
+       if (m != bset_bkey_last(i) &&
+           bch_bkey_try_merge(b, k, m))
+               goto copy;
+
+       bch_bset_insert(b, m, k);
+copy:  bkey_copy(m, k);
+merged:
+       return status;
+}
+EXPORT_SYMBOL(bch_btree_insert_key);
+
+/* Lookup */
 
 struct bset_search_iter {
        struct bkey *l, *r;
 };
 
-static struct bset_search_iter bset_search_write_set(struct btree *b,
-                                                    struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
 {
        unsigned li = 0, ri = t->size;
 
-       BUG_ON(!b->nsets &&
-              t->size < bkey_to_cacheline(t, end(t->data)));
-
        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;
 
@@ -732,12 +884,11 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 
        return (struct bset_search_iter) {
                table_to_bkey(t, li),
-               ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+               ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
 }
 
-static struct bset_search_iter bset_search_tree(struct btree *b,
-                                               struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                const struct bkey *search)
 {
        struct bkey *l, *r;
@@ -784,7 +935,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
-                       r = end(t->data);
+                       r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);
 
@@ -798,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
        return (struct bset_search_iter) {l, r};
 }
 
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
 {
        struct bset_search_iter i;
@@ -820,7 +971,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
        if (unlikely(!t->size)) {
                i.l = t->data->start;
-               i.r = end(t->data);
+               i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
@@ -830,23 +981,27 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
                 */
 
                if (unlikely(bkey_cmp(search, &t->end) >= 0))
-                       return end(t->data);
+                       return bset_bkey_last(t->data);
 
                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;
 
-               i = bset_search_tree(b, t, search);
-       } else
-               i = bset_search_write_set(b, t, search);
+               i = bset_search_tree(t, search);
+       } else {
+               BUG_ON(!b->nsets &&
+                      t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
-       if (expensive_debug_checks(b->c)) {
+               i = bset_search_write_set(t, search);
+       }
+
+       if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);
 
-               BUG_ON(i.r != end(t->data) &&
+               BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }
 
@@ -856,22 +1011,17 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
        return i.l;
 }
+EXPORT_SYMBOL(__bch_bset_search);
 
 /* Btree iterator */
 
-/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
- */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+                                struct btree_iter_set);
+
 static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
 {
-       int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
-       return c ? c > 0 : l.k < r.k;
+       return bkey_cmp(l.k, r.k) > 0;
 }
 
 static inline bool btree_iter_end(struct btree_iter *iter)
@@ -888,8 +1038,10 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                                 btree_iter_cmp));
 }
 
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
-                                  struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+                                         struct btree_iter *iter,
+                                         struct bkey *search,
+                                         struct bset_tree *start)
 {
        struct bkey *ret = NULL;
        iter->size = ARRAY_SIZE(iter->data);
@@ -899,15 +1051,24 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
        iter->b = b;
 #endif
 
-       for (; start <= &b->sets[b->nsets]; start++) {
+       for (; start <= bset_tree_last(b); start++) {
                ret = bch_bset_search(b, start, search);
-               bch_btree_iter_push(iter, ret, end(start->data));
+               bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }
 
        return ret;
 }
 
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+                                struct btree_iter *iter,
+                                struct bkey *search)
+{
+       return __bch_btree_iter_init(b, iter, search, b->set);
+}
+EXPORT_SYMBOL(bch_btree_iter_init);
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+                                                btree_iter_cmp_fn *cmp)
 {
        struct btree_iter_set unused;
        struct bkey *ret = NULL;
@@ -924,16 +1085,23 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
                }
 
                if (iter->data->k == iter->data->end)
-                       heap_pop(iter, unused, btree_iter_cmp);
+                       heap_pop(iter, unused, cmp);
                else
-                       heap_sift(iter, 0, btree_iter_cmp);
+                       heap_sift(iter, 0, cmp);
        }
 
        return ret;
 }
 
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+       return __bch_btree_iter_next(iter, btree_iter_cmp);
+
+}
+EXPORT_SYMBOL(bch_btree_iter_next);
+
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
-                                       struct btree *b, ptr_filter_fn fn)
+                                       struct btree_keys *b, ptr_filter_fn fn)
 {
        struct bkey *ret;
 
@@ -946,70 +1114,58 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 
 /* Mergesort */
 
-static void sort_key_next(struct btree_iter *iter,
-                         struct btree_iter_set *i)
+void bch_bset_sort_state_free(struct bset_sort_state *state)
 {
-       i->k = bkey_next(i->k);
-
-       if (i->k == i->end)
-               *i = iter->data[--iter->used];
+       if (state->pool)
+               mempool_destroy(state->pool);
 }
 
-static void btree_sort_fixup(struct btree_iter *iter)
+int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
 {
-       while (iter->used > 1) {
-               struct btree_iter_set *top = iter->data, *i = top + 1;
+       spin_lock_init(&state->time.lock);
 
-               if (iter->used > 2 &&
-                   btree_iter_cmp(i[0], i[1]))
-                       i++;
+       state->page_order = page_order;
+       state->crit_factor = int_sqrt(1 << page_order);
 
-               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
-                       break;
-
-               if (!KEY_SIZE(i->k)) {
-                       sort_key_next(iter, i);
-                       heap_sift(iter, i - top, btree_iter_cmp);
-                       continue;
-               }
-
-               if (top->k > i->k) {
-                       if (bkey_cmp(top->k, i->k) >= 0)
-                               sort_key_next(iter, i);
-                       else
-                               bch_cut_front(top->k, i->k);
+       state->pool = mempool_create_page_pool(1, page_order);
+       if (!state->pool)
+               return -ENOMEM;
 
-                       heap_sift(iter, i - top, btree_iter_cmp);
-               } else {
-                       /* can't happen because of comparison func */
-                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
-                       bch_cut_back(&START_KEY(i->k), top->k);
-               }
-       }
+       return 0;
 }
+EXPORT_SYMBOL(bch_bset_sort_state_init);
 
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
 {
+       int i;
        struct bkey *k, *last = NULL;
-       bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+       BKEY_PADDED(k) tmp;
+       bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;
 
+       /* Heapify the iterator, using our comparison function */
+       for (i = iter->used / 2 - 1; i >= 0; --i)
+               heap_sift(iter, i, b->ops->sort_cmp);
+
        while (!btree_iter_end(iter)) {
-               if (fixup && !b->level)
-                       btree_sort_fixup(iter);
+               if (b->ops->sort_fixup && fixup)
+                       k = b->ops->sort_fixup(iter, &tmp.k);
+               else
+                       k = NULL;
+
+               if (!k)
+                       k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
 
-               k = bch_btree_iter_next(iter);
                if (bad(b, k))
                        continue;
 
                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
-               } else if (b->level ||
-                          !bch_bkey_try_merge(b, last, k)) {
+               } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
@@ -1020,27 +1176,27 @@ static void btree_mergesort(struct btree *b, struct bset *out,
        pr_debug("sorted %i keys", out->keys);
 }
 
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
-                        unsigned start, unsigned order, bool fixup)
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+                        unsigned start, unsigned order, bool fixup,
+                        struct bset_sort_state *state)
 {
        uint64_t start_time;
-       bool remove_stale = !b->written;
+       bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
                                                     order);
        if (!out) {
-               mutex_lock(&b->c->sort_lock);
-               out = b->c->sort;
-               order = ilog2(bucket_pages(b->c));
+               BUG_ON(order > state->page_order);
+
+               out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+               used_mempool = true;
+               order = state->page_order;
        }
 
        start_time = local_clock();
 
-       btree_mergesort(b, out, iter, fixup, remove_stale);
+       btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;
 
-       if (!fixup && !start && b->written)
-               bch_btree_verify(b, out);
-
        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
@@ -1048,84 +1204,76 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
                 * memcpy()
                 */
 
-               out->magic      = bset_magic(&b->c->sb);
-               out->seq        = b->sets[0].data->seq;
-               out->version    = b->sets[0].data->version;
-               swap(out, b->sets[0].data);
-
-               if (b->c->sort == b->sets[0].data)
-                       b->c->sort = out;
+               out->magic      = b->set->data->magic;
+               out->seq        = b->set->data->seq;
+               out->version    = b->set->data->version;
+               swap(out, b->set->data);
        } else {
-               b->sets[start].data->keys = out->keys;
-               memcpy(b->sets[start].data->start, out->start,
-                      (void *) end(out) - (void *) out->start);
+               b->set[start].data->keys = out->keys;
+               memcpy(b->set[start].data->start, out->start,
+                      (void *) bset_bkey_last(out) - (void *) out->start);
        }
 
-       if (out == b->c->sort)
-               mutex_unlock(&b->c->sort_lock);
+       if (used_mempool)
+               mempool_free(virt_to_page(out), state->pool);
        else
                free_pages((unsigned long) out, order);
 
-       if (b->written)
-               bset_build_written_tree(b);
+       bch_bset_build_written_tree(b);
 
        if (!start)
-               bch_time_stats_update(&b->c->sort_time, start_time);
+               bch_time_stats_update(&state->time, start_time);
 }
 
-void bch_btree_sort_partial(struct btree *b, unsigned start)
+void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
+                           struct bset_sort_state *state)
 {
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);
 
-       __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
-       BUG_ON(b->sets[b->nsets].data == write_block(b) &&
-              (b->sets[b->nsets].size || b->nsets));
-
+       __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
 
        if (start) {
                unsigned i;
 
                for (i = start; i <= b->nsets; i++)
-                       keys += b->sets[i].data->keys;
+                       keys += b->set[i].data->keys;
 
-               order = roundup_pow_of_two(__set_bytes(b->sets->data,
-                                                      keys)) / PAGE_SIZE;
-               if (order)
-                       order = ilog2(order);
+               order = get_order(__set_bytes(b->set->data, keys));
        }
 
-       __btree_sort(b, &iter, start, order, false);
+       __btree_sort(b, &iter, start, order, false, state);
 
-       EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
+       EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
 }
+EXPORT_SYMBOL(bch_btree_sort_partial);
 
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+                                   struct btree_iter *iter,
+                                   struct bset_sort_state *state)
 {
-       BUG_ON(!b->written);
-       __btree_sort(b, iter, 0, b->page_order, true);
+       __btree_sort(b, iter, 0, b->page_order, true, state);
 }
 
-void bch_btree_sort_into(struct btree *b, struct btree *new)
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+                        struct bset_sort_state *state)
 {
        uint64_t start_time = local_clock();
 
        struct btree_iter iter;
        bch_btree_iter_init(b, &iter, NULL);
 
-       btree_mergesort(b, new->sets->data, &iter, false, true);
+       btree_mergesort(b, new->set->data, &iter, false, true);
 
-       bch_time_stats_update(&b->c->sort_time, start_time);
+       bch_time_stats_update(&state->time, start_time);
 
-       bkey_copy_key(&new->key, &b->key);
-       new->sets->size = 0;
+       new->set->size = 0; // XXX: why?
 }
 
 #define SORT_CRIT      (4096 / sizeof(uint64_t))
 
-void bch_btree_sort_lazy(struct btree *b)
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
 {
        unsigned crit = SORT_CRIT;
        int i;
@@ -1134,50 +1282,32 @@ void bch_btree_sort_lazy(struct btree *b)
        if (!b->nsets)
                goto out;
 
-       /* If not a leaf node, always sort */
-       if (b->level) {
-               bch_btree_sort(b);
-               return;
-       }
-
        for (i = b->nsets - 1; i >= 0; --i) {
-               crit *= b->c->sort_crit_factor;
+               crit *= state->crit_factor;
 
-               if (b->sets[i].data->keys < crit) {
-                       bch_btree_sort_partial(b, i);
+               if (b->set[i].data->keys < crit) {
+                       bch_btree_sort_partial(b, i, state);
                        return;
                }
        }
 
        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
-               bch_btree_sort(b);
+               bch_btree_sort(b, state);
                return;
        }
 
 out:
-       bset_build_written_tree(b);
+       bch_bset_build_written_tree(b);
 }
+EXPORT_SYMBOL(bch_btree_sort_lazy);
 
-/* Sysfs stuff */
-
-struct bset_stats {
-       struct btree_op op;
-       size_t nodes;
-       size_t sets_written, sets_unwritten;
-       size_t bytes_written, bytes_unwritten;
-       size_t floats, failed;
-};
-
-static int btree_bset_stats(struct btree_op *op, struct btree *b)
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
 {
-       struct bset_stats *stats = container_of(op, struct bset_stats, op);
        unsigned i;
 
-       stats->nodes++;
-
        for (i = 0; i <= b->nsets; i++) {
-               struct bset_tree *t = &b->sets[i];
+               struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;
 
@@ -1195,32 +1325,4 @@ static int btree_bset_stats(struct btree_op *op, struct btree *b)
                        stats->bytes_unwritten += bytes;
                }
        }
-
-       return MAP_CONTINUE;
-}
-
-int bch_bset_print_stats(struct cache_set *c, char *buf)
-{
-       struct bset_stats t;
-       int ret;
-
-       memset(&t, 0, sizeof(struct bset_stats));
-       bch_btree_op_init(&t.op, -1);
-
-       ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
-       if (ret < 0)
-               return ret;
-
-       return snprintf(buf, PAGE_SIZE,
-                       "btree nodes:           %zu\n"
-                       "written sets:          %zu\n"
-                       "unwritten sets:                %zu\n"
-                       "written key bytes:     %zu\n"
-                       "unwritten key bytes:   %zu\n"
-                       "floats:                        %zu\n"
-                       "failed:                        %zu\n",
-                       t.nodes,
-                       t.sets_written, t.sets_unwritten,
-                       t.bytes_written, t.bytes_unwritten,
-                       t.floats, t.failed);
 }
index 1d3c24f9fa0e95fbb20c03fcb825559b0736de1b..003260f4ddf6e725417956531e64c9cb33022ae4 100644 (file)
@@ -1,7 +1,11 @@
 #ifndef _BCACHE_BSET_H
 #define _BCACHE_BSET_H
 
-#include <linux/slab.h>
+#include <linux/bcache.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "util.h" /* for time_stats */
 
 /*
  * BKEYS:
  * first key in that range of bytes again.
  */
 
-/* Btree key comparison/iteration */
+struct btree_keys;
+struct btree_iter;
+struct btree_iter_set;
+struct bkey_float;
 
 #define MAX_BSETS              4U
 
-struct btree_iter {
-       size_t size, used;
-#ifdef CONFIG_BCACHE_DEBUG
-       struct btree *b;
-#endif
-       struct btree_iter_set {
-               struct bkey *k, *end;
-       } data[MAX_BSETS];
-};
-
 struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
@@ -165,14 +162,14 @@ struct bset_tree {
         */
 
        /* size of the binary tree and prev array */
-       unsigned        size;
+       unsigned                size;
 
        /* function of size - precalculated for to_inorder() */
-       unsigned        extra;
+       unsigned                extra;
 
        /* copy of the last key in the set */
-       struct bkey     end;
-       struct bkey_float *tree;
+       struct bkey             end;
+       struct bkey_float       *tree;
 
        /*
         * The nodes in the bset tree point to specific keys - this
@@ -182,12 +179,219 @@ struct bset_tree {
         * to keep bkey_float to 4 bytes and prev isn't used in the fast
         * path.
         */
-       uint8_t         *prev;
+       uint8_t                 *prev;
 
        /* The actual btree node, with pointers to each sorted set */
-       struct bset     *data;
+       struct bset             *data;
+};
+
+struct btree_keys_ops {
+       bool            (*sort_cmp)(struct btree_iter_set,
+                                   struct btree_iter_set);
+       struct bkey     *(*sort_fixup)(struct btree_iter *, struct bkey *);
+       bool            (*insert_fixup)(struct btree_keys *, struct bkey *,
+                                       struct btree_iter *, struct bkey *);
+       bool            (*key_invalid)(struct btree_keys *,
+                                      const struct bkey *);
+       bool            (*key_bad)(struct btree_keys *, const struct bkey *);
+       bool            (*key_merge)(struct btree_keys *,
+                                    struct bkey *, struct bkey *);
+       void            (*key_to_text)(char *, size_t, const struct bkey *);
+       void            (*key_dump)(struct btree_keys *, const struct bkey *);
+
+       /*
+        * Only used for deciding whether to use START_KEY(k) or just the key
+        * itself in a couple places
+        */
+       bool            is_extents;
+};
+
+struct btree_keys {
+       const struct btree_keys_ops     *ops;
+       uint8_t                 page_order;
+       uint8_t                 nsets;
+       unsigned                last_set_unwritten:1;
+       bool                    *expensive_debug_checks;
+
+       /*
+        * Sets of sorted keys - the real btree node - plus a binary search tree
+        *
+        * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+        * to the memory we have allocated for this btree node. Additionally,
+        * set[0]->data points to the entire btree node as it exists on disk.
+        */
+       struct bset_tree        set[MAX_BSETS];
+};
+
+static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
+{
+       return b->set + b->nsets;
+}
+
+static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
+{
+       return t <= b->set + b->nsets - b->last_set_unwritten;
+}
+
+static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
+{
+       return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
+}
+
+static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+{
+       return ((size_t) i) - ((size_t) b->set->data);
+}
+
+static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+{
+       return bset_byte_offset(b, i) >> 9;
+}
+
+#define __set_bytes(i, k)      (sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i)           __set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes)                                \
+       DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes)                             \
+       __set_blocks(i, (i)->keys, block_bytes)
+
+static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
+{
+       struct bset_tree *t = bset_tree_last(b);
+
+       BUG_ON((PAGE_SIZE << b->page_order) <
+              (bset_byte_offset(b, t->data) + set_bytes(t->data)));
+
+       if (!b->last_set_unwritten)
+               return 0;
+
+       return ((PAGE_SIZE << b->page_order) -
+               (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
+               sizeof(u64);
+}
+
+static inline struct bset *bset_next_set(struct btree_keys *b,
+                                        unsigned block_bytes)
+{
+       struct bset *i = bset_tree_last(b)->data;
+
+       return ((void *) i) + roundup(set_bytes(i), block_bytes);
+}
+
+void bch_btree_keys_free(struct btree_keys *);
+int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
+void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
+                        bool *);
+
+void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
+void bch_bset_build_written_tree(struct btree_keys *);
+void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
+bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
+void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+                             struct bkey *);
+
+enum {
+       BTREE_INSERT_STATUS_NO_INSERT = 0,
+       BTREE_INSERT_STATUS_INSERT,
+       BTREE_INSERT_STATUS_BACK_MERGE,
+       BTREE_INSERT_STATUS_OVERWROTE,
+       BTREE_INSERT_STATUS_FRONT_MERGE,
 };
 
+/* Btree key iteration */
+
+struct btree_iter {
+       size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+       struct btree_keys *b;
+#endif
+       struct btree_iter_set {
+               struct bkey *k, *end;
+       } data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+                                       struct btree_keys *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
+                                struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
+                              const struct bkey *);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+                                          struct bset_tree *t,
+                                          const struct bkey *search)
+{
+       return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+#define for_each_key_filter(b, k, iter, filter)                                \
+       for (bch_btree_iter_init((b), (iter), NULL);                    \
+            ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter)                                       \
+       for (bch_btree_iter_init((b), (iter), NULL);                    \
+            ((k) = bch_btree_iter_next(iter));)
+
+/* Sorting */
+
+struct bset_sort_state {
+       mempool_t               *pool;
+
+       unsigned                page_order;
+       unsigned                crit_factor;
+
+       struct time_stats       time;
+};
+
+void bch_bset_sort_state_free(struct bset_sort_state *);
+int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
+void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
+void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
+                        struct bset_sort_state *);
+void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
+                                   struct bset_sort_state *);
+void bch_btree_sort_partial(struct btree_keys *, unsigned,
+                           struct bset_sort_state *);
+
+static inline void bch_btree_sort(struct btree_keys *b,
+                                 struct bset_sort_state *state)
+{
+       bch_btree_sort_partial(b, 0, state);
+}
+
+struct bset_stats {
+       size_t sets_written, sets_unwritten;
+       size_t bytes_written, bytes_unwritten;
+       size_t floats, failed;
+};
+
+void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
+
+/* Bkey utility code */
+
+#define bset_bkey_last(i)      bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+       return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+       *k = ZERO_KEY;
+}
+
 static __always_inline int64_t bkey_cmp(const struct bkey *l,
                                        const struct bkey *r)
 {
@@ -196,6 +400,62 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
                : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
 }
 
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+                             unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+       BUG_ON(bkey_cmp(where, k) > 0);
+       return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+       BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+       return __bch_cut_back(where, k);
+}
+
+#define PRECEDING_KEY(_k)                                      \
+({                                                             \
+       struct bkey *_ret = NULL;                               \
+                                                               \
+       if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
+               _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
+                                                               \
+               if (!_ret->low)                                 \
+                       _ret->high--;                           \
+               _ret->low--;                                    \
+       }                                                       \
+                                                               \
+       _ret;                                                   \
+})
+
+static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
+{
+       return b->ops->key_invalid(b, k);
+}
+
+static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
+{
+       return b->ops->key_bad(b, k);
+}
+
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+                                   size_t size, const struct bkey *k)
+{
+       return b->ops->key_to_text(buf, size, k);
+}
+
+static inline bool bch_bkey_equal_header(const struct bkey *l,
+                                        const struct bkey *r)
+{
+       return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
+               KEY_PTRS(l) == KEY_PTRS(r) &&
+               KEY_CSUM(l) == KEY_CSUM(r));
+}
+
 /* Keylists */
 
 struct keylist {
@@ -257,136 +517,44 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
 
 struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
-int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
-
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-                             unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
+int __bch_keylist_realloc(struct keylist *, unsigned);
 
-static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
-{
-       BUG_ON(bkey_cmp(where, k) > 0);
-       return __bch_cut_front(where, k);
-}
+/* Debug stuff */
 
-static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
-{
-       BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
-       return __bch_cut_back(where, k);
-}
-
-const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
-bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
-
-bool bch_ptr_bad(struct btree *, const struct bkey *);
-
-static inline uint8_t gen_after(uint8_t a, uint8_t b)
-{
-       uint8_t r = a - b;
-       return r > 128U ? 0 : r;
-}
-
-static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-                               unsigned i)
-{
-       return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
-}
-
-static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-                                unsigned i)
-{
-       return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
-}
-
-
-typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
-                                       struct btree *, ptr_filter_fn);
-
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
-                                  struct bkey *, struct bset_tree *);
-
-/* 32 bits total: */
-#define BKEY_MID_BITS          3
-#define BKEY_EXPONENT_BITS     7
-#define BKEY_MANTISSA_BITS     22
-#define BKEY_MANTISSA_MASK     ((1 << BKEY_MANTISSA_BITS) - 1)
-
-struct bkey_float {
-       unsigned        exponent:BKEY_EXPONENT_BITS;
-       unsigned        m:BKEY_MID_BITS;
-       unsigned        mantissa:BKEY_MANTISSA_BITS;
-} __packed;
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE         128
-#define bset_tree_space(b)     (btree_data_space(b) / BSET_CACHELINE)
+#ifdef CONFIG_BCACHE_DEBUG
 
-#define bset_tree_bytes(b)     (bset_tree_space(b) * sizeof(struct bkey_float))
-#define bset_prev_bytes(b)     (bset_tree_space(b) * sizeof(uint8_t))
+int __bch_count_data(struct btree_keys *);
+void __bch_check_keys(struct btree_keys *, const char *, ...);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bucket(struct btree_keys *);
 
-void bch_bset_init_next(struct btree *);
+#else
 
-void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
-void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
 
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
-                          const struct bkey *);
+#endif
 
-/*
- * Returns the first key that is strictly greater than search
- */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
-                                          const struct bkey *search)
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
 {
-       return search ? __bch_bset_search(b, t, search) : t->data->start;
+#ifdef CONFIG_BCACHE_DEBUG
+       return *b->expensive_debug_checks;
+#else
+       return false;
+#endif
 }
 
-#define PRECEDING_KEY(_k)                                      \
-({                                                             \
-       struct bkey *_ret = NULL;                               \
-                                                               \
-       if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
-               _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
-                                                               \
-               if (!_ret->low)                                 \
-                       _ret->high--;                           \
-               _ret->low--;                                    \
-       }                                                       \
-                                                               \
-       _ret;                                                   \
-})
-
-bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
-void bch_btree_sort_lazy(struct btree *);
-void bch_btree_sort_into(struct btree *, struct btree *);
-void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
-void bch_btree_sort_partial(struct btree *, unsigned);
-
-static inline void bch_btree_sort(struct btree *b)
+static inline int bch_count_data(struct btree_keys *b)
 {
-       bch_btree_sort_partial(b, 0);
+       return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
 }
 
-int bch_bset_print_stats(struct cache_set *, char *);
+#define bch_check_keys(b, ...)                                         \
+do {                                                                   \
+       if (btree_keys_expensive_checks(b))                             \
+               __bch_check_keys(b, __VA_ARGS__);                       \
+} while (0)
 
 #endif
index 31bb53fcc67a40806cf73659a596f98297d36128..98cc0a810a366a466253d250baba5c2564e9fab7 100644 (file)
@@ -23,7 +23,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "writeback.h"
+#include "extents.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
  * Test module load/unload
  */
 
-enum {
-       BTREE_INSERT_STATUS_INSERT,
-       BTREE_INSERT_STATUS_BACK_MERGE,
-       BTREE_INSERT_STATUS_OVERWROTE,
-       BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC            64
 #define MAX_SAVE_PRIO          72
 
@@ -106,14 +99,6 @@ enum {
 
 static struct workqueue_struct *btree_io_wq;
 
-static inline bool should_split(struct btree *b)
-{
-       struct bset *i = write_block(b);
-       return b->written >= btree_blocks(b) ||
-               (b->written + __set_blocks(i, i->keys + 15, b->c)
-                > btree_blocks(b));
-}
-
 #define insert_lock(s, b)      ((b)->level <= (s)->lock)
 
 /*
@@ -167,6 +152,8 @@ static inline bool should_split(struct btree *b)
                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
                }                                                       \
                rw_unlock(_w, _b);                                      \
+               if (_r == -EINTR)                                       \
+                       schedule();                                     \
                bch_cannibalize_unlock(c);                              \
                if (_r == -ENOSPC) {                                    \
                        wait_event((c)->try_wait,                       \
@@ -175,9 +162,15 @@ static inline bool should_split(struct btree *b)
                }                                                       \
        } while (_r == -EINTR);                                         \
                                                                        \
+       finish_wait(&(c)->bucket_wait, &(op)->wait);                    \
        _r;                                                             \
 })
 
+static inline struct bset *write_block(struct btree *b)
+{
+       return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+}
+
 /* Btree key manipulation */
 
 void bkey_put(struct cache_set *c, struct bkey *k)
@@ -194,16 +187,16 @@ void bkey_put(struct cache_set *c, struct bkey *k)
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 {
        uint64_t crc = b->key.ptr[0];
-       void *data = (void *) i + 8, *end = end(i);
+       void *data = (void *) i + 8, *end = bset_bkey_last(i);
 
        crc = bch_crc64_update(crc, data, end - data);
        return crc ^ 0xffffffffffffffffULL;
 }
 
-static void bch_btree_node_read_done(struct btree *b)
+void bch_btree_node_read_done(struct btree *b)
 {
        const char *err = "bad btree header";
-       struct bset *i = b->sets[0].data;
+       struct bset *i = btree_bset_first(b);
        struct btree_iter *iter;
 
        iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
@@ -211,21 +204,22 @@ static void bch_btree_node_read_done(struct btree *b)
        iter->used = 0;
 
 #ifdef CONFIG_BCACHE_DEBUG
-       iter->b = b;
+       iter->b = &b->keys;
 #endif
 
        if (!i->seq)
                goto err;
 
        for (;
-            b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+            b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
             i = write_block(b)) {
                err = "unsupported bset version";
                if (i->version > BCACHE_BSET_VERSION)
                        goto err;
 
                err = "bad btree header";
-               if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+               if (b->written + set_blocks(i, block_bytes(b->c)) >
+                   btree_blocks(b))
                        goto err;
 
                err = "bad magic";
@@ -245,39 +239,40 @@ static void bch_btree_node_read_done(struct btree *b)
                }
 
                err = "empty set";
-               if (i != b->sets[0].data && !i->keys)
+               if (i != b->keys.set[0].data && !i->keys)
                        goto err;
 
-               bch_btree_iter_push(iter, i->start, end(i));
+               bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
 
-               b->written += set_blocks(i, b->c);
+               b->written += set_blocks(i, block_bytes(b->c));
        }
 
        err = "corrupted btree";
        for (i = write_block(b);
-            index(i, b) < btree_blocks(b);
+            bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
             i = ((void *) i) + block_bytes(b->c))
-               if (i->seq == b->sets[0].data->seq)
+               if (i->seq == b->keys.set[0].data->seq)
                        goto err;
 
-       bch_btree_sort_and_fix_extents(b, iter);
+       bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
 
-       i = b->sets[0].data;
+       i = b->keys.set[0].data;
        err = "short btree key";
-       if (b->sets[0].size &&
-           bkey_cmp(&b->key, &b->sets[0].end) < 0)
+       if (b->keys.set[0].size &&
+           bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
                goto err;
 
        if (b->written < btree_blocks(b))
-               bch_bset_init_next(b);
+               bch_bset_init_next(&b->keys, write_block(b),
+                                  bset_magic(&b->c->sb));
 out:
        mempool_free(iter, b->c->fill_iter);
        return;
 err:
        set_btree_node_io_error(b);
-       bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+       bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
                            err, PTR_BUCKET_NR(b->c, &b->key, 0),
-                           index(i, b), i->keys);
+                           bset_block_offset(b, i), i->keys);
        goto out;
 }
 
@@ -287,7 +282,7 @@ static void btree_node_read_endio(struct bio *bio, int error)
        closure_put(cl);
 }
 
-void bch_btree_node_read(struct btree *b)
+static void bch_btree_node_read(struct btree *b)
 {
        uint64_t start_time = local_clock();
        struct closure cl;
@@ -299,11 +294,11 @@ void bch_btree_node_read(struct btree *b)
 
        bio = bch_bbio_alloc(b->c);
        bio->bi_rw      = REQ_META|READ_SYNC;
-       bio->bi_size    = KEY_SIZE(&b->key) << 9;
+       bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;
 
-       bch_bio_map(bio, b->sets[0].data);
+       bch_bio_map(bio, b->keys.set[0].data);
 
        bch_submit_bbio(bio, b->c, &b->key, 0);
        closure_sync(&cl);
@@ -340,9 +335,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
        w->journal      = NULL;
 }
 
+static void btree_node_write_unlock(struct closure *cl)
+{
+       struct btree *b = container_of(cl, struct btree, io);
+
+       up(&b->io_mutex);
+}
+
 static void __btree_node_write_done(struct closure *cl)
 {
-       struct btree *b = container_of(cl, struct btree, io.cl);
+       struct btree *b = container_of(cl, struct btree, io);
        struct btree_write *w = btree_prev_write(b);
 
        bch_bbio_free(b->bio, b->c);
@@ -353,16 +355,16 @@ static void __btree_node_write_done(struct closure *cl)
                queue_delayed_work(btree_io_wq, &b->work,
                                   msecs_to_jiffies(30000));
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, btree_node_write_unlock);
 }
 
 static void btree_node_write_done(struct closure *cl)
 {
-       struct btree *b = container_of(cl, struct btree, io.cl);
+       struct btree *b = container_of(cl, struct btree, io);
        struct bio_vec *bv;
        int n;
 
-       __bio_for_each_segment(bv, b->bio, n, 0)
+       bio_for_each_segment_all(bv, b->bio, n)
                __free_page(bv->bv_page);
 
        __btree_node_write_done(cl);
@@ -371,7 +373,7 @@ static void btree_node_write_done(struct closure *cl)
 static void btree_node_write_endio(struct bio *bio, int error)
 {
        struct closure *cl = bio->bi_private;
-       struct btree *b = container_of(cl, struct btree, io.cl);
+       struct btree *b = container_of(cl, struct btree, io);
 
        if (error)
                set_btree_node_io_error(b);
@@ -382,8 +384,8 @@ static void btree_node_write_endio(struct bio *bio, int error)
 
 static void do_btree_node_write(struct btree *b)
 {
-       struct closure *cl = &b->io.cl;
-       struct bset *i = b->sets[b->nsets].data;
+       struct closure *cl = &b->io;
+       struct bset *i = btree_bset_last(b);
        BKEY_PADDED(key) k;
 
        i->version      = BCACHE_BSET_VERSION;
@@ -395,7 +397,7 @@ static void do_btree_node_write(struct btree *b)
        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
        b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
-       b->bio->bi_size         = set_blocks(i, b->c) * block_bytes(b->c);
+       b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
        bch_bio_map(b->bio, i);
 
        /*
@@ -414,14 +416,15 @@ static void do_btree_node_write(struct btree *b)
         */
 
        bkey_copy(&k.key, &b->key);
-       SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+       SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
+                      bset_sector_offset(&b->keys, i));
 
        if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
                int j;
                struct bio_vec *bv;
                void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-               bio_for_each_segment(bv, b->bio, j)
+               bio_for_each_segment_all(bv, b->bio, j)
                        memcpy(page_address(bv->bv_page),
                               base + j * PAGE_SIZE, PAGE_SIZE);
 
@@ -435,40 +438,54 @@ static void do_btree_node_write(struct btree *b)
                bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
                closure_sync(cl);
-               __btree_node_write_done(cl);
+               continue_at_nobarrier(cl, __btree_node_write_done, NULL);
        }
 }
 
 void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
-       struct bset *i = b->sets[b->nsets].data;
+       struct bset *i = btree_bset_last(b);
 
        trace_bcache_btree_write(b);
 
        BUG_ON(current->bio_list);
        BUG_ON(b->written >= btree_blocks(b));
        BUG_ON(b->written && !i->keys);
-       BUG_ON(b->sets->data->seq != i->seq);
-       bch_check_keys(b, "writing");
+       BUG_ON(btree_bset_first(b)->seq != i->seq);
+       bch_check_keys(&b->keys, "writing");
 
        cancel_delayed_work(&b->work);
 
        /* If caller isn't waiting for write, parent refcount is cache set */
-       closure_lock(&b->io, parent ?: &b->c->cl);
+       down(&b->io_mutex);
+       closure_init(&b->io, parent ?: &b->c->cl);
 
        clear_bit(BTREE_NODE_dirty,      &b->flags);
        change_bit(BTREE_NODE_write_idx, &b->flags);
 
        do_btree_node_write(b);
 
-       b->written += set_blocks(i, b->c);
-       atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+       atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
                        &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
 
-       bch_btree_sort_lazy(b);
+       b->written += set_blocks(i, block_bytes(b->c));
+
+       /* If not a leaf node, always sort */
+       if (b->level && b->keys.nsets)
+               bch_btree_sort(&b->keys, &b->c->sort);
+       else
+               bch_btree_sort_lazy(&b->keys, &b->c->sort);
+
+       /*
+        * do verify if there was more than one set initially (i.e. we did a
+        * sort) and we sorted down to a single set:
+        */
+       if (i != b->keys.set->data && !b->keys.nsets)
+               bch_btree_verify(b);
 
        if (b->written < btree_blocks(b))
-               bch_bset_init_next(b);
+               bch_bset_init_next(&b->keys, write_block(b),
+                                  bset_magic(&b->c->sb));
 }
 
 static void bch_btree_node_write_sync(struct btree *b)
@@ -493,7 +510,7 @@ static void btree_node_write_work(struct work_struct *w)
 
 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
-       struct bset *i = b->sets[b->nsets].data;
+       struct bset *i = btree_bset_last(b);
        struct btree_write *w = btree_current_write(b);
 
        BUG_ON(!b->written);
@@ -528,24 +545,6 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
  * mca -> memory cache
  */
 
-static void mca_reinit(struct btree *b)
-{
-       unsigned i;
-
-       b->flags        = 0;
-       b->written      = 0;
-       b->nsets        = 0;
-
-       for (i = 0; i < MAX_BSETS; i++)
-               b->sets[i].size = 0;
-       /*
-        * Second loop starts at 1 because b->sets[0]->data is the memory we
-        * allocated
-        */
-       for (i = 1; i < MAX_BSETS; i++)
-               b->sets[i].data = NULL;
-}
-
 #define mca_reserve(c) (((c->root && c->root->level)           \
                          ? c->root->level : 1) * 8 + 16)
 #define mca_can_free(c)                                                \
@@ -553,28 +552,12 @@ static void mca_reinit(struct btree *b)
 
 static void mca_data_free(struct btree *b)
 {
-       struct bset_tree *t = b->sets;
-       BUG_ON(!closure_is_unlocked(&b->io.cl));
+       BUG_ON(b->io_mutex.count != 1);
 
-       if (bset_prev_bytes(b) < PAGE_SIZE)
-               kfree(t->prev);
-       else
-               free_pages((unsigned long) t->prev,
-                          get_order(bset_prev_bytes(b)));
+       bch_btree_keys_free(&b->keys);
 
-       if (bset_tree_bytes(b) < PAGE_SIZE)
-               kfree(t->tree);
-       else
-               free_pages((unsigned long) t->tree,
-                          get_order(bset_tree_bytes(b)));
-
-       free_pages((unsigned long) t->data, b->page_order);
-
-       t->prev = NULL;
-       t->tree = NULL;
-       t->data = NULL;
-       list_move(&b->list, &b->c->btree_cache_freed);
        b->c->bucket_cache_used--;
+       list_move(&b->list, &b->c->btree_cache_freed);
 }
 
 static void mca_bucket_free(struct btree *b)
@@ -593,34 +576,16 @@ static unsigned btree_order(struct bkey *k)
 
 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 {
-       struct bset_tree *t = b->sets;
-       BUG_ON(t->data);
-
-       b->page_order = max_t(unsigned,
-                             ilog2(b->c->btree_pages),
-                             btree_order(k));
-
-       t->data = (void *) __get_free_pages(gfp, b->page_order);
-       if (!t->data)
-               goto err;
-
-       t->tree = bset_tree_bytes(b) < PAGE_SIZE
-               ? kmalloc(bset_tree_bytes(b), gfp)
-               : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
-       if (!t->tree)
-               goto err;
-
-       t->prev = bset_prev_bytes(b) < PAGE_SIZE
-               ? kmalloc(bset_prev_bytes(b), gfp)
-               : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
-       if (!t->prev)
-               goto err;
-
-       list_move(&b->list, &b->c->btree_cache);
-       b->c->bucket_cache_used++;
-       return;
-err:
-       mca_data_free(b);
+       if (!bch_btree_keys_alloc(&b->keys,
+                                 max_t(unsigned,
+                                       ilog2(b->c->btree_pages),
+                                       btree_order(k)),
+                                 gfp)) {
+               b->c->bucket_cache_used++;
+               list_move(&b->list, &b->c->btree_cache);
+       } else {
+               list_move(&b->list, &b->c->btree_cache_freed);
+       }
 }
 
 static struct btree *mca_bucket_alloc(struct cache_set *c,
@@ -635,7 +600,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
        INIT_LIST_HEAD(&b->list);
        INIT_DELAYED_WORK(&b->work, btree_node_write_work);
        b->c = c;
-       closure_init_unlocked(&b->io);
+       sema_init(&b->io_mutex, 1);
 
        mca_data_alloc(b, k, gfp);
        return b;
@@ -651,24 +616,31 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
        if (!down_write_trylock(&b->lock))
                return -ENOMEM;
 
-       BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+       BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
 
-       if (b->page_order < min_order ||
-           (!flush &&
-            (btree_node_dirty(b) ||
-             atomic_read(&b->io.cl.remaining) != -1))) {
-               rw_unlock(true, b);
-               return -ENOMEM;
+       if (b->keys.page_order < min_order)
+               goto out_unlock;
+
+       if (!flush) {
+               if (btree_node_dirty(b))
+                       goto out_unlock;
+
+               if (down_trylock(&b->io_mutex))
+                       goto out_unlock;
+               up(&b->io_mutex);
        }
 
        if (btree_node_dirty(b))
                bch_btree_node_write_sync(b);
 
        /* wait for any in flight btree write */
-       closure_wait_event(&b->io.wait, &cl,
-                          atomic_read(&b->io.cl.remaining) == -1);
+       down(&b->io_mutex);
+       up(&b->io_mutex);
 
        return 0;
+out_unlock:
+       rw_unlock(true, b);
+       return -ENOMEM;
 }
 
 static unsigned long bch_mca_scan(struct shrinker *shrink,
@@ -714,14 +686,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
                }
        }
 
-       /*
-        * Can happen right when we first start up, before we've read in any
-        * btree nodes
-        */
-       if (list_empty(&c->btree_cache))
-               goto out;
-
        for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
+               if (list_empty(&c->btree_cache))
+                       goto out;
+
                b = list_first_entry(&c->btree_cache, struct btree, list);
                list_rotate_left(&c->btree_cache);
 
@@ -767,6 +735,8 @@ void bch_btree_cache_free(struct cache_set *c)
 #ifdef CONFIG_BCACHE_DEBUG
        if (c->verify_data)
                list_move(&c->verify_data->list, &c->btree_cache);
+
+       free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
 #endif
 
        list_splice(&c->btree_cache_freeable,
@@ -807,10 +777,13 @@ int bch_btree_cache_alloc(struct cache_set *c)
 #ifdef CONFIG_BCACHE_DEBUG
        mutex_init(&c->verify_lock);
 
+       c->verify_ondisk = (void *)
+               __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+
        c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
 
        if (c->verify_data &&
-           c->verify_data->sets[0].data)
+           c->verify_data->keys.set->data)
                list_del_init(&c->verify_data->list);
        else
                c->verify_data = NULL;
@@ -908,7 +881,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
        list_for_each_entry(b, &c->btree_cache_freed, list)
                if (!mca_reap(b, 0, false)) {
                        mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
-                       if (!b->sets[0].data)
+                       if (!b->keys.set[0].data)
                                goto err;
                        else
                                goto out;
@@ -919,10 +892,10 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
                goto err;
 
        BUG_ON(!down_write_trylock(&b->lock));
-       if (!b->sets->data)
+       if (!b->keys.set->data)
                goto err;
 out:
-       BUG_ON(!closure_is_unlocked(&b->io.cl));
+       BUG_ON(b->io_mutex.count != 1);
 
        bkey_copy(&b->key, k);
        list_move(&b->list, &c->btree_cache);
@@ -930,10 +903,17 @@ out:
        hlist_add_head_rcu(&b->hash, mca_hash(c, k));
 
        lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
-       b->level        = level;
        b->parent       = (void *) ~0UL;
+       b->flags        = 0;
+       b->written      = 0;
+       b->level        = level;
 
-       mca_reinit(b);
+       if (!b->level)
+               bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
+                                   &b->c->expensive_debug_checks);
+       else
+               bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
+                                   &b->c->expensive_debug_checks);
 
        return b;
 err:
@@ -994,13 +974,13 @@ retry:
 
        b->accessed = 1;
 
-       for (; i <= b->nsets && b->sets[i].size; i++) {
-               prefetch(b->sets[i].tree);
-               prefetch(b->sets[i].data);
+       for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
+               prefetch(b->keys.set[i].tree);
+               prefetch(b->keys.set[i].data);
        }
 
-       for (; i <= b->nsets; i++)
-               prefetch(b->sets[i].data);
+       for (; i <= b->keys.nsets; i++)
+               prefetch(b->keys.set[i].data);
 
        if (btree_node_io_error(b)) {
                rw_unlock(write, b);
@@ -1063,7 +1043,7 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
 
        mutex_lock(&c->bucket_lock);
 retry:
-       if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+       if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
                goto err;
 
        bkey_put(c, &k.key);
@@ -1080,7 +1060,7 @@ retry:
        }
 
        b->accessed = 1;
-       bch_bset_init_next(b);
+       bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
 
        mutex_unlock(&c->bucket_lock);
 
@@ -1098,8 +1078,10 @@ err:
 static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
 {
        struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
-       if (!IS_ERR_OR_NULL(n))
-               bch_btree_sort_into(b, n);
+       if (!IS_ERR_OR_NULL(n)) {
+               bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+               bkey_copy_key(&n->key, &b->key);
+       }
 
        return n;
 }
@@ -1120,6 +1102,28 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
        atomic_inc(&b->c->prio_blocked);
 }
 
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+       struct cache_set *c = b->c;
+       struct cache *ca;
+       unsigned i, reserve = c->root->level * 2 + 1;
+       int ret = 0;
+
+       mutex_lock(&c->bucket_lock);
+
+       for_each_cache(ca, c, i)
+               if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+                       if (op)
+                               prepare_to_wait(&c->bucket_wait, &op->wait,
+                                               TASK_UNINTERRUPTIBLE);
+                       ret = -EINTR;
+                       break;
+               }
+
+       mutex_unlock(&c->bucket_lock);
+       return ret;
+}
+
 /* Garbage collection */
 
 uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1183,11 +1187,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 
        gc->nodes++;
 
-       for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
                stale = max(stale, btree_mark_key(b, k));
                keys++;
 
-               if (bch_ptr_bad(b, k))
+               if (bch_ptr_bad(&b->keys, k))
                        continue;
 
                gc->key_bytes += bkey_u64s(k);
@@ -1197,9 +1201,9 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
                gc->data += KEY_SIZE(k);
        }
 
-       for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+       for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
                btree_bug_on(t->size &&
-                            bset_written(b, t) &&
+                            bset_written(&b->keys, t) &&
                             bkey_cmp(&b->key, &t->end) < 0,
                             b, "found short btree key in gc");
 
@@ -1243,7 +1247,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        blocks = btree_default_blocks(b->c) * 2 / 3;
 
        if (nodes < 2 ||
-           __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+           __set_blocks(b->keys.set[0].data, keys,
+                        block_bytes(b->c)) > blocks * (nodes - 1))
                return 0;
 
        for (i = 0; i < nodes; i++) {
@@ -1253,18 +1258,19 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        }
 
        for (i = nodes - 1; i > 0; --i) {
-               struct bset *n1 = new_nodes[i]->sets->data;
-               struct bset *n2 = new_nodes[i - 1]->sets->data;
+               struct bset *n1 = btree_bset_first(new_nodes[i]);
+               struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
                struct bkey *k, *last = NULL;
 
                keys = 0;
 
                if (i > 1) {
                        for (k = n2->start;
-                            k < end(n2);
+                            k < bset_bkey_last(n2);
                             k = bkey_next(k)) {
                                if (__set_blocks(n1, n1->keys + keys +
-                                                bkey_u64s(k), b->c) > blocks)
+                                                bkey_u64s(k),
+                                                block_bytes(b->c)) > blocks)
                                        break;
 
                                last = k;
@@ -1280,7 +1286,8 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                         * though)
                         */
                        if (__set_blocks(n1, n1->keys + n2->keys,
-                                        b->c) > btree_blocks(new_nodes[i]))
+                                        block_bytes(b->c)) >
+                           btree_blocks(new_nodes[i]))
                                goto out_nocoalesce;
 
                        keys = n2->keys;
@@ -1288,27 +1295,28 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                        last = &r->b->key;
                }
 
-               BUG_ON(__set_blocks(n1, n1->keys + keys,
-                                   b->c) > btree_blocks(new_nodes[i]));
+               BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+                      btree_blocks(new_nodes[i]));
 
                if (last)
                        bkey_copy_key(&new_nodes[i]->key, last);
 
-               memcpy(end(n1),
+               memcpy(bset_bkey_last(n1),
                       n2->start,
-                      (void *) node(n2, keys) - (void *) n2->start);
+                      (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
 
                n1->keys += keys;
                r[i].keys = n1->keys;
 
                memmove(n2->start,
-                       node(n2, keys),
-                       (void *) end(n2) - (void *) node(n2, keys));
+                       bset_bkey_idx(n2, keys),
+                       (void *) bset_bkey_last(n2) -
+                       (void *) bset_bkey_idx(n2, keys));
 
                n2->keys -= keys;
 
-               if (bch_keylist_realloc(keylist,
-                                       KEY_PTRS(&new_nodes[i]->key), b->c))
+               if (__bch_keylist_realloc(keylist,
+                                         bkey_u64s(&new_nodes[i]->key)))
                        goto out_nocoalesce;
 
                bch_btree_node_write(new_nodes[i], &cl);
@@ -1316,7 +1324,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        }
 
        for (i = 0; i < nodes; i++) {
-               if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+               if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
                        goto out_nocoalesce;
 
                make_btree_freeing_key(r[i].b, keylist->top);
@@ -1324,7 +1332,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        }
 
        /* We emptied out this node */
-       BUG_ON(new_nodes[0]->sets->data->keys);
+       BUG_ON(btree_bset_first(new_nodes[0])->keys);
        btree_node_free(new_nodes[0]);
        rw_unlock(true, new_nodes[0]);
 
@@ -1370,7 +1378,7 @@ static unsigned btree_gc_count_keys(struct btree *b)
        struct btree_iter iter;
        unsigned ret = 0;
 
-       for_each_key_filter(b, k, &iter, bch_ptr_bad)
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                ret += bkey_u64s(k);
 
        return ret;
@@ -1390,13 +1398,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
        struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
 
        bch_keylist_init(&keys);
-       bch_btree_iter_init(b, &iter, &b->c->gc_done);
+       bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
 
        for (i = 0; i < GC_MERGE_NODES; i++)
                r[i].b = ERR_PTR(-EINTR);
 
        while (1) {
-               k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+               k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
                if (k) {
                        r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
                        if (IS_ERR(r->b)) {
@@ -1416,7 +1424,8 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 
                if (!IS_ERR(last->b)) {
                        should_rewrite = btree_gc_mark_node(last->b, gc);
-                       if (should_rewrite) {
+                       if (should_rewrite &&
+                           !btree_check_reserve(b, NULL)) {
                                n = btree_node_alloc_replacement(last->b,
                                                                 false);
 
@@ -1705,7 +1714,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
        struct bucket *g;
        struct btree_iter iter;
 
-       for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
                for (i = 0; i < KEY_PTRS(k); i++) {
                        if (!ptr_available(b->c, k, i))
                                continue;
@@ -1728,10 +1737,11 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
        }
 
        if (b->level) {
-               bch_btree_iter_init(b, &iter, NULL);
+               bch_btree_iter_init(&b->keys, &iter, NULL);
 
                do {
-                       k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+                       k = bch_btree_iter_next_filter(&iter, &b->keys,
+                                                      bch_ptr_bad);
                        if (k)
                                btree_node_prefetch(b->c, k, b->level - 1);
 
@@ -1774,235 +1784,36 @@ err:
 
 /* Btree insertion */
 
-static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
-{
-       struct bset *i = b->sets[b->nsets].data;
-
-       memmove((uint64_t *) where + bkey_u64s(insert),
-               where,
-               (void *) end(i) - (void *) where);
-
-       i->keys += bkey_u64s(insert);
-       bkey_copy(where, insert);
-       bch_bset_fix_lookup_table(b, where);
-}
-
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-                                   struct btree_iter *iter,
-                                   struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+                            struct bkey *replace_key)
 {
-       void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-       {
-               if (KEY_DIRTY(k))
-                       bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-                                                    offset, -sectors);
-       }
-
-       uint64_t old_offset;
-       unsigned old_size, sectors_found = 0;
-
-       while (1) {
-               struct bkey *k = bch_btree_iter_next(iter);
-               if (!k ||
-                   bkey_cmp(&START_KEY(k), insert) >= 0)
-                       break;
-
-               if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-                       continue;
-
-               old_offset = KEY_START(k);
-               old_size = KEY_SIZE(k);
-
-               /*
-                * We might overlap with 0 size extents; we can't skip these
-                * because if they're in the set we're inserting to we have to
-                * adjust them so they don't overlap with the key we're
-                * inserting. But we don't want to check them for replace
-                * operations.
-                */
-
-               if (replace_key && KEY_SIZE(k)) {
-                       /*
-                        * k might have been split since we inserted/found the
-                        * key we're replacing
-                        */
-                       unsigned i;
-                       uint64_t offset = KEY_START(k) -
-                               KEY_START(replace_key);
-
-                       /* But it must be a subset of the replace key */
-                       if (KEY_START(k) < KEY_START(replace_key) ||
-                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-                               goto check_failed;
-
-                       /* We didn't find a key that we were supposed to */
-                       if (KEY_START(k) > KEY_START(insert) + sectors_found)
-                               goto check_failed;
-
-                       if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-                           KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-                               goto check_failed;
-
-                       /* skip past gen */
-                       offset <<= 8;
-
-                       BUG_ON(!KEY_PTRS(replace_key));
+       unsigned status;
 
-                       for (i = 0; i < KEY_PTRS(replace_key); i++)
-                               if (k->ptr[i] != replace_key->ptr[i] + offset)
-                                       goto check_failed;
-
-                       sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-               }
-
-               if (bkey_cmp(insert, k) < 0 &&
-                   bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-                       /*
-                        * We overlapped in the middle of an existing key: that
-                        * means we have to split the old key. But we have to do
-                        * slightly different things depending on whether the
-                        * old key has been written out yet.
-                        */
-
-                       struct bkey *top;
-
-                       subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-                       if (bkey_written(b, k)) {
-                               /*
-                                * We insert a new key to cover the top of the
-                                * old key, and the old key is modified in place
-                                * to represent the bottom split.
-                                *
-                                * It's completely arbitrary whether the new key
-                                * is the top or the bottom, but it has to match
-                                * up with what btree_sort_fixup() does - it
-                                * doesn't check for this kind of overlap, it
-                                * depends on us inserting a new key for the top
-                                * here.
-                                */
-                               top = bch_bset_search(b, &b->sets[b->nsets],
-                                                     insert);
-                               shift_keys(b, top, k);
-                       } else {
-                               BKEY_PADDED(key) temp;
-                               bkey_copy(&temp.key, k);
-                               shift_keys(b, k, &temp.key);
-                               top = bkey_next(k);
-                       }
-
-                       bch_cut_front(insert, top);
-                       bch_cut_back(&START_KEY(insert), k);
-                       bch_bset_fix_invalidated_key(b, k);
-                       return false;
-               }
-
-               if (bkey_cmp(insert, k) < 0) {
-                       bch_cut_front(insert, k);
-               } else {
-                       if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-                               old_offset = KEY_START(insert);
-
-                       if (bkey_written(b, k) &&
-                           bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-                               /*
-                                * Completely overwrote, so we don't have to
-                                * invalidate the binary search tree
-                                */
-                               bch_cut_front(k, k);
-                       } else {
-                               __bch_cut_back(&START_KEY(insert), k);
-                               bch_bset_fix_invalidated_key(b, k);
-                       }
-               }
-
-               subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-       }
+       BUG_ON(bkey_cmp(k, &b->key) > 0);
 
-check_failed:
-       if (replace_key) {
-               if (!sectors_found) {
-                       return true;
-               } else if (sectors_found < KEY_SIZE(insert)) {
-                       SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-                                      (KEY_SIZE(insert) - sectors_found));
-                       SET_KEY_SIZE(insert, sectors_found);
-               }
-       }
+       status = bch_btree_insert_key(&b->keys, k, replace_key);
+       if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+               bch_check_keys(&b->keys, "%u for %s", status,
+                              replace_key ? "replace" : "insert");
 
-       return false;
+               trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+                                             status);
+               return true;
+       } else
+               return false;
 }
 
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-                            struct bkey *k, struct bkey *replace_key)
+static size_t insert_u64s_remaining(struct btree *b)
 {
-       struct bset *i = b->sets[b->nsets].data;
-       struct bkey *m, *prev;
-       unsigned status = BTREE_INSERT_STATUS_INSERT;
-
-       BUG_ON(bkey_cmp(k, &b->key) > 0);
-       BUG_ON(b->level && !KEY_PTRS(k));
-       BUG_ON(!b->level && !KEY_OFFSET(k));
-
-       if (!b->level) {
-               struct btree_iter iter;
-
-               /*
-                * bset_search() returns the first key that is strictly greater
-                * than the search key - but for back merging, we want to find
-                * the previous key.
-                */
-               prev = NULL;
-               m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
+       ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
 
-               if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-                       op->insert_collision = true;
-                       return false;
-               }
-
-               if (KEY_DIRTY(k))
-                       bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-                                                    KEY_START(k), KEY_SIZE(k));
-
-               while (m != end(i) &&
-                      bkey_cmp(k, &START_KEY(m)) > 0)
-                       prev = m, m = bkey_next(m);
-
-               if (key_merging_disabled(b->c))
-                       goto insert;
-
-               /* prev is in the tree, if we merge we're done */
-               status = BTREE_INSERT_STATUS_BACK_MERGE;
-               if (prev &&
-                   bch_bkey_try_merge(b, prev, k))
-                       goto merged;
-
-               status = BTREE_INSERT_STATUS_OVERWROTE;
-               if (m != end(i) &&
-                   KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-                       goto copy;
-
-               status = BTREE_INSERT_STATUS_FRONT_MERGE;
-               if (m != end(i) &&
-                   bch_bkey_try_merge(b, k, m))
-                       goto copy;
-       } else {
-               BUG_ON(replace_key);
-               m = bch_bset_search(b, &b->sets[b->nsets], k);
-       }
-
-insert:        shift_keys(b, m, k);
-copy:  bkey_copy(m, k);
-merged:
-       bch_check_keys(b, "%u for %s", status,
-                      replace_key ? "replace" : "insert");
-
-       if (b->level && !KEY_OFFSET(k))
-               btree_current_write(b)->prio_blocked++;
-
-       trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
+       /*
+        * Might land in the middle of an existing extent and have to split it
+        */
+       if (b->keys.ops->is_extents)
+               ret -= KEY_MAX_U64S;
 
-       return true;
+       return max(ret, 0L);
 }
 
 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
@@ -2010,21 +1821,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                                  struct bkey *replace_key)
 {
        bool ret = false;
-       int oldsize = bch_count_data(b);
+       int oldsize = bch_count_data(&b->keys);
 
        while (!bch_keylist_empty(insert_keys)) {
-               struct bset *i = write_block(b);
                struct bkey *k = insert_keys->keys;
 
-               if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
-                   > btree_blocks(b))
+               if (bkey_u64s(k) > insert_u64s_remaining(b))
                        break;
 
                if (bkey_cmp(k, &b->key) <= 0) {
                        if (!b->level)
                                bkey_put(b->c, k);
 
-                       ret |= btree_insert_key(b, op, k, replace_key);
+                       ret |= btree_insert_key(b, k, replace_key);
                        bch_keylist_pop_front(insert_keys);
                } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
                        BKEY_PADDED(key) temp;
@@ -2033,16 +1842,19 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                        bch_cut_back(&b->key, &temp.key);
                        bch_cut_front(&b->key, insert_keys->keys);
 
-                       ret |= btree_insert_key(b, op, &temp.key, replace_key);
+                       ret |= btree_insert_key(b, &temp.key, replace_key);
                        break;
                } else {
                        break;
                }
        }
 
+       if (!ret)
+               op->insert_collision = true;
+
        BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
-       BUG_ON(bch_count_data(b) < oldsize);
+       BUG_ON(bch_count_data(&b->keys) < oldsize);
        return ret;
 }
 
@@ -2059,16 +1871,21 @@ static int btree_split(struct btree *b, struct btree_op *op,
        closure_init_stack(&cl);
        bch_keylist_init(&parent_keys);
 
+       if (!b->level &&
+           btree_check_reserve(b, op))
+               return -EINTR;
+
        n1 = btree_node_alloc_replacement(b, true);
        if (IS_ERR(n1))
                goto err;
 
-       split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+       split = set_blocks(btree_bset_first(n1),
+                          block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
 
        if (split) {
                unsigned keys = 0;
 
-               trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+               trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
 
                n2 = bch_btree_node_alloc(b->c, b->level, true);
                if (IS_ERR(n2))
@@ -2087,18 +1904,20 @@ static int btree_split(struct btree *b, struct btree_op *op,
                 * search tree yet
                 */
 
-               while (keys < (n1->sets[0].data->keys * 3) / 5)
-                       keys += bkey_u64s(node(n1->sets[0].data, keys));
+               while (keys < (btree_bset_first(n1)->keys * 3) / 5)
+                       keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
+                                                       keys));
 
-               bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
-               keys += bkey_u64s(node(n1->sets[0].data, keys));
+               bkey_copy_key(&n1->key,
+                             bset_bkey_idx(btree_bset_first(n1), keys));
+               keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
 
-               n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
-               n1->sets[0].data->keys = keys;
+               btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
+               btree_bset_first(n1)->keys = keys;
 
-               memcpy(n2->sets[0].data->start,
-                      end(n1->sets[0].data),
-                      n2->sets[0].data->keys * sizeof(uint64_t));
+               memcpy(btree_bset_first(n2)->start,
+                      bset_bkey_last(btree_bset_first(n1)),
+                      btree_bset_first(n2)->keys * sizeof(uint64_t));
 
                bkey_copy_key(&n2->key, &b->key);
 
@@ -2106,7 +1925,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                bch_btree_node_write(n2, &cl);
                rw_unlock(true, n2);
        } else {
-               trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+               trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
 
                bch_btree_insert_keys(n1, op, insert_keys, replace_key);
        }
@@ -2149,18 +1968,21 @@ static int btree_split(struct btree *b, struct btree_op *op,
 
        return 0;
 err_free2:
+       bkey_put(b->c, &n2->key);
        btree_node_free(n2);
        rw_unlock(true, n2);
 err_free1:
+       bkey_put(b->c, &n1->key);
        btree_node_free(n1);
        rw_unlock(true, n1);
 err:
+       WARN(1, "bcache: btree split failed");
+
        if (n3 == ERR_PTR(-EAGAIN) ||
            n2 == ERR_PTR(-EAGAIN) ||
            n1 == ERR_PTR(-EAGAIN))
                return -EAGAIN;
 
-       pr_warn("couldn't split");
        return -ENOMEM;
 }
 
@@ -2171,7 +1993,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 {
        BUG_ON(b->level && replace_key);
 
-       if (should_split(b)) {
+       if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
                if (current->bio_list) {
                        op->lock = b->c->root->level + 1;
                        return -EAGAIN;
@@ -2180,11 +2002,13 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
                        return -EINTR;
                } else {
                        /* Invalidated all iterators */
-                       return btree_split(b, op, insert_keys, replace_key) ?:
-                               -EINTR;
+                       int ret = btree_split(b, op, insert_keys, replace_key);
+
+                       return bch_keylist_empty(insert_keys) ?
+                               0 : ret ?: -EINTR;
                }
        } else {
-               BUG_ON(write_block(b) != b->sets[b->nsets].data);
+               BUG_ON(write_block(b) != btree_bset_last(b));
 
                if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
                        if (!b->level)
@@ -2323,9 +2147,9 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
                struct bkey *k;
                struct btree_iter iter;
 
-               bch_btree_iter_init(b, &iter, from);
+               bch_btree_iter_init(&b->keys, &iter, from);
 
-               while ((k = bch_btree_iter_next_filter(&iter, b,
+               while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
                                                       bch_ptr_bad))) {
                        ret = btree(map_nodes_recurse, k, b,
                                    op, from, fn, flags);
@@ -2356,9 +2180,9 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
        struct bkey *k;
        struct btree_iter iter;
 
-       bch_btree_iter_init(b, &iter, from);
+       bch_btree_iter_init(&b->keys, &iter, from);
 
-       while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+       while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
                ret = !b->level
                        ? fn(op, b, k)
                        : btree(map_keys_recurse, k, b, op, from, fn, flags);
index 767e755708964ce82f36dc88e28281b5c1b90177..af065e97e55c4186782422db0bae20498fd3cdb8 100644 (file)
@@ -130,20 +130,12 @@ struct btree {
        unsigned long           flags;
        uint16_t                written;        /* would be nice to kill */
        uint8_t                 level;
-       uint8_t                 nsets;
-       uint8_t                 page_order;
-
-       /*
-        * Set of sorted keys - the real btree node - plus a binary search tree
-        *
-        * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
-        * to the memory we have allocated for this btree node. Additionally,
-        * set[0]->data points to the entire btree node as it exists on disk.
-        */
-       struct bset_tree        sets[MAX_BSETS];
+
+       struct btree_keys       keys;
 
        /* For outstanding btree writes, used as a lock - protects write_idx */
-       struct closure_with_waitlist    io;
+       struct closure          io;
+       struct semaphore        io_mutex;
 
        struct list_head        list;
        struct delayed_work     work;
@@ -179,24 +171,19 @@ static inline struct btree_write *btree_prev_write(struct btree *b)
        return b->writes + (btree_node_write_idx(b) ^ 1);
 }
 
-static inline unsigned bset_offset(struct btree *b, struct bset *i)
+static inline struct bset *btree_bset_first(struct btree *b)
 {
-       return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
+       return b->keys.set->data;
 }
 
-static inline struct bset *write_block(struct btree *b)
+static inline struct bset *btree_bset_last(struct btree *b)
 {
-       return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+       return bset_tree_last(&b->keys)->data;
 }
 
-static inline bool bset_written(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
 {
-       return t->data < write_block(b);
-}
-
-static inline bool bkey_written(struct btree *b, struct bkey *k)
-{
-       return k < write_block(b)->start;
+       return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
 }
 
 static inline void set_gc_sectors(struct cache_set *c)
@@ -204,21 +191,6 @@ static inline void set_gc_sectors(struct cache_set *c)
        atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
-static inline struct bkey *bch_btree_iter_init(struct btree *b,
-                                              struct btree_iter *iter,
-                                              struct bkey *search)
-{
-       return __bch_btree_iter_init(b, iter, search, b->sets);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-       if (b->level)
-               return bch_btree_ptr_invalid(b->c, k);
-       else
-               return bch_extent_ptr_invalid(b->c, k);
-}
-
 void bkey_put(struct cache_set *c, struct bkey *k);
 
 /* Looping macros */
@@ -229,17 +201,12 @@ void bkey_put(struct cache_set *c, struct bkey *k);
             iter++)                                                    \
                hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
 
-#define for_each_key_filter(b, k, iter, filter)                                \
-       for (bch_btree_iter_init((b), (iter), NULL);                    \
-            ((k) = bch_btree_iter_next_filter((iter), b, filter));)
-
-#define for_each_key(b, k, iter)                                       \
-       for (bch_btree_iter_init((b), (iter), NULL);                    \
-            ((k) = bch_btree_iter_next(iter));)
-
 /* Recursing down the btree */
 
 struct btree_op {
+       /* for waiting on btree reserve in btree_split() */
+       wait_queue_t            wait;
+
        /* Btree level at which we start taking write locks */
        short                   lock;
 
@@ -249,6 +216,7 @@ struct btree_op {
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 {
        memset(op, 0, sizeof(struct btree_op));
+       init_wait(&op->wait);
        op->lock = write_lock_level;
 }
 
@@ -267,7 +235,7 @@ static inline void rw_unlock(bool w, struct btree *b)
        (w ? up_write : up_read)(&b->lock);
 }
 
-void bch_btree_node_read(struct btree *);
+void bch_btree_node_read_done(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
 void bch_btree_set_root(struct btree *);
index dfff2410322e70263ac63e6dbfd9536bd4a5dc54..7a228de95fd7e94fa61acb0758a53d8670770383 100644 (file)
 
 #include "closure.h"
 
-#define CL_FIELD(type, field)                                  \
-       case TYPE_ ## type:                                     \
-       return &container_of(cl, struct type, cl)->field
-
-static struct closure_waitlist *closure_waitlist(struct closure *cl)
-{
-       switch (cl->type) {
-               CL_FIELD(closure_with_waitlist, wait);
-       default:
-               return NULL;
-       }
-}
-
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
        int r = flags & CLOSURE_REMAINING_MASK;
@@ -42,17 +29,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
                        closure_queue(cl);
                } else {
                        struct closure *parent = cl->parent;
-                       struct closure_waitlist *wait = closure_waitlist(cl);
                        closure_fn *destructor = cl->fn;
 
                        closure_debug_destroy(cl);
 
-                       smp_mb();
-                       atomic_set(&cl->remaining, -1);
-
-                       if (wait)
-                               closure_wake_up(wait);
-
                        if (destructor)
                                destructor(cl);
 
@@ -69,19 +49,18 @@ void closure_sub(struct closure *cl, int v)
 }
 EXPORT_SYMBOL(closure_sub);
 
+/**
+ * closure_put - decrement a closure's refcount
+ */
 void closure_put(struct closure *cl)
 {
        closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
 EXPORT_SYMBOL(closure_put);
 
-static void set_waiting(struct closure *cl, unsigned long f)
-{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-       cl->waiting_on = f;
-#endif
-}
-
+/**
+ * closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
        struct llist_node *list;
@@ -106,27 +85,34 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
                cl = container_of(reverse, struct closure, list);
                reverse = llist_next(reverse);
 
-               set_waiting(cl, 0);
+               closure_set_waiting(cl, 0);
                closure_sub(cl, CLOSURE_WAITING + 1);
        }
 }
 EXPORT_SYMBOL(__closure_wake_up);
 
-bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+/**
+ * closure_wait - add a closure to a waitlist
+ *
+ * @waitlist will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
 {
        if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
                return false;
 
-       set_waiting(cl, _RET_IP_);
+       closure_set_waiting(cl, _RET_IP_);
        atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
-       llist_add(&cl->list, &list->list);
+       llist_add(&cl->list, &waitlist->list);
 
        return true;
 }
 EXPORT_SYMBOL(closure_wait);
 
 /**
- * closure_sync() - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure a closure has nothing left to wait on
  *
  * Sleeps until the refcount hits 1 - the thread that's running the closure owns
  * the last refcount.
@@ -148,46 +134,6 @@ void closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_sync);
 
-/**
- * closure_trylock() - try to acquire the closure, without waiting
- * @cl:                closure to lock
- *
- * Returns true if the closure was succesfully locked.
- */
-bool closure_trylock(struct closure *cl, struct closure *parent)
-{
-       if (atomic_cmpxchg(&cl->remaining, -1,
-                          CLOSURE_REMAINING_INITIALIZER) != -1)
-               return false;
-
-       smp_mb();
-
-       cl->parent = parent;
-       if (parent)
-               closure_get(parent);
-
-       closure_set_ret_ip(cl);
-       closure_debug_create(cl);
-       return true;
-}
-EXPORT_SYMBOL(closure_trylock);
-
-void __closure_lock(struct closure *cl, struct closure *parent,
-                   struct closure_waitlist *wait_list)
-{
-       struct closure wait;
-       closure_init_stack(&wait);
-
-       while (1) {
-               if (closure_trylock(cl, parent))
-                       return;
-
-               closure_wait_event(wait_list, &wait,
-                                  atomic_read(&cl->remaining) == -1);
-       }
-}
-EXPORT_SYMBOL(__closure_lock);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 static LIST_HEAD(closure_list);
index 9762f1be3304f1349cc21f001cfa02d5401470e9..7ef7461912be252d00ce23b79504664e96ffd84f 100644 (file)
  * closure - _always_ use continue_at(). Doing so consistently will help
  * eliminate an entire class of particularly pernicious races.
  *
- * For a closure to wait on an arbitrary event, we need to introduce waitlists:
- *
- * struct closure_waitlist list;
- * closure_wait_event(list, cl, condition);
- * closure_wake_up(wait_list);
- *
- * These work analagously to wait_event() and wake_up() - except that instead of
- * operating on the current thread (for wait_event()) and lists of threads, they
- * operate on an explicit closure and lists of closures.
- *
- * Because it's a closure we can now wait either synchronously or
- * asynchronously. closure_wait_event() returns the current value of the
- * condition, and if it returned false continue_at() or closure_sync() can be
- * used to wait for it to become true.
- *
- * It's useful for waiting on things when you can't sleep in the context in
- * which you must check the condition (perhaps a spinlock held, or you might be
- * beneath generic_make_request() - in which case you can't sleep on IO).
- *
- * closure_wait_event() will wait either synchronously or asynchronously,
- * depending on whether the closure is in blocking mode or not. You can pick a
- * mode explicitly with closure_wait_event_sync() and
- * closure_wait_event_async(), which do just what you might expect.
- *
  * Lastly, you might have a wait list dedicated to a specific event, and have no
  * need for specifying the condition - you just want to wait until someone runs
  * closure_wake_up() on the appropriate wait list. In that case, just use
  * All this implies that a closure should typically be embedded in a particular
  * struct (which its refcount will normally control the lifetime of), and that
  * struct can very much be thought of as a stack frame.
- *
- * Locking:
- *
- * Closures are based on work items but they can be thought of as more like
- * threads - in that like threads and unlike work items they have a well
- * defined lifetime; they are created (with closure_init()) and eventually
- * complete after a continue_at(cl, NULL, NULL).
- *
- * Suppose you've got some larger structure with a closure embedded in it that's
- * used for periodically doing garbage collection. You only want one garbage
- * collection happening at a time, so the natural thing to do is protect it with
- * a lock. However, it's difficult to use a lock protecting a closure correctly
- * because the unlock should come after the last continue_to() (additionally, if
- * you're using the closure asynchronously a mutex won't work since a mutex has
- * to be unlocked by the same process that locked it).
- *
- * So to make it less error prone and more efficient, we also have the ability
- * to use closures as locks:
- *
- * closure_init_unlocked();
- * closure_trylock();
- *
- * That's all we need for trylock() - the last closure_put() implicitly unlocks
- * it for you.  But for closure_lock(), we also need a wait list:
- *
- * struct closure_with_waitlist frobnicator_cl;
- *
- * closure_init_unlocked(&frobnicator_cl);
- * closure_lock(&frobnicator_cl);
- *
- * A closure_with_waitlist embeds a closure and a wait list - much like struct
- * delayed_work embeds a work item and a timer_list. The important thing is, use
- * it exactly like you would a regular closure and closure_put() will magically
- * handle everything for you.
  */
 
 struct closure;
@@ -164,12 +106,6 @@ struct closure_waitlist {
        struct llist_head       list;
 };
 
-enum closure_type {
-       TYPE_closure                            = 0,
-       TYPE_closure_with_waitlist              = 1,
-       MAX_CLOSURE_TYPE                        = 1,
-};
-
 enum closure_state {
        /*
         * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
@@ -224,8 +160,6 @@ struct closure {
 
        atomic_t                remaining;
 
-       enum closure_type       type;
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 #define CLOSURE_MAGIC_DEAD     0xc054dead
 #define CLOSURE_MAGIC_ALIVE    0xc054a11e
@@ -237,34 +171,12 @@ struct closure {
 #endif
 };
 
-struct closure_with_waitlist {
-       struct closure          cl;
-       struct closure_waitlist wait;
-};
-
-extern unsigned invalid_closure_type(void);
-
-#define __CLOSURE_TYPE(cl, _t)                                         \
-         __builtin_types_compatible_p(typeof(cl), struct _t)           \
-               ? TYPE_ ## _t :                                         \
-
-#define __closure_type(cl)                                             \
-(                                                                      \
-       __CLOSURE_TYPE(cl, closure)                                     \
-       __CLOSURE_TYPE(cl, closure_with_waitlist)                       \
-       invalid_closure_type()                                          \
-)
-
 void closure_sub(struct closure *cl, int v);
 void closure_put(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void closure_sync(struct closure *cl);
 
-bool closure_trylock(struct closure *cl, struct closure *parent);
-void __closure_lock(struct closure *cl, struct closure *parent,
-                   struct closure_waitlist *wait_list);
-
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 
 void closure_debug_init(void);
@@ -293,134 +205,97 @@ static inline void closure_set_ret_ip(struct closure *cl)
 #endif
 }
 
-static inline void closure_get(struct closure *cl)
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
 {
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
-       BUG_ON((atomic_inc_return(&cl->remaining) &
-               CLOSURE_REMAINING_MASK) <= 1);
-#else
-       atomic_inc(&cl->remaining);
+       cl->waiting_on = f;
 #endif
 }
 
-static inline void closure_set_stopped(struct closure *cl)
+static inline void __closure_end_sleep(struct closure *cl)
 {
-       atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+       __set_current_state(TASK_RUNNING);
+
+       if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+               atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
 }
 
-static inline bool closure_is_unlocked(struct closure *cl)
+static inline void __closure_start_sleep(struct closure *cl)
 {
-       return atomic_read(&cl->remaining) == -1;
+       closure_set_ip(cl);
+       cl->task = current;
+       set_current_state(TASK_UNINTERRUPTIBLE);
+
+       if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+               atomic_add(CLOSURE_SLEEPING, &cl->remaining);
 }
 
-static inline void do_closure_init(struct closure *cl, struct closure *parent,
-                                  bool running)
+static inline void closure_set_stopped(struct closure *cl)
 {
-       cl->parent = parent;
-       if (parent)
-               closure_get(parent);
-
-       if (running) {
-               closure_debug_create(cl);
-               atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
-       } else
-               atomic_set(&cl->remaining, -1);
+       atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
 
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+                                 struct workqueue_struct *wq)
+{
+       BUG_ON(object_is_on_stack(cl));
        closure_set_ip(cl);
+       cl->fn = fn;
+       cl->wq = wq;
+       /* between atomic_dec() in closure_put() */
+       smp_mb__before_atomic_dec();
 }
 
-/*
- * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
- * the result of __closure_type() is thrown away, it's used merely for type
- * checking.
- */
-#define __to_internal_closure(cl)                              \
-({                                                             \
-       BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE);   \
-       (struct closure *) cl;                                  \
-})
-
-#define closure_init_type(cl, parent, running)                 \
-do {                                                           \
-       struct closure *_cl = __to_internal_closure(cl);        \
-       _cl->type = __closure_type(*(cl));                      \
-       do_closure_init(_cl, parent, running);                  \
-} while (0)
+static inline void closure_queue(struct closure *cl)
+{
+       struct workqueue_struct *wq = cl->wq;
+       if (wq) {
+               INIT_WORK(&cl->work, cl->work.func);
+               BUG_ON(!queue_work(wq, &cl->work));
+       } else
+               cl->fn(cl);
+}
 
 /**
- * __closure_init() - Initialize a closure, skipping the memset()
- *
- * May be used instead of closure_init() when memory has already been zeroed.
+ * closure_get - increment a closure's refcount
  */
-#define __closure_init(cl, parent)                             \
-       closure_init_type(cl, parent, true)
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+       BUG_ON((atomic_inc_return(&cl->remaining) &
+               CLOSURE_REMAINING_MASK) <= 1);
+#else
+       atomic_inc(&cl->remaining);
+#endif
+}
 
 /**
- * closure_init() - Initialize a closure, setting the refcount to 1
+ * closure_init - Initialize a closure, setting the refcount to 1
  * @cl:                closure to initialize
  * @parent:    parent of the new closure. cl will take a refcount on it for its
  *             lifetime; may be NULL.
  */
-#define closure_init(cl, parent)                               \
-do {                                                           \
-       memset((cl), 0, sizeof(*(cl)));                         \
-       __closure_init(cl, parent);                             \
-} while (0)
-
-static inline void closure_init_stack(struct closure *cl)
+static inline void closure_init(struct closure *cl, struct closure *parent)
 {
        memset(cl, 0, sizeof(struct closure));
-       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
-}
-
-/**
- * closure_init_unlocked() - Initialize a closure but leave it unlocked.
- * @cl:                closure to initialize
- *
- * For when the closure will be used as a lock. The closure may not be used
- * until after a closure_lock() or closure_trylock().
- */
-#define closure_init_unlocked(cl)                              \
-do {                                                           \
-       memset((cl), 0, sizeof(*(cl)));                         \
-       closure_init_type(cl, NULL, false);                     \
-} while (0)
-
-/**
- * closure_lock() - lock and initialize a closure.
- * @cl:                the closure to lock
- * @parent:    the new parent for this closure
- *
- * The closure must be of one of the types that has a waitlist (otherwise we
- * wouldn't be able to sleep on contention).
- *
- * @parent has exactly the same meaning as in closure_init(); if non null, the
- * closure will take a reference on @parent which will be released when it is
- * unlocked.
- */
-#define closure_lock(cl, parent)                               \
-       __closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+       cl->parent = parent;
+       if (parent)
+               closure_get(parent);
 
-static inline void __closure_end_sleep(struct closure *cl)
-{
-       __set_current_state(TASK_RUNNING);
+       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 
-       if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
-               atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+       closure_debug_create(cl);
+       closure_set_ip(cl);
 }
 
-static inline void __closure_start_sleep(struct closure *cl)
+static inline void closure_init_stack(struct closure *cl)
 {
-       closure_set_ip(cl);
-       cl->task = current;
-       set_current_state(TASK_UNINTERRUPTIBLE);
-
-       if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
-               atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+       memset(cl, 0, sizeof(struct closure));
+       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
 }
 
 /**
- * closure_wake_up() - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list.
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
@@ -428,69 +303,19 @@ static inline void closure_wake_up(struct closure_waitlist *list)
        __closure_wake_up(list);
 }
 
-/*
- * Wait on an event, synchronously or asynchronously - analogous to wait_event()
- * but for closures.
- *
- * The loop is oddly structured so as to avoid a race; we must check the
- * condition again after we've added ourself to the waitlist. We know if we were
- * already on the waitlist because closure_wait() returns false; thus, we only
- * schedule or break if closure_wait() returns false. If it returns true, we
- * just loop again - rechecking the condition.
- *
- * The __closure_wake_up() is necessary because we may race with the event
- * becoming true; i.e. we see event false -> wait -> recheck condition, but the
- * thread that made the event true may have called closure_wake_up() before we
- * added ourself to the wait list.
- *
- * We have to call closure_sync() at the end instead of just
- * __closure_end_sleep() because a different thread might've called
- * closure_wake_up() before us and gotten preempted before they dropped the
- * refcount on our closure. If this was a stack allocated closure, that would be
- * bad.
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * NOTE: This macro expands to a return in the calling function!
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
  */
-#define closure_wait_event(list, cl, condition)                                \
-({                                                                     \
-       typeof(condition) ret;                                          \
-                                                                       \
-       while (1) {                                                     \
-               ret = (condition);                                      \
-               if (ret) {                                              \
-                       __closure_wake_up(list);                        \
-                       closure_sync(cl);                               \
-                       break;                                          \
-               }                                                       \
-                                                                       \
-               __closure_start_sleep(cl);                              \
-                                                                       \
-               if (!closure_wait(list, cl))                            \
-                       schedule();                                     \
-       }                                                               \
-                                                                       \
-       ret;                                                            \
-})
-
-static inline void closure_queue(struct closure *cl)
-{
-       struct workqueue_struct *wq = cl->wq;
-       if (wq) {
-               INIT_WORK(&cl->work, cl->work.func);
-               BUG_ON(!queue_work(wq, &cl->work));
-       } else
-               cl->fn(cl);
-}
-
-static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
-                                 struct workqueue_struct *wq)
-{
-       BUG_ON(object_is_on_stack(cl));
-       closure_set_ip(cl);
-       cl->fn = fn;
-       cl->wq = wq;
-       /* between atomic_dec() in closure_put() */
-       smp_mb__before_atomic_dec();
-}
-
 #define continue_at(_cl, _fn, _wq)                                     \
 do {                                                                   \
        set_closure_fn(_cl, _fn, _wq);                                  \
@@ -498,8 +323,28 @@ do {                                                                       \
        return;                                                         \
 } while (0)
 
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
 #define closure_return(_cl)    continue_at((_cl), NULL, NULL)
 
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * NOTE: like continue_at(), this macro expands to a return in the caller!
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
 #define continue_at_nobarrier(_cl, _fn, _wq)                           \
 do {                                                                   \
        set_closure_fn(_cl, _fn, _wq);                                  \
@@ -507,6 +352,15 @@ do {                                                                       \
        return;                                                         \
 } while (0)
 
+/**
+ * closure_return_with_destructor - finish execution of a closure, with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
 #define closure_return_with_destructor(_cl, _destructor)               \
 do {                                                                   \
        set_closure_fn(_cl, _destructor, NULL);                         \
@@ -514,6 +368,13 @@ do {                                                                       \
        return;                                                         \
 } while (0)
 
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
 static inline void closure_call(struct closure *cl, closure_fn fn,
                                struct workqueue_struct *wq,
                                struct closure *parent)
@@ -522,12 +383,4 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
        continue_at_nobarrier(cl, fn, wq);
 }
 
-static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
-                                       struct workqueue_struct *wq,
-                                       struct closure *parent)
-{
-       if (closure_trylock(cl, parent))
-               continue_at_nobarrier(cl, fn, wq);
-}
-
 #endif /* _LINUX_CLOSURE_H */
index 264fcfbd629016aa1ab890cce56de2699c49be70..8b1f1d5c18198f078dea917f40303c98fce1ee94 100644 (file)
@@ -8,6 +8,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 
 #include <linux/console.h>
 #include <linux/debugfs.h>
 
 static struct dentry *debug;
 
-const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-{
-       unsigned i;
-
-       for (i = 0; i < KEY_PTRS(k); i++)
-               if (ptr_available(c, k, i)) {
-                       struct cache *ca = PTR_CACHE(c, k, i);
-                       size_t bucket = PTR_BUCKET_NR(c, k, i);
-                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-                       if (KEY_SIZE(k) + r > c->sb.bucket_size)
-                               return "bad, length too big";
-                       if (bucket <  ca->sb.first_bucket)
-                               return "bad, short offset";
-                       if (bucket >= ca->sb.nbuckets)
-                               return "bad, offset past end of device";
-                       if (ptr_stale(c, k, i))
-                               return "stale";
-               }
-
-       if (!bkey_cmp(k, &ZERO_KEY))
-               return "bad, null key";
-       if (!KEY_PTRS(k))
-               return "bad, no pointers";
-       if (!KEY_SIZE(k))
-               return "zeroed key";
-       return "";
-}
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
-{
-       unsigned i = 0;
-       char *out = buf, *end = buf + size;
-
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
-
-       p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
-
-       if (KEY_PTRS(k))
-               while (1) {
-                       p("%llu:%llu gen %llu",
-                         PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
-
-                       if (++i == KEY_PTRS(k))
-                               break;
-
-                       p(", ");
-               }
-
-       p("]");
-
-       if (KEY_DIRTY(k))
-               p(" dirty");
-       if (KEY_CSUM(k))
-               p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-#undef p
-       return out - buf;
-}
-
 #ifdef CONFIG_BCACHE_DEBUG
 
-static void dump_bset(struct btree *b, struct bset *i)
-{
-       struct bkey *k, *next;
-       unsigned j;
-       char buf[80];
-
-       for (k = i->start; k < end(i); k = next) {
-               next = bkey_next(k);
-
-               bch_bkey_to_text(buf, sizeof(buf), k);
-               printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
-                      (uint64_t *) k - i->d, i->keys, buf);
-
-               for (j = 0; j < KEY_PTRS(k); j++) {
-                       size_t n = PTR_BUCKET_NR(b->c, k, j);
-                       printk(" bucket %zu", n);
-
-                       if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-                               printk(" prio %i",
-                                      PTR_BUCKET(b->c, k, j)->prio);
-               }
+#define for_each_written_bset(b, start, i)                             \
+       for (i = (start);                                               \
+            (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
+            i->seq == (start)->seq;                                    \
+            i = (void *) i + set_blocks(i, block_bytes(b->c)) *        \
+                block_bytes(b->c))
 
-               printk(" %s\n", bch_ptr_status(b->c, k));
-
-               if (next < end(i) &&
-                   bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
-                       printk(KERN_ERR "Key skipped backwards\n");
-       }
-}
-
-static void bch_dump_bucket(struct btree *b)
-{
-       unsigned i;
-
-       console_lock();
-       for (i = 0; i <= b->nsets; i++)
-               dump_bset(b, b->sets[i].data);
-       console_unlock();
-}
-
-void bch_btree_verify(struct btree *b, struct bset *new)
+void bch_btree_verify(struct btree *b)
 {
        struct btree *v = b->c->verify_data;
-       struct closure cl;
-       closure_init_stack(&cl);
+       struct bset *ondisk, *sorted, *inmemory;
+       struct bio *bio;
 
-       if (!b->c->verify)
+       if (!b->c->verify || !b->c->verify_ondisk)
                return;
 
-       closure_wait_event(&b->io.wait, &cl,
-                          atomic_read(&b->io.cl.remaining) == -1);
-
+       down(&b->io_mutex);
        mutex_lock(&b->c->verify_lock);
 
+       ondisk = b->c->verify_ondisk;
+       sorted = b->c->verify_data->keys.set->data;
+       inmemory = b->keys.set->data;
+
        bkey_copy(&v->key, &b->key);
        v->written = 0;
        v->level = b->level;
+       v->keys.ops = b->keys.ops;
+
+       bio = bch_bbio_alloc(b->c);
+       bio->bi_bdev            = PTR_CACHE(b->c, &b->key, 0)->bdev;
+       bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
+       bio->bi_iter.bi_size    = KEY_SIZE(&v->key) << 9;
+       bch_bio_map(bio, sorted);
 
-       bch_btree_node_read(v);
-       closure_wait_event(&v->io.wait, &cl,
-                          atomic_read(&b->io.cl.remaining) == -1);
+       submit_bio_wait(REQ_META|READ_SYNC, bio);
+       bch_bbio_free(bio, b->c);
 
-       if (new->keys != v->sets[0].data->keys ||
-           memcmp(new->start,
-                  v->sets[0].data->start,
-                  (void *) end(new) - (void *) new->start)) {
-               unsigned i, j;
+       memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
+
+       bch_btree_node_read_done(v);
+       sorted = v->keys.set->data;
+
+       if (inmemory->keys != sorted->keys ||
+           memcmp(inmemory->start,
+                  sorted->start,
+                  (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
+               struct bset *i;
+               unsigned j;
 
                console_lock();
 
-               printk(KERN_ERR "*** original memory node:\n");
-               for (i = 0; i <= b->nsets; i++)
-                       dump_bset(b, b->sets[i].data);
+               printk(KERN_ERR "*** in memory:\n");
+               bch_dump_bset(&b->keys, inmemory, 0);
 
-               printk(KERN_ERR "*** sorted memory node:\n");
-               dump_bset(b, new);
+               printk(KERN_ERR "*** read back in:\n");
+               bch_dump_bset(&v->keys, sorted, 0);
 
-               printk(KERN_ERR "*** on disk node:\n");
-               dump_bset(v, v->sets[0].data);
+               for_each_written_bset(b, ondisk, i) {
+                       unsigned block = ((void *) i - (void *) ondisk) /
+                               block_bytes(b->c);
+
+                       printk(KERN_ERR "*** on disk block %u:\n", block);
+                       bch_dump_bset(&b->keys, i, block);
+               }
 
-               for (j = 0; j < new->keys; j++)
-                       if (new->d[j] != v->sets[0].data->d[j])
+               printk(KERN_ERR "*** block %zu not written\n",
+                      ((void *) i - (void *) ondisk) / block_bytes(b->c));
+
+               for (j = 0; j < inmemory->keys; j++)
+                       if (inmemory->d[j] != sorted->d[j])
                                break;
 
+               printk(KERN_ERR "b->written %u\n", b->written);
+
                console_unlock();
                panic("verify failed at %u\n", j);
        }
 
        mutex_unlock(&b->c->verify_lock);
+       up(&b->io_mutex);
 }
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
        char name[BDEVNAME_SIZE];
        struct bio *check;
-       struct bio_vec *bv;
+       struct bio_vec bv, *bv2;
+       struct bvec_iter iter;
        int i;
 
        check = bio_clone(bio, GFP_NOIO);
@@ -185,95 +119,27 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
        submit_bio_wait(READ_SYNC, check);
 
-       bio_for_each_segment(bv, bio, i) {
-               void *p1 = kmap_atomic(bv->bv_page);
-               void *p2 = page_address(check->bi_io_vec[i].bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               void *p1 = kmap_atomic(bv.bv_page);
+               void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-               cache_set_err_on(memcmp(p1 + bv->bv_offset,
-                                       p2 + bv->bv_offset,
-                                       bv->bv_len),
+               cache_set_err_on(memcmp(p1 + bv.bv_offset,
+                                       p2 + bv.bv_offset,
+                                       bv.bv_len),
                                 dc->disk.c,
                                 "verify failed at dev %s sector %llu",
                                 bdevname(dc->bdev, name),
-                                (uint64_t) bio->bi_sector);
+                                (uint64_t) bio->bi_iter.bi_sector);
 
                kunmap_atomic(p1);
        }
 
-       bio_for_each_segment_all(bv, check, i)
-               __free_page(bv->bv_page);
+       bio_for_each_segment_all(bv2, check, i)
+               __free_page(bv2->bv_page);
 out_put:
        bio_put(check);
 }
 
-int __bch_count_data(struct btree *b)
-{
-       unsigned ret = 0;
-       struct btree_iter iter;
-       struct bkey *k;
-
-       if (!b->level)
-               for_each_key(b, k, &iter)
-                       ret += KEY_SIZE(k);
-       return ret;
-}
-
-void __bch_check_keys(struct btree *b, const char *fmt, ...)
-{
-       va_list args;
-       struct bkey *k, *p = NULL;
-       struct btree_iter iter;
-       const char *err;
-
-       for_each_key(b, k, &iter) {
-               if (!b->level) {
-                       err = "Keys out of order";
-                       if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
-                               goto bug;
-
-                       if (bch_ptr_invalid(b, k))
-                               continue;
-
-                       err =  "Overlapping keys";
-                       if (p && bkey_cmp(p, &START_KEY(k)) > 0)
-                               goto bug;
-               } else {
-                       if (bch_ptr_bad(b, k))
-                               continue;
-
-                       err = "Duplicate keys";
-                       if (p && !bkey_cmp(p, k))
-                               goto bug;
-               }
-               p = k;
-       }
-
-       err = "Key larger than btree node key";
-       if (p && bkey_cmp(p, &b->key) > 0)
-               goto bug;
-
-       return;
-bug:
-       bch_dump_bucket(b);
-
-       va_start(args, fmt);
-       vprintk(fmt, args);
-       va_end(args);
-
-       panic("bcache error: %s:\n", err);
-}
-
-void bch_btree_iter_next_check(struct btree_iter *iter)
-{
-       struct bkey *k = iter->data->k, *next = bkey_next(k);
-
-       if (next < iter->data->end &&
-           bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
-               bch_dump_bucket(iter->b);
-               panic("Key skipped backwards\n");
-       }
-}
-
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -320,7 +186,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
                if (!w)
                        break;
 
-               bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+               bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
                i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
                bch_keybuf_del(&i->keys, w);
        }
index 2ede60e3187475d114004111ef70c2ebaf4f38af..1f63c195d2476ece1b0c50b6acdddd1a1d92ba8b 100644 (file)
@@ -1,47 +1,30 @@
 #ifndef _BCACHE_DEBUG_H
 #define _BCACHE_DEBUG_H
 
-/* Btree/bkey debug printing */
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+struct bio;
+struct cached_dev;
+struct cache_set;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
-void bch_btree_verify(struct btree *, struct bset *);
+void bch_btree_verify(struct btree *);
 void bch_data_verify(struct cached_dev *, struct bio *);
-int __bch_count_data(struct btree *);
-void __bch_check_keys(struct btree *, const char *, ...);
-void bch_btree_iter_next_check(struct btree_iter *);
 
-#define EBUG_ON(cond)                  BUG_ON(cond)
 #define expensive_debug_checks(c)      ((c)->expensive_debug_checks)
 #define key_merging_disabled(c)                ((c)->key_merging_disabled)
 #define bypass_torture_test(d)         ((d)->bypass_torture_test)
 
 #else /* DEBUG */
 
-static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_btree_verify(struct btree *b) {}
 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
-static inline int __bch_count_data(struct btree *b) { return -1; }
-static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
-static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
 
-#define EBUG_ON(cond)                  do { if (cond); } while (0)
 #define expensive_debug_checks(c)      0
 #define key_merging_disabled(c)                0
 #define bypass_torture_test(d)         0
 
 #endif
 
-#define bch_count_data(b)                                              \
-       (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
-
-#define bch_check_keys(b, ...)                                         \
-do {                                                                   \
-       if (expensive_debug_checks((b)->c))                             \
-               __bch_check_keys(b, __VA_ARGS__);                       \
-} while (0)
-
 #ifdef CONFIG_DEBUG_FS
 void bch_debug_init_cache_set(struct cache_set *);
 #else
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
new file mode 100644 (file)
index 0000000..c3ead58
--- /dev/null
@@ -0,0 +1,616 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "extents.h"
+#include "writeback.h"
+
+static void sort_key_next(struct btree_iter *iter,
+                         struct btree_iter_set *i)
+{
+       i->k = bkey_next(i->k);
+
+       if (i->k == i->end)
+               *i = iter->data[--iter->used];
+}
+
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+                            struct btree_iter_set r)
+{
+       int64_t c = bkey_cmp(l.k, r.k);
+
+       return c ? c > 0 : l.k < r.k;
+}
+
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+       unsigned i;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (ptr_available(c, k, i)) {
+                       struct cache *ca = PTR_CACHE(c, k, i);
+                       size_t bucket = PTR_BUCKET_NR(c, k, i);
+                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+                       if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+                           bucket <  ca->sb.first_bucket ||
+                           bucket >= ca->sb.nbuckets)
+                               return true;
+               }
+
+       return false;
+}
+
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+       unsigned i;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (ptr_available(c, k, i)) {
+                       struct cache *ca = PTR_CACHE(c, k, i);
+                       size_t bucket = PTR_BUCKET_NR(c, k, i);
+                       size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+                       if (KEY_SIZE(k) + r > c->sb.bucket_size)
+                               return "bad, length too big";
+                       if (bucket <  ca->sb.first_bucket)
+                               return "bad, short offset";
+                       if (bucket >= ca->sb.nbuckets)
+                               return "bad, offset past end of device";
+                       if (ptr_stale(c, k, i))
+                               return "stale";
+               }
+
+       if (!bkey_cmp(k, &ZERO_KEY))
+               return "bad, null key";
+       if (!KEY_PTRS(k))
+               return "bad, no pointers";
+       if (!KEY_SIZE(k))
+               return "zeroed key";
+       return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+       unsigned i = 0;
+       char *out = buf, *end = buf + size;
+
+#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+
+       p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+       for (i = 0; i < KEY_PTRS(k); i++) {
+               if (i)
+                       p(", ");
+
+               if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+                       p("check dev");
+               else
+                       p("%llu:%llu gen %llu", PTR_DEV(k, i),
+                         PTR_OFFSET(k, i), PTR_GEN(k, i));
+       }
+
+       p("]");
+
+       if (KEY_DIRTY(k))
+               p(" dirty");
+       if (KEY_CSUM(k))
+               p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+       struct btree *b = container_of(keys, struct btree, keys);
+       unsigned j;
+       char buf[80];
+
+       bch_extent_to_text(buf, sizeof(buf), k);
+       printk(" %s", buf);
+
+       for (j = 0; j < KEY_PTRS(k); j++) {
+               size_t n = PTR_BUCKET_NR(b->c, k, j);
+               printk(" bucket %zu", n);
+
+               if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+                       printk(" prio %i",
+                              PTR_BUCKET(b->c, k, j)->prio);
+       }
+
+       printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
+/* Btree ptrs */
+
+bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+       char buf[80];
+
+       if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+               goto bad;
+
+       if (__ptr_invalid(c, k))
+               goto bad;
+
+       return false;
+bad:
+       bch_extent_to_text(buf, sizeof(buf), k);
+       cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+       return true;
+}
+
+static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       return __bch_btree_ptr_invalid(b->c, k);
+}
+
+static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
+{
+       unsigned i;
+       char buf[80];
+       struct bucket *g;
+
+       if (mutex_trylock(&b->c->bucket_lock)) {
+               for (i = 0; i < KEY_PTRS(k); i++)
+                       if (ptr_available(b->c, k, i)) {
+                               g = PTR_BUCKET(b->c, k, i);
+
+                               if (KEY_DIRTY(k) ||
+                                   g->prio != BTREE_PRIO ||
+                                   (b->c->gc_mark_valid &&
+                                    GC_MARK(g) != GC_MARK_METADATA))
+                                       goto err;
+                       }
+
+               mutex_unlock(&b->c->bucket_lock);
+       }
+
+       return false;
+err:
+       mutex_unlock(&b->c->bucket_lock);
+       bch_extent_to_text(buf, sizeof(buf), k);
+       btree_bug(b,
+"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+                 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+                 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+       return true;
+}
+
+static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       unsigned i;
+
+       if (!bkey_cmp(k, &ZERO_KEY) ||
+           !KEY_PTRS(k) ||
+           bch_ptr_invalid(bk, k))
+               return true;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (!ptr_available(b->c, k, i) ||
+                   ptr_stale(b->c, k, i))
+                       return true;
+
+       if (expensive_debug_checks(b->c) &&
+           btree_ptr_bad_expensive(b, k))
+               return true;
+
+       return false;
+}
+
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+                                      struct bkey *insert,
+                                      struct btree_iter *iter,
+                                      struct bkey *replace_key)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+
+       if (!KEY_OFFSET(insert))
+               btree_current_write(b)->prio_blocked++;
+
+       return false;
+}
+
+const struct btree_keys_ops bch_btree_keys_ops = {
+       .sort_cmp       = bch_key_sort_cmp,
+       .insert_fixup   = bch_btree_ptr_insert_fixup,
+       .key_invalid    = bch_btree_ptr_invalid,
+       .key_bad        = bch_btree_ptr_bad,
+       .key_to_text    = bch_extent_to_text,
+       .key_dump       = bch_bkey_dump,
+};
+
+/* Extents */
+
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+                               struct btree_iter_set r)
+{
+       int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+       return c ? c > 0 : l.k < r.k;
+}
+
+static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+                                         struct bkey *tmp)
+{
+       while (iter->used > 1) {
+               struct btree_iter_set *top = iter->data, *i = top + 1;
+
+               if (iter->used > 2 &&
+                   bch_extent_sort_cmp(i[0], i[1]))
+                       i++;
+
+               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+                       break;
+
+               if (!KEY_SIZE(i->k)) {
+                       sort_key_next(iter, i);
+                       heap_sift(iter, i - top, bch_extent_sort_cmp);
+                       continue;
+               }
+
+               if (top->k > i->k) {
+                       if (bkey_cmp(top->k, i->k) >= 0)
+                               sort_key_next(iter, i);
+                       else
+                               bch_cut_front(top->k, i->k);
+
+                       heap_sift(iter, i - top, bch_extent_sort_cmp);
+               } else {
+                       /* can't happen because of comparison func */
+                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+
+                       if (bkey_cmp(i->k, top->k) < 0) {
+                               bkey_copy(tmp, top->k);
+
+                               bch_cut_back(&START_KEY(i->k), tmp);
+                               bch_cut_front(i->k, top->k);
+                               heap_sift(iter, 0, bch_extent_sort_cmp);
+
+                               return tmp;
+                       } else {
+                               bch_cut_back(&START_KEY(i->k), top->k);
+                       }
+               }
+       }
+
+       return NULL;
+}
+
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+                                   struct bkey *insert,
+                                   struct btree_iter *iter,
+                                   struct bkey *replace_key)
+{
+       struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+       void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+       {
+               if (KEY_DIRTY(k))
+                       bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+                                                    offset, -sectors);
+       }
+
+       uint64_t old_offset;
+       unsigned old_size, sectors_found = 0;
+
+       BUG_ON(!KEY_OFFSET(insert));
+       BUG_ON(!KEY_SIZE(insert));
+
+       while (1) {
+               struct bkey *k = bch_btree_iter_next(iter);
+               if (!k)
+                       break;
+
+               if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+                       if (KEY_SIZE(k))
+                               break;
+                       else
+                               continue;
+               }
+
+               if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+                       continue;
+
+               old_offset = KEY_START(k);
+               old_size = KEY_SIZE(k);
+
+               /*
+                * We might overlap with 0 size extents; we can't skip these
+                * because if they're in the set we're inserting to we have to
+                * adjust them so they don't overlap with the key we're
+                * inserting. But we don't want to check them for replace
+                * operations.
+                */
+
+               if (replace_key && KEY_SIZE(k)) {
+                       /*
+                        * k might have been split since we inserted/found the
+                        * key we're replacing
+                        */
+                       unsigned i;
+                       uint64_t offset = KEY_START(k) -
+                               KEY_START(replace_key);
+
+                       /* But it must be a subset of the replace key */
+                       if (KEY_START(k) < KEY_START(replace_key) ||
+                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+                               goto check_failed;
+
+                       /* We didn't find a key that we were supposed to */
+                       if (KEY_START(k) > KEY_START(insert) + sectors_found)
+                               goto check_failed;
+
+                       if (!bch_bkey_equal_header(k, replace_key))
+                               goto check_failed;
+
+                       /* skip past gen */
+                       offset <<= 8;
+
+                       BUG_ON(!KEY_PTRS(replace_key));
+
+                       for (i = 0; i < KEY_PTRS(replace_key); i++)
+                               if (k->ptr[i] != replace_key->ptr[i] + offset)
+                                       goto check_failed;
+
+                       sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+               }
+
+               if (bkey_cmp(insert, k) < 0 &&
+                   bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+                       /*
+                        * We overlapped in the middle of an existing key: that
+                        * means we have to split the old key. But we have to do
+                        * slightly different things depending on whether the
+                        * old key has been written out yet.
+                        */
+
+                       struct bkey *top;
+
+                       subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+                       if (bkey_written(b, k)) {
+                               /*
+                                * We insert a new key to cover the top of the
+                                * old key, and the old key is modified in place
+                                * to represent the bottom split.
+                                *
+                                * It's completely arbitrary whether the new key
+                                * is the top or the bottom, but it has to match
+                                * up with what btree_sort_fixup() does - it
+                                * doesn't check for this kind of overlap, it
+                                * depends on us inserting a new key for the top
+                                * here.
+                                */
+                               top = bch_bset_search(b, bset_tree_last(b),
+                                                     insert);
+                               bch_bset_insert(b, top, k);
+                       } else {
+                               BKEY_PADDED(key) temp;
+                               bkey_copy(&temp.key, k);
+                               bch_bset_insert(b, k, &temp.key);
+                               top = bkey_next(k);
+                       }
+
+                       bch_cut_front(insert, top);
+                       bch_cut_back(&START_KEY(insert), k);
+                       bch_bset_fix_invalidated_key(b, k);
+                       goto out;
+               }
+
+               if (bkey_cmp(insert, k) < 0) {
+                       bch_cut_front(insert, k);
+               } else {
+                       if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+                               old_offset = KEY_START(insert);
+
+                       if (bkey_written(b, k) &&
+                           bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+                               /*
+                                * Completely overwrote, so we don't have to
+                                * invalidate the binary search tree
+                                */
+                               bch_cut_front(k, k);
+                       } else {
+                               __bch_cut_back(&START_KEY(insert), k);
+                               bch_bset_fix_invalidated_key(b, k);
+                       }
+               }
+
+               subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+       }
+
+check_failed:
+       if (replace_key) {
+               if (!sectors_found) {
+                       return true;
+               } else if (sectors_found < KEY_SIZE(insert)) {
+                       SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+                                      (KEY_SIZE(insert) - sectors_found));
+                       SET_KEY_SIZE(insert, sectors_found);
+               }
+       }
+out:
+       if (KEY_DIRTY(insert))
+               bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+                                            KEY_START(insert),
+                                            KEY_SIZE(insert));
+
+       return false;
+}
+
+static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       char buf[80];
+
+       if (!KEY_SIZE(k))
+               return true;
+
+       if (KEY_SIZE(k) > KEY_OFFSET(k))
+               goto bad;
+
+       if (__ptr_invalid(b->c, k))
+               goto bad;
+
+       return false;
+bad:
+       bch_extent_to_text(buf, sizeof(buf), k);
+       cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
+       return true;
+}
+
+static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
+                                    unsigned ptr)
+{
+       struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+       char buf[80];
+
+       if (mutex_trylock(&b->c->bucket_lock)) {
+               if (b->c->gc_mark_valid &&
+                   ((GC_MARK(g) != GC_MARK_DIRTY &&
+                     KEY_DIRTY(k)) ||
+                    GC_MARK(g) == GC_MARK_METADATA))
+                       goto err;
+
+               if (g->prio == BTREE_PRIO)
+                       goto err;
+
+               mutex_unlock(&b->c->bucket_lock);
+       }
+
+       return false;
+err:
+       mutex_unlock(&b->c->bucket_lock);
+       bch_extent_to_text(buf, sizeof(buf), k);
+       btree_bug(b,
+"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+                 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+                 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+       return true;
+}
+
+static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       struct bucket *g;
+       unsigned i, stale;
+
+       if (!KEY_PTRS(k) ||
+           bch_extent_invalid(bk, k))
+               return true;
+
+       for (i = 0; i < KEY_PTRS(k); i++)
+               if (!ptr_available(b->c, k, i))
+                       return true;
+
+       if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
+               return false;
+
+       for (i = 0; i < KEY_PTRS(k); i++) {
+               g = PTR_BUCKET(b->c, k, i);
+               stale = ptr_stale(b->c, k, i);
+
+               btree_bug_on(stale > 96, b,
+                            "key too stale: %i, need_gc %u",
+                            stale, b->c->need_gc);
+
+               btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+                            b, "stale dirty pointer");
+
+               if (stale)
+                       return true;
+
+               if (expensive_debug_checks(b->c) &&
+                   bch_extent_bad_expensive(b, k, i))
+                       return true;
+       }
+
+       return false;
+}
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+       return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+               ~((uint64_t)1 << 63);
+}
+
+static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+       unsigned i;
+
+       if (key_merging_disabled(b->c))
+               return false;
+
+       for (i = 0; i < KEY_PTRS(l); i++)
+               if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+                   PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+                       return false;
+
+       /* Keys with no pointers aren't restricted to one bucket and could
+        * overflow KEY_SIZE
+        */
+       if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+               SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+               SET_KEY_SIZE(l, USHRT_MAX);
+
+               bch_cut_front(l, r);
+               return false;
+       }
+
+       if (KEY_CSUM(l)) {
+               if (KEY_CSUM(r))
+                       l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+               else
+                       SET_KEY_CSUM(l, 0);
+       }
+
+       SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+       SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+       return true;
+}
+
+const struct btree_keys_ops bch_extent_keys_ops = {
+       .sort_cmp       = bch_extent_sort_cmp,
+       .sort_fixup     = bch_extent_sort_fixup,
+       .insert_fixup   = bch_extent_insert_fixup,
+       .key_invalid    = bch_extent_invalid,
+       .key_bad        = bch_extent_bad,
+       .key_merge      = bch_extent_merge,
+       .key_to_text    = bch_extent_to_text,
+       .key_dump       = bch_bkey_dump,
+       .is_extents     = true,
+};
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
new file mode 100644 (file)
index 0000000..e4e2340
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _BCACHE_EXTENTS_H
+#define _BCACHE_EXTENTS_H
+
+extern const struct btree_keys_ops bch_btree_keys_ops;
+extern const struct btree_keys_ops bch_extent_keys_ops;
+
+struct bkey;
+struct cache_set;
+
+void bch_extent_to_text(char *, size_t, const struct bkey *);
+bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+
+#endif /* _BCACHE_EXTENTS_H */
index 9056632995b1b8a2de9de607c60cd693704c9c65..fa028fa82df41bf509e15d140f9b3b943c5a8c2d 100644 (file)
 
 #include <linux/blkdev.h>
 
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
-       struct bio *p = bio->bi_private;
-
-       bio_endio(p, error);
-       bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
-       if (bio->bi_idx) {
-               struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
-               memcpy(clone->bi_io_vec,
-                      bio_iovec(bio),
-                      bio_segments(bio) * sizeof(struct bio_vec));
-
-               clone->bi_sector        = bio->bi_sector;
-               clone->bi_bdev          = bio->bi_bdev;
-               clone->bi_rw            = bio->bi_rw;
-               clone->bi_vcnt          = bio_segments(bio);
-               clone->bi_size          = bio->bi_size;
-
-               clone->bi_private       = bio;
-               clone->bi_end_io        = bch_bi_idx_hack_endio;
-
-               bio = clone;
-       }
-
-       /*
-        * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
-        * bios might have had more than that (before we split them per device
-        * limitations).
-        *
-        * To be taken out once immutable bvec stuff is in.
-        */
-       bio->bi_max_vecs = bio->bi_vcnt;
-
-       generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio:       bio to split
- * @sectors:   number of sectors to split from the front of @bio
- * @gfp:       gfp mask
- * @bs:                bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundry; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-                         gfp_t gfp, struct bio_set *bs)
-{
-       unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
-       struct bio_vec *bv;
-       struct bio *ret = NULL;
-
-       BUG_ON(sectors <= 0);
-
-       if (sectors >= bio_sectors(bio))
-               return bio;
-
-       if (bio->bi_rw & REQ_DISCARD) {
-               ret = bio_alloc_bioset(gfp, 1, bs);
-               if (!ret)
-                       return NULL;
-               idx = 0;
-               goto out;
-       }
-
-       bio_for_each_segment(bv, bio, idx) {
-               vcnt = idx - bio->bi_idx;
-
-               if (!nbytes) {
-                       ret = bio_alloc_bioset(gfp, vcnt, bs);
-                       if (!ret)
-                               return NULL;
-
-                       memcpy(ret->bi_io_vec, bio_iovec(bio),
-                              sizeof(struct bio_vec) * vcnt);
-
-                       break;
-               } else if (nbytes < bv->bv_len) {
-                       ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-                       if (!ret)
-                               return NULL;
-
-                       memcpy(ret->bi_io_vec, bio_iovec(bio),
-                              sizeof(struct bio_vec) * vcnt);
-
-                       ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-                       bv->bv_offset   += nbytes;
-                       bv->bv_len      -= nbytes;
-                       break;
-               }
-
-               nbytes -= bv->bv_len;
-       }
-out:
-       ret->bi_bdev    = bio->bi_bdev;
-       ret->bi_sector  = bio->bi_sector;
-       ret->bi_size    = sectors << 9;
-       ret->bi_rw      = bio->bi_rw;
-       ret->bi_vcnt    = vcnt;
-       ret->bi_max_vecs = vcnt;
-
-       bio->bi_sector  += sectors;
-       bio->bi_size    -= sectors << 9;
-       bio->bi_idx      = idx;
-
-       if (bio_integrity(bio)) {
-               if (bio_integrity_clone(ret, bio, gfp)) {
-                       bio_put(ret);
-                       return NULL;
-               }
-
-               bio_integrity_trim(ret, 0, bio_sectors(ret));
-               bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
-       }
-
-       return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-       unsigned ret = bio_sectors(bio);
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-                                     queue_max_segments(q));
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       unsigned ret = 0, seg = 0;
 
        if (bio->bi_rw & REQ_DISCARD)
-               return min(ret, q->limits.max_discard_sectors);
-
-       if (bio_segments(bio) > max_segments ||
-           q->merge_bvec_fn) {
-               struct bio_vec *bv;
-               int i, seg = 0;
-
-               ret = 0;
-
-               bio_for_each_segment(bv, bio, i) {
-                       struct bvec_merge_data bvm = {
-                               .bi_bdev        = bio->bi_bdev,
-                               .bi_sector      = bio->bi_sector,
-                               .bi_size        = ret << 9,
-                               .bi_rw          = bio->bi_rw,
-                       };
-
-                       if (seg == max_segments)
-                               break;
+               return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+       bio_for_each_segment(bv, bio, iter) {
+               struct bvec_merge_data bvm = {
+                       .bi_bdev        = bio->bi_bdev,
+                       .bi_sector      = bio->bi_iter.bi_sector,
+                       .bi_size        = ret << 9,
+                       .bi_rw          = bio->bi_rw,
+               };
+
+               if (seg == min_t(unsigned, BIO_MAX_PAGES,
+                                queue_max_segments(q)))
+                       break;
 
-                       if (q->merge_bvec_fn &&
-                           q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
-                               break;
+               if (q->merge_bvec_fn &&
+                   q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                       break;
 
-                       seg++;
-                       ret += bv->bv_len >> 9;
-               }
+               seg++;
+               ret += bv.bv_len >> 9;
        }
 
        ret = min(ret, queue_max_sectors(q));
 
        WARN_ON(!ret);
-       ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+       ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
 
        return ret;
 }
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
        s->bio->bi_end_io = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
-       bio_endio(s->bio, 0);
+       bio_endio_nodec(s->bio, 0);
 
        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
        bio_get(bio);
 
        do {
-               n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-                                 GFP_NOIO, s->p->bio_split);
+               n = bio_next_split(bio, bch_bio_max_sectors(bio),
+                                  GFP_NOIO, s->p->bio_split);
 
                n->bi_end_io    = bch_bio_submit_split_endio;
                n->bi_private   = &s->cl;
 
                closure_get(&s->cl);
-               bch_generic_make_request_hack(n);
+               generic_make_request(n);
        } while (n != bio);
 
        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
-       bch_generic_make_request_hack(bio);
+       generic_make_request(bio);
 }
 
 /* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
 
-       bio->bi_sector  = PTR_OFFSET(&b->key, 0);
-       bio->bi_bdev    = PTR_CACHE(c, &b->key, 0)->bdev;
+       bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
+       bio->bi_bdev            = PTR_CACHE(c, &b->key, 0)->bdev;
 
        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
index ecdaa671bd50457bf38d1cf9f896ffd1c8352546..18039affc306b539e187b9b57ba3f1c3c3b95e3d 100644 (file)
@@ -44,17 +44,17 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
 
        closure_init_stack(&cl);
 
-       pr_debug("reading %llu", (uint64_t) bucket);
+       pr_debug("reading %u", bucket_index);
 
        while (offset < ca->sb.bucket_size) {
 reread:                left = ca->sb.bucket_size - offset;
-               len = min_t(unsigned, left, PAGE_SECTORS * 8);
+               len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
 
                bio_reset(bio);
-               bio->bi_sector  = bucket + offset;
+               bio->bi_iter.bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = READ;
-               bio->bi_size    = len << 9;
+               bio->bi_iter.bi_size    = len << 9;
 
                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
@@ -74,19 +74,28 @@ reread:             left = ca->sb.bucket_size - offset;
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);
 
-                       if (j->magic != jset_magic(&ca->sb))
+                       if (j->magic != jset_magic(&ca->sb)) {
+                               pr_debug("%u: bad magic", bucket_index);
                                return ret;
+                       }
 
-                       if (bytes > left << 9)
+                       if (bytes > left << 9 ||
+                           bytes > PAGE_SIZE << JSET_BITS) {
+                               pr_info("%u: too big, %zu bytes, offset %u",
+                                       bucket_index, bytes, offset);
                                return ret;
+                       }
 
                        if (bytes > len << 9)
                                goto reread;
 
-                       if (j->csum != csum_set(j))
+                       if (j->csum != csum_set(j)) {
+                               pr_info("%u: bad csum, %zu bytes, offset %u",
+                                       bucket_index, bytes, offset);
                                return ret;
+                       }
 
-                       blocks = set_blocks(j, ca->set);
+                       blocks = set_blocks(j, block_bytes(ca->set));
 
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
@@ -275,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
                }
 
                for (k = i->j.start;
-                    k < end(&i->j);
+                    k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        unsigned j;
 
@@ -313,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
                                 n, i->j.seq - 1, start, end);
 
                for (k = i->j.start;
-                    k < end(&i->j);
+                    k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);
 
@@ -437,13 +446,13 @@ static void do_journal_discard(struct cache *ca)
                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
                bio_init(bio);
-               bio->bi_sector          = bucket_to_sector(ca->set,
+               bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
-               bio->bi_size            = bucket_bytes(ca);
+               bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;
 
                closure_get(&ca->set->cl);
@@ -555,6 +564,14 @@ static void journal_write_done(struct closure *cl)
        continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
+static void journal_write_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+       c->journal.io_in_flight = 0;
+       spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
 {
@@ -562,22 +579,15 @@ static void journal_write_unlocked(struct closure *cl)
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
-       unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+       unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+               c->sb.block_size;
 
        struct bio *bio;
        struct bio_list list;
        bio_list_init(&list);
 
        if (!w->need_write) {
-               /*
-                * XXX: have to unlock closure before we unlock journal lock,
-                * else we race with bch_journal(). But this way we race
-                * against cache set unregister. Doh.
-                */
-               set_closure_fn(cl, NULL, NULL);
-               closure_sub(cl, CLOSURE_RUNNING + 1);
-               spin_unlock(&c->journal.lock);
-               return;
+               closure_return_with_destructor(cl, journal_write_unlock);
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);
@@ -586,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
                continue_at(cl, journal_write, system_wq);
        }
 
-       c->journal.blocks_free -= set_blocks(w->data, c);
+       c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
 
        w->data->btree_level = c->root->level;
 
@@ -608,10 +618,10 @@ static void journal_write_unlocked(struct closure *cl)
                atomic_long_add(sectors, &ca->meta_sectors_written);
 
                bio_reset(bio);
-               bio->bi_sector  = PTR_OFFSET(k, i);
+               bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-               bio->bi_size    = sectors << 9;
+               bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
@@ -653,10 +663,12 @@ static void journal_try_write(struct cache_set *c)
 
        w->need_write = true;
 
-       if (closure_trylock(cl, &c->cl))
-               journal_write_unlocked(cl);
-       else
+       if (!c->journal.io_in_flight) {
+               c->journal.io_in_flight = 1;
+               closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+       } else {
                spin_unlock(&c->journal.lock);
+       }
 }
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -664,6 +676,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
 {
        size_t sectors;
        struct closure cl;
+       bool wait = false;
 
        closure_init_stack(&cl);
 
@@ -673,16 +686,19 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
                struct journal_write *w = c->journal.cur;
 
                sectors = __set_blocks(w->data, w->data->keys + nkeys,
-                                      c) * c->sb.block_size;
+                                      block_bytes(c)) * c->sb.block_size;
 
                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;
 
-               /* XXX: tracepoint */
+               if (wait)
+                       closure_wait(&c->journal.wait, &cl);
+
                if (!journal_full(&c->journal)) {
-                       trace_bcache_journal_entry_full(c);
+                       if (wait)
+                               trace_bcache_journal_entry_full(c);
 
                        /*
                         * XXX: If we were inserting so many keys that they
@@ -692,12 +708,11 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
                         */
                        BUG_ON(!w->data->keys);
 
-                       closure_wait(&w->wait, &cl);
                        journal_try_write(c); /* unlocks */
                } else {
-                       trace_bcache_journal_full(c);
+                       if (wait)
+                               trace_bcache_journal_full(c);
 
-                       closure_wait(&c->journal.wait, &cl);
                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);
 
@@ -706,6 +721,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
 
                closure_sync(&cl);
                spin_lock(&c->journal.lock);
+               wait = true;
        }
 }
 
@@ -736,7 +752,7 @@ atomic_t *bch_journal(struct cache_set *c,
 
        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-       memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+       memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);
 
        ret = &fifo_back(&c->journal.pin);
@@ -780,7 +796,6 @@ int bch_journal_alloc(struct cache_set *c)
 {
        struct journal *j = &c->journal;
 
-       closure_init_unlocked(&j->io);
        spin_lock_init(&j->lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);
 
index a6472fda94b25c00a340bcfefd48b3ac0c762c6e..9180c44650759b61b843ea8fe763a38b62277cd4 100644 (file)
@@ -104,6 +104,7 @@ struct journal {
        /* used when waiting because the journal was full */
        struct closure_waitlist wait;
        struct closure          io;
+       int                     io_in_flight;
        struct delayed_work     work;
 
        /* Number of blocks free in the bucket(s) we're currently writing to */
index f2f0998c4a91872407dd036a54fe72d243885fed..9eb60d102de84532e3a662390b1ba2934b744673 100644 (file)
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-       bio->bi_size            = KEY_SIZE(&io->w->key) << 9;
+       bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                               PAGE_SECTORS);
        bio->bi_private         = &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
        if (!op->error) {
                moving_init(io);
 
-               io->bio.bio.bi_sector = KEY_START(&io->w->key);
+               io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->write_prio          = 1;
                op->bio                 = &io->bio.bio;
 
@@ -211,7 +211,7 @@ void bch_moving_gc(struct cache_set *c)
        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
-                       min(fifo_used(&ca->free), ca->free.size / 2);
+                       fifo_used(&ca->free[RESERVE_MOVINGGC]);
 
                ca->heap.used = 0;
 
index 61bcfc21d2a0f4972b581a689fd1c3c929f7bd38..72cd213f213f9e806dc9a0360000ffffe6466896 100644 (file)
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
+       struct bvec_iter iter;
        uint64_t csum = 0;
-       int i;
 
-       bio_for_each_segment(bv, bio, i) {
-               void *d = kmap(bv->bv_page) + bv->bv_offset;
-               csum = bch_crc64_update(csum, d, bv->bv_len);
-               kunmap(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               void *d = kmap(bv.bv_page) + bv.bv_offset;
+               csum = bch_crc64_update(csum, d, bv.bv_len);
+               kunmap(bv.bv_page);
        }
 
        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -254,26 +254,44 @@ static void bch_data_insert_keys(struct closure *cl)
        closure_return(cl);
 }
 
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+                              struct cache_set *c)
+{
+       size_t oldsize = bch_keylist_nkeys(l);
+       size_t newsize = oldsize + u64s;
+
+       /*
+        * The journalling code doesn't handle the case where the keys to insert
+        * is bigger than an empty write: If we just return -ENOMEM here,
+        * bio_insert() and bio_invalidate() will insert the keys created so far
+        * and finish the rest when the keylist is empty.
+        */
+       if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+               return -ENOMEM;
+
+       return __bch_keylist_realloc(l, u64s);
+}
+
 static void bch_data_invalidate(struct closure *cl)
 {
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio;
 
        pr_debug("invalidating %i sectors from %llu",
-                bio_sectors(bio), (uint64_t) bio->bi_sector);
+                bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
        while (bio_sectors(bio)) {
                unsigned sectors = min(bio_sectors(bio),
                                       1U << (KEY_SIZE_BITS - 1));
 
-               if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+               if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                        goto out;
 
-               bio->bi_sector  += sectors;
-               bio->bi_size    -= sectors << 9;
+               bio->bi_iter.bi_sector  += sectors;
+               bio->bi_iter.bi_size    -= sectors << 9;
 
                bch_keylist_add(&op->insert_keys,
-                               &KEY(op->inode, bio->bi_sector, sectors));
+                               &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
        }
 
        op->insert_data_done = true;
@@ -356,21 +374,21 @@ static void bch_data_insert_start(struct closure *cl)
 
                /* 1 for the device pointer and 1 for the chksum */
                if (bch_keylist_realloc(&op->insert_keys,
-                                       1 + (op->csum ? 1 : 0),
+                                       3 + (op->csum ? 1 : 0),
                                        op->c))
                        continue_at(cl, bch_data_insert_keys, bcache_wq);
 
                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
-               SET_KEY_OFFSET(k, bio->bi_sector);
+               SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;
 
-               n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+               n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
                n->bi_end_io    = bch_data_insert_endio;
                n->bi_private   = cl;
@@ -521,7 +539,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
             (bio->bi_rw & REQ_WRITE)))
                goto skip;
 
-       if (bio->bi_sector & (c->sb.block_size - 1) ||
+       if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io");
                goto skip;
@@ -545,8 +563,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
        spin_lock(&dc->io_lock);
 
-       hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-               if (i->last == bio->bi_sector &&
+       hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+               if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;
 
@@ -555,8 +573,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
        add_sequential(task);
        i->sequential = 0;
 found:
-       if (i->sequential + bio->bi_size > i->sequential)
-               i->sequential   += bio->bi_size;
+       if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+               i->sequential   += bio->bi_iter.bi_size;
 
        i->last                  = bio_end_sector(bio);
        i->jiffies               = jiffies + msecs_to_jiffies(5000);
@@ -596,16 +614,13 @@ struct search {
        /* Stack frame for bio_complete */
        struct closure          cl;
 
-       struct bcache_device    *d;
-
        struct bbio             bio;
        struct bio              *orig_bio;
        struct bio              *cache_miss;
+       struct bcache_device    *d;
 
        unsigned                insert_bio_sectors;
-
        unsigned                recoverable:1;
-       unsigned                unaligned_bvec:1;
        unsigned                write:1;
        unsigned                read_dirty_data:1;
 
@@ -630,7 +645,8 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 
        if (error)
                s->iop.error = error;
-       else if (ptr_stale(s->iop.c, &b->key, 0)) {
+       else if (!KEY_DIRTY(&b->key) &&
+                ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.error = -EINTR;
        }
@@ -649,15 +665,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        struct bkey *bio_key;
        unsigned ptr;
 
-       if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+       if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;
 
        if (KEY_INODE(k) != s->iop.inode ||
-           KEY_START(k) > bio->bi_sector) {
+           KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned bio_sectors = bio_sectors(bio);
                unsigned sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
-                               KEY_START(k) - bio->bi_sector)
+                               KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;
 
                int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +695,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        if (KEY_DIRTY(k))
                s->read_dirty_data = true;
 
-       n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-                                    KEY_OFFSET(k) - bio->bi_sector),
-                         GFP_NOIO, s->d->bio_split);
+       n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+                                     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+                          GFP_NOIO, s->d->bio_split);
 
        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-       bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+       bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
        n->bi_end_io    = bch_cache_read_endio;
@@ -711,10 +727,13 @@ static void cache_lookup(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
+       int ret;
 
-       int ret = bch_btree_map_keys(&s->op, s->iop.c,
-                                    &KEY(s->iop.inode, bio->bi_sector, 0),
-                                    cache_lookup_fn, MAP_END_KEY);
+       bch_btree_op_init(&s->op, -1);
+
+       ret = bch_btree_map_keys(&s->op, s->iop.c,
+                                &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+                                cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN)
                continue_at(cl, cache_lookup, bcache_wq);
 
@@ -755,13 +774,15 @@ static void bio_complete(struct search *s)
        }
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
        struct bio *bio = &s->bio.bio;
-       memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+       bio_init(bio);
+       __bio_clone_fast(bio, orig_bio);
        bio->bi_end_io          = request_endio;
        bio->bi_private         = &s->cl;
+
        atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -773,43 +794,36 @@ static void search_free(struct closure *cl)
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
-       if (s->unaligned_bvec)
-               mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+                                         struct bcache_device *d)
 {
        struct search *s;
-       struct bio_vec *bv;
 
        s = mempool_alloc(d->c->search, GFP_NOIO);
-       memset(s, 0, offsetof(struct search, iop.insert_keys));
 
-       __closure_init(&s->cl, NULL);
+       closure_init(&s->cl, NULL);
+       do_bio_hook(s, bio);
 
-       s->iop.inode            = d->id;
-       s->iop.c                = d->c;
-       s->d                    = d;
-       s->op.lock              = -1;
-       s->iop.write_point      = hash_long((unsigned long) current, 16);
        s->orig_bio             = bio;
-       s->write                = (bio->bi_rw & REQ_WRITE) != 0;
-       s->iop.flush_journal    = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+       s->cache_miss           = NULL;
+       s->d                    = d;
        s->recoverable          = 1;
+       s->write                = (bio->bi_rw & REQ_WRITE) != 0;
+       s->read_dirty_data      = 0;
        s->start_time           = jiffies;
-       do_bio_hook(s);
 
-       if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-               bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-               memcpy(bv, bio_iovec(bio),
-                      sizeof(struct bio_vec) * bio_segments(bio));
-
-               s->bio.bio.bi_io_vec    = bv;
-               s->unaligned_bvec       = 1;
-       }
+       s->iop.c                = d->c;
+       s->iop.bio              = NULL;
+       s->iop.inode            = d->id;
+       s->iop.write_point      = hash_long((unsigned long) current, 16);
+       s->iop.write_prio       = 0;
+       s->iop.error            = 0;
+       s->iop.flags            = 0;
+       s->iop.flush_journal    = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 
        return s;
 }
@@ -849,26 +863,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
-       struct bio_vec *bv;
-       int i;
 
        if (s->recoverable) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
                s->iop.error = 0;
-               bv = s->bio.bio.bi_io_vec;
-               do_bio_hook(s);
-               s->bio.bio.bi_io_vec = bv;
-
-               if (!s->unaligned_bvec)
-                       bio_for_each_segment(bv, s->orig_bio, i)
-                               bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-               else
-                       memcpy(s->bio.bio.bi_io_vec,
-                              bio_iovec(s->orig_bio),
-                              sizeof(struct bio_vec) *
-                              bio_segments(s->orig_bio));
+               do_bio_hook(s, s->orig_bio);
 
                /* XXX: invalidate cache */
 
@@ -893,9 +894,9 @@ static void cached_dev_read_done(struct closure *cl)
 
        if (s->iop.bio) {
                bio_reset(s->iop.bio);
-               s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+               s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-               s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+               s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);
 
                bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +905,7 @@ static void cached_dev_read_done(struct closure *cl)
                s->cache_miss = NULL;
        }
 
-       if (verify(dc, &s->bio.bio) && s->recoverable &&
-           !s->unaligned_bvec && !s->read_dirty_data)
+       if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);
 
        bio_complete(s);
@@ -945,7 +945,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        struct bio *miss, *cache_bio;
 
        if (s->cache_miss || s->iop.bypass) {
-               miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+               miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }
@@ -959,7 +959,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
        s->iop.replace_key = KEY(s->iop.inode,
-                                bio->bi_sector + s->insert_bio_sectors,
+                                bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);
 
        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +968,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->iop.replace = true;
 
-       miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+       miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +979,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        if (!cache_bio)
                goto out_submit;
 
-       cache_bio->bi_sector    = miss->bi_sector;
-       cache_bio->bi_bdev      = miss->bi_bdev;
-       cache_bio->bi_size      = s->insert_bio_sectors << 9;
+       cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
+       cache_bio->bi_bdev              = miss->bi_bdev;
+       cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 
        cache_bio->bi_end_io    = request_endio;
        cache_bio->bi_private   = &s->cl;
@@ -1031,7 +1031,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
-       struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+       struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1087,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        closure_bio_submit(flush, cl, s->d);
                }
        } else {
-               s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-                                             dc->disk.bio_split);
+               s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
                closure_bio_submit(bio, cl, s->d);
        }
@@ -1126,13 +1125,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
        part_stat_unlock();
 
        bio->bi_bdev = dc->bdev;
-       bio->bi_sector += dc->sb.data_offset;
+       bio->bi_iter.bi_sector += dc->sb.data_offset;
 
        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);
 
-               if (!bio->bi_size) {
+               if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
@@ -1204,24 +1203,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned sectors)
 {
-       struct bio_vec *bv;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
        /* Zero fill bio */
 
-       bio_for_each_segment(bv, bio, i) {
-               unsigned j = min(bv->bv_len >> 9, sectors);
+       bio_for_each_segment(bv, bio, iter) {
+               unsigned j = min(bv.bv_len >> 9, sectors);
 
-               void *p = kmap(bv->bv_page);
-               memset(p + bv->bv_offset, 0, j << 9);
-               kunmap(bv->bv_page);
+               void *p = kmap(bv.bv_page);
+               memset(p + bv.bv_offset, 0, j << 9);
+               kunmap(bv.bv_page);
 
                sectors -= j;
        }
 
-       bio_advance(bio, min(sectors << 9, bio->bi_size));
+       bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-       if (!bio->bi_size)
+       if (!bio->bi_iter.bi_size)
                return MAP_DONE;
 
        return MAP_CONTINUE;
@@ -1255,7 +1254,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
        trace_bcache_request_start(s->d, bio);
 
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
@@ -1265,7 +1264,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                                      bcache_wq);
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-                                       &KEY(d->id, bio->bi_sector, 0),
+                                       &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                        &KEY(d->id, bio_end_sector(bio), 0));
 
                s->iop.bypass           = (bio->bi_rw & REQ_DISCARD) != 0;
index 2cd65bf073c24689542d78f0ef316f81ed21ba1c..39f21dbedc38b43ec7bf4818cfca319d27d27433 100644 (file)
@@ -13,17 +13,22 @@ struct data_insert_op {
        uint16_t                write_prio;
        short                   error;
 
-       unsigned                bypass:1;
-       unsigned                writeback:1;
-       unsigned                flush_journal:1;
-       unsigned                csum:1;
+       union {
+               uint16_t        flags;
 
-       unsigned                replace:1;
-       unsigned                replace_collision:1;
+       struct {
+               unsigned        bypass:1;
+               unsigned        writeback:1;
+               unsigned        flush_journal:1;
+               unsigned        csum:1;
 
-       unsigned                insert_data_done:1;
+               unsigned        replace:1;
+               unsigned        replace_collision:1;
+
+               unsigned        insert_data_done:1;
+       };
+       };
 
-       /* Anything past this point won't get zeroed in search_alloc() */
        struct keylist          insert_keys;
        BKEY_PADDED(replace_key);
 };
index c57bfa071a57c58b06fabeb194cbf98f5f4fbf56..24a3a1546caa7a4c8e9c0aa70c9461574f87a591 100644 (file)
@@ -9,6 +9,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 #include "request.h"
 #include "writeback.h"
 
@@ -225,7 +226,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
        struct cached_dev *dc = bio->bi_private;
        /* XXX: error checking */
 
-       closure_put(&dc->sb_write.cl);
+       closure_put(&dc->sb_write);
 }
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -233,9 +234,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
        unsigned i;
 
-       bio->bi_sector  = SB_SECTOR;
-       bio->bi_rw      = REQ_SYNC|REQ_META;
-       bio->bi_size    = SB_SIZE;
+       bio->bi_iter.bi_sector  = SB_SECTOR;
+       bio->bi_rw              = REQ_SYNC|REQ_META;
+       bio->bi_iter.bi_size    = SB_SIZE;
        bch_bio_map(bio, NULL);
 
        out->offset             = cpu_to_le64(sb->offset);
@@ -263,12 +264,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        submit_bio(REQ_WRITE, bio);
 }
 
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+       struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+       up(&dc->sb_write_mutex);
+}
+
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 {
-       struct closure *cl = &dc->sb_write.cl;
+       struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;
 
-       closure_lock(&dc->sb_write, parent);
+       down(&dc->sb_write_mutex);
+       closure_init(cl, parent);
 
        bio_reset(bio);
        bio->bi_bdev    = dc->bdev;
@@ -278,7 +287,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
        closure_get(cl);
        __write_super(&dc->sb, bio);
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
 static void write_super_endio(struct bio *bio, int error)
@@ -286,16 +295,24 @@ static void write_super_endio(struct bio *bio, int error)
        struct cache *ca = bio->bi_private;
 
        bch_count_io_errors(ca, error, "writing superblock");
-       closure_put(&ca->set->sb_write.cl);
+       closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+       up(&c->sb_write_mutex);
 }
 
 void bcache_write_super(struct cache_set *c)
 {
-       struct closure *cl = &c->sb_write.cl;
+       struct closure *cl = &c->sb_write;
        struct cache *ca;
        unsigned i;
 
-       closure_lock(&c->sb_write, &c->cl);
+       down(&c->sb_write_mutex);
+       closure_init(cl, &c->cl);
 
        c->sb.seq++;
 
@@ -317,7 +334,7 @@ void bcache_write_super(struct cache_set *c)
                __write_super(&ca->sb, bio);
        }
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
 
 /* UUID io */
@@ -325,29 +342,37 @@ void bcache_write_super(struct cache_set *c)
 static void uuid_endio(struct bio *bio, int error)
 {
        struct closure *cl = bio->bi_private;
-       struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
        cache_set_err_on(error, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
 }
 
+static void uuid_io_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+       up(&c->uuid_write_mutex);
+}
+
 static void uuid_io(struct cache_set *c, unsigned long rw,
                    struct bkey *k, struct closure *parent)
 {
-       struct closure *cl = &c->uuid_write.cl;
+       struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
        unsigned i;
        char buf[80];
 
        BUG_ON(!parent);
-       closure_lock(&c->uuid_write, parent);
+       down(&c->uuid_write_mutex);
+       closure_init(cl, parent);
 
        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);
 
                bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-               bio->bi_size    = KEY_SIZE(k) << 9;
+               bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
@@ -359,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                        break;
        }
 
-       bch_bkey_to_text(buf, sizeof(buf), k);
+       bch_extent_to_text(buf, sizeof(buf), k);
        pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
@@ -368,14 +393,14 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                                 u - c->uuids, u->uuid, u->label,
                                 u->first_reg, u->last_reg, u->invalidated);
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, uuid_io_unlock);
 }
 
 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 {
        struct bkey *k = &j->uuid_bucket;
 
-       if (bch_btree_ptr_invalid(c, k))
+       if (__bch_btree_ptr_invalid(c, k))
                return "bad uuid pointer";
 
        bkey_copy(&c->uuid_bucket, k);
@@ -420,7 +445,7 @@ static int __uuid_write(struct cache_set *c)
 
        lockdep_assert_held(&bch_register_lock);
 
-       if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+       if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
                return 1;
 
        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -503,10 +528,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
        closure_init_stack(cl);
 
-       bio->bi_sector  = bucket * ca->sb.bucket_size;
-       bio->bi_bdev    = ca->bdev;
-       bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-       bio->bi_size    = bucket_bytes(ca);
+       bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
+       bio->bi_bdev            = ca->bdev;
+       bio->bi_rw              = REQ_SYNC|REQ_META|rw;
+       bio->bi_iter.bi_size    = bucket_bytes(ca);
 
        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
@@ -538,8 +563,8 @@ void bch_prio_write(struct cache *ca)
        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);
 
-       pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
-                fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+       //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+       //       fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
@@ -558,7 +583,7 @@ void bch_prio_write(struct cache *ca)
                p->magic        = pset_magic(&ca->sb);
                p->csum         = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-               bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+               bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
                BUG_ON(bucket == -1);
 
                mutex_unlock(&ca->set->bucket_lock);
@@ -739,8 +764,6 @@ static void bcache_device_free(struct bcache_device *d)
        }
 
        bio_split_pool_free(&d->bio_split_hook);
-       if (d->unaligned_bvec)
-               mempool_destroy(d->unaligned_bvec);
        if (d->bio_split)
                bioset_free(d->bio_split);
        if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +816,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
                return minor;
 
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
-                               sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
            bio_split_pool_init(&d->bio_split_hook) ||
            !(d->disk = alloc_disk(1))) {
                ida_simple_remove(&bcache_minor, minor);
@@ -1102,7 +1123,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
-       closure_init_unlocked(&dc->sb_write);
+       sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
@@ -1114,6 +1135,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }
 
+       dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+       if (dc->disk.stripe_size)
+               dc->partial_stripes_expensive =
+                       q->limits.raid_partial_stripes_expensive;
+
        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
        if (ret)
@@ -1325,8 +1352,8 @@ static void cache_set_free(struct closure *cl)
                if (ca)
                        kobject_put(&ca->kobj);
 
+       bch_bset_sort_state_free(&c->sort);
        free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
-       free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
        if (c->bio_split)
                bioset_free(c->bio_split);
@@ -1451,21 +1478,17 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        c->block_bits           = ilog2(sb->block_size);
        c->nr_uuids             = bucket_bytes(c) / sizeof(struct uuid_entry);
 
-       c->btree_pages          = c->sb.bucket_size / PAGE_SECTORS;
+       c->btree_pages          = bucket_pages(c);
        if (c->btree_pages > BTREE_MAX_PAGES)
                c->btree_pages = max_t(int, c->btree_pages / 4,
                                       BTREE_MAX_PAGES);
 
-       c->sort_crit_factor = int_sqrt(c->btree_pages);
-
-       closure_init_unlocked(&c->sb_write);
+       sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->try_wait);
        init_waitqueue_head(&c->bucket_wait);
-       closure_init_unlocked(&c->uuid_write);
-       mutex_init(&c->sort_lock);
+       sema_init(&c->uuid_write_mutex, 1);
 
-       spin_lock_init(&c->sort_time.lock);
        spin_lock_init(&c->btree_gc_time.lock);
        spin_lock_init(&c->btree_split_time.lock);
        spin_lock_init(&c->btree_read_time.lock);
@@ -1493,11 +1516,11 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
                                bucket_pages(c))) ||
            !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
            !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            bch_journal_alloc(c) ||
            bch_btree_cache_alloc(c) ||
-           bch_open_buckets_alloc(c))
+           bch_open_buckets_alloc(c) ||
+           bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
                goto err;
 
        c->congested_read_threshold_us  = 2000;
@@ -1553,7 +1576,7 @@ static void run_cache_set(struct cache_set *c)
                k = &j->btree_root;
 
                err = "bad btree root";
-               if (bch_btree_ptr_invalid(c, k))
+               if (__bch_btree_ptr_invalid(c, k))
                        goto err;
 
                err = "error reading btree root";
@@ -1747,6 +1770,7 @@ err:
 void bch_cache_release(struct kobject *kobj)
 {
        struct cache *ca = container_of(kobj, struct cache, kobj);
+       unsigned i;
 
        if (ca->set)
                ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1760,7 +1784,9 @@ void bch_cache_release(struct kobject *kobj)
        free_heap(&ca->heap);
        free_fifo(&ca->unused);
        free_fifo(&ca->free_inc);
-       free_fifo(&ca->free);
+
+       for (i = 0; i < RESERVE_NR; i++)
+               free_fifo(&ca->free[i]);
 
        if (ca->sb_bio.bi_inline_vecs[0].bv_page)
                put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1786,10 +1812,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
        ca->journal.bio.bi_max_vecs = 8;
        ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
-       free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
-       free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+       free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-       if (!init_fifo(&ca->free,       free, GFP_KERNEL) ||
+       if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
            !init_fifo(&ca->unused,     free << 2, GFP_KERNEL) ||
            !init_heap(&ca->heap,       free << 3, GFP_KERNEL) ||
@@ -2034,7 +2062,8 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
-       unregister_blkdev(bcache_major, "bcache");
+       if (bcache_major)
+               unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
 }
 
index a1f85612f0b3dfc5c90b768aaef48118034c02c7..c6ab69333a6dfde7761e9e2b05c16574f5c52480 100644 (file)
@@ -102,7 +102,6 @@ rw_attribute(bypass_torture_test);
 rw_attribute(key_merging_disabled);
 rw_attribute(gc_always_rewrite);
 rw_attribute(expensive_debug_checks);
-rw_attribute(freelist_percent);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
 rw_attribute(copy_gc_enabled);
@@ -401,6 +400,48 @@ static struct attribute *bch_flash_dev_files[] = {
 };
 KTYPE(bch_flash_dev);
 
+struct bset_stats_op {
+       struct btree_op op;
+       size_t nodes;
+       struct bset_stats stats;
+};
+
+static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+{
+       struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
+
+       op->nodes++;
+       bch_btree_keys_stats(&b->keys, &op->stats);
+
+       return MAP_CONTINUE;
+}
+
+int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+       struct bset_stats_op op;
+       int ret;
+
+       memset(&op, 0, sizeof(op));
+       bch_btree_op_init(&op.op, -1);
+
+       ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+       if (ret < 0)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE,
+                       "btree nodes:           %zu\n"
+                       "written sets:          %zu\n"
+                       "unwritten sets:                %zu\n"
+                       "written key bytes:     %zu\n"
+                       "unwritten key bytes:   %zu\n"
+                       "floats:                        %zu\n"
+                       "failed:                        %zu\n",
+                       op.nodes,
+                       op.stats.sets_written, op.stats.sets_unwritten,
+                       op.stats.bytes_written, op.stats.bytes_unwritten,
+                       op.stats.floats, op.stats.failed);
+}
+
 SHOW(__bch_cache_set)
 {
        unsigned root_usage(struct cache_set *c)
@@ -419,7 +460,7 @@ lock_root:
                        rw_lock(false, b, b->level);
                } while (b != c->root);
 
-               for_each_key_filter(b, k, &iter, bch_ptr_bad)
+               for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                        bytes += bkey_bytes(k);
 
                rw_unlock(false, b);
@@ -434,7 +475,7 @@ lock_root:
 
                mutex_lock(&c->bucket_lock);
                list_for_each_entry(b, &c->btree_cache, list)
-                       ret += 1 << (b->page_order + PAGE_SHIFT);
+                       ret += 1 << (b->keys.page_order + PAGE_SHIFT);
 
                mutex_unlock(&c->bucket_lock);
                return ret;
@@ -491,7 +532,7 @@ lock_root:
 
        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
-       sysfs_print_time_stats(&c->sort_time,           btree_sort, ms, us);
+       sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);
        sysfs_print_time_stats(&c->try_harder_time,     try_harder, ms, us);
 
@@ -711,9 +752,6 @@ SHOW(__bch_cache)
        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
 
-       sysfs_print(freelist_percent, ca->free.size * 100 /
-                   ((size_t) ca->sb.nbuckets));
-
        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
@@ -820,32 +858,6 @@ STORE(__bch_cache)
                }
        }
 
-       if (attr == &sysfs_freelist_percent) {
-               DECLARE_FIFO(long, free);
-               long i;
-               size_t p = strtoul_or_return(buf);
-
-               p = clamp_t(size_t,
-                           ((size_t) ca->sb.nbuckets * p) / 100,
-                           roundup_pow_of_two(ca->sb.nbuckets) >> 9,
-                           ca->sb.nbuckets / 2);
-
-               if (!init_fifo_exact(&free, p, GFP_KERNEL))
-                       return -ENOMEM;
-
-               mutex_lock(&ca->set->bucket_lock);
-
-               fifo_move(&free, &ca->free);
-               fifo_swap(&free, &ca->free);
-
-               mutex_unlock(&ca->set->bucket_lock);
-
-               while (fifo_pop(&free, i))
-                       atomic_dec(&ca->buckets[i].pin);
-
-               free_fifo(&free);
-       }
-
        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
@@ -869,7 +881,6 @@ static struct attribute *bch_cache_files[] = {
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
-       &sysfs_freelist_percent,
        &sysfs_cache_replacement_policy,
        NULL
 };
index bb37618e76648b7bc3caf99532e4f81b48666dfe..db3ae4c2b2233a4026ebe8a183042eb84d53cdc4 100644 (file)
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-       size_t size = bio->bi_size;
+       size_t size = bio->bi_iter.bi_size;
        struct bio_vec *bv = bio->bi_io_vec;
 
-       BUG_ON(!bio->bi_size);
+       BUG_ON(!bio->bi_iter.bi_size);
        BUG_ON(bio->bi_vcnt);
 
        bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
index 1030c6020e986934e21c94628794e5e342271b49..ac7d0d1f70d7be9ae818a51c6d77c461d770eb4b 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BCACHE_UTIL_H
 #define _BCACHE_UTIL_H
 
+#include <linux/blkdev.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/llist.h>
@@ -17,11 +18,13 @@ struct closure;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
+#define EBUG_ON(cond)                  BUG_ON(cond)
 #define atomic_dec_bug(v)      BUG_ON(atomic_dec_return(v) < 0)
 #define atomic_inc_bug(v, i)   BUG_ON(atomic_inc_return(v) <= i)
 
 #else /* DEBUG */
 
+#define EBUG_ON(cond)                  do { if (cond); } while (0)
 #define atomic_dec_bug(v)      atomic_dec(v)
 #define atomic_inc_bug(v, i)   atomic_inc(v)
 
@@ -391,6 +394,11 @@ struct time_stats {
 
 void bch_time_stats_update(struct time_stats *stats, uint64_t time);
 
+static inline unsigned local_clock_us(void)
+{
+       return local_clock() >> 10;
+}
+
 #define NSEC_PER_ns                    1L
 #define NSEC_PER_us                    NSEC_PER_USEC
 #define NSEC_PER_ms                    NSEC_PER_MSEC
index 6c44fe059c2769a4b2c317f25878596268726f0d..f4300e4c0114a0cc1abc3b90f757a03666d2637b 100644 (file)
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-       bio->bi_size            = KEY_SIZE(&w->key) << 9;
+       bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
        bio->bi_private         = w;
        bio->bi_io_vec          = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
 
        dirty_init(w);
        io->bio.bi_rw           = WRITE;
-       io->bio.bi_sector       = KEY_START(&w->key);
+       io->bio.bi_iter.bi_sector = KEY_START(&w->key);
        io->bio.bi_bdev         = io->dc->bdev;
        io->bio.bi_end_io       = dirty_endio;
 
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
                io->dc          = dc;
 
                dirty_init(w);
-               io->bio.bi_sector       = PTR_OFFSET(&w->key, 0);
+               io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                    &w->key, 0)->bdev;
                io->bio.bi_rw           = READ;
index c9ddcf4614b9300701c9867033c82bc13cadf472..e2f8598937ac41ff5c7577bc5e65aeb39de95386 100644 (file)
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                return false;
 
        if (dc->partial_stripes_expensive &&
-           bcache_dev_stripe_dirty(dc, bio->bi_sector,
+           bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;
 
index 3a8cfa2645c72f6539170f2ab2d3242bb4a6fa58..dd3646111561512f50728aa915b8a279be1c26ac 100644 (file)
  * original bio state.
  */
 
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
-       __u16 bv_len;
-       __u16 bv_offset;
-#else
-       unsigned bv_len;
-       unsigned bv_offset;
-#endif
-};
-
 struct dm_bio_details {
-       sector_t bi_sector;
        struct block_device *bi_bdev;
-       unsigned int bi_size;
-       unsigned short bi_idx;
        unsigned long bi_flags;
-       struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+       struct bvec_iter bi_iter;
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-       unsigned i;
-
-       bd->bi_sector = bio->bi_sector;
        bd->bi_bdev = bio->bi_bdev;
-       bd->bi_size = bio->bi_size;
-       bd->bi_idx = bio->bi_idx;
        bd->bi_flags = bio->bi_flags;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
-               bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
-       }
+       bd->bi_iter = bio->bi_iter;
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-       unsigned i;
-
-       bio->bi_sector = bd->bi_sector;
        bio->bi_bdev = bd->bi_bdev;
-       bio->bi_size = bd->bi_size;
-       bio->bi_idx = bd->bi_idx;
        bio->bi_flags = bd->bi_flags;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
-               bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
-       }
+       bio->bi_iter = bd->bi_iter;
 }
 
 #endif
index 9ed42125514b38d560464e4dd3d741038db06858..66c5d130c8c24c4f3101ce78296460da4487f38b 100644 (file)
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
        bio_init(&b->bio);
        b->bio.bi_io_vec = b->bio_vec;
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
-       b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+       b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = end_io;
 
index 930e8c3d73e985b1e75769a9894f13ffd32d756a..1e018e986610a57ef9f82a818aa1f70a8c364e30 100644 (file)
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
 
 static void iot_update_stats(struct io_tracker *t, struct bio *bio)
 {
-       if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+       if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
                t->nr_rand_samples++;
        }
 
-       t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+       t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
 }
 
 static void iot_check_for_pattern_switch(struct io_tracker *t)
index 09334c275c79e91c7bf4fd41e18e641b2196073a..ffd472e015caa918facaed4f65a621c0f61e58a9 100644 (file)
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
        bio->bi_end_io = h->bi_end_io;
        bio->bi_private = h->bi_private;
+
+       /*
+        * Must bump bi_remaining to allow bio to complete with
+        * restored bi_end_io.
+        */
+       atomic_inc(&bio->bi_remaining);
 }
 
 /*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
 static void remap_to_cache(struct cache *cache, struct bio *bio,
                           dm_cblock_t cblock)
 {
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = cache->cache_dev->bdev;
        if (!block_size_is_power_of_two(cache))
-               bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
-                               sector_div(bi_sector, cache->sectors_per_block);
+               bio->bi_iter.bi_sector =
+                       (from_cblock(cblock) * cache->sectors_per_block) +
+                       sector_div(bi_sector, cache->sectors_per_block);
        else
-               bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
-                               (bi_sector & (cache->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (from_cblock(cblock) << cache->sectors_per_block_shift) |
+                       (bi_sector & (cache->sectors_per_block - 1));
 }
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 
 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 {
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (!block_size_is_power_of_two(cache))
                (void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 {
        return (bio_data_dir(bio) == WRITE) &&
-               (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+               (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-       BUG_ON(bio->bi_size);
+       BUG_ON(bio->bi_iter.bi_size);
        if (!pb->req_nr)
                remap_to_origin(cache, bio);
        else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
  */
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
-       dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+       dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
                                                  cache->discard_block_size);
-       dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+       dm_block_t end_block = bio_end_sector(bio);
        dm_block_t b;
 
        end_block = block_div(end_block, cache->discard_block_size);
index 81b0fa66045204604a979fdc929e723f6c914a5c..784695d22fde1acaaf11acd78c7263438c04648e 100644 (file)
@@ -39,10 +39,8 @@ struct convert_context {
        struct completion restart;
        struct bio *bio_in;
        struct bio *bio_out;
-       unsigned int offset_in;
-       unsigned int offset_out;
-       unsigned int idx_in;
-       unsigned int idx_out;
+       struct bvec_iter iter_in;
+       struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
 {
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
-       ctx->offset_in = 0;
-       ctx->offset_out = 0;
-       ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-       ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+       if (bio_in)
+               ctx->iter_in = bio_in->bi_iter;
+       if (bio_out)
+               ctx->iter_out = bio_out->bi_iter;
        ctx->cc_sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
 {
-       struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-       struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+       struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+       struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
        struct dm_crypt_request *dmreq;
        u8 *iv;
        int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
        dmreq->iv_sector = ctx->cc_sector;
        dmreq->ctx = ctx;
        sg_init_table(&dmreq->sg_in, 1);
-       sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-                   bv_in->bv_offset + ctx->offset_in);
+       sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+                   bv_in.bv_offset);
 
        sg_init_table(&dmreq->sg_out, 1);
-       sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-                   bv_out->bv_offset + ctx->offset_out);
+       sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+                   bv_out.bv_offset);
 
-       ctx->offset_in += 1 << SECTOR_SHIFT;
-       if (ctx->offset_in >= bv_in->bv_len) {
-               ctx->offset_in = 0;
-               ctx->idx_in++;
-       }
-
-       ctx->offset_out += 1 << SECTOR_SHIFT;
-       if (ctx->offset_out >= bv_out->bv_len) {
-               ctx->offset_out = 0;
-               ctx->idx_out++;
-       }
+       bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+       bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
 
        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
 
        atomic_set(&ctx->cc_pending, 1);
 
-       while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-             ctx->idx_out < ctx->bio_out->bi_vcnt) {
+       while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
                crypt_alloc_req(cc, ctx);
 
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                size -= len;
        }
 
-       if (!clone->bi_size) {
+       if (!clone->bi_iter.bi_size) {
                bio_put(clone);
                return NULL;
        }
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        crypt_inc_pending(io);
 
        clone_init(io, clone);
-       clone->bi_sector = cc->start + io->sector;
+       clone->bi_iter.bi_sector = cc->start + io->sector;
 
        generic_make_request(clone);
        return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
        }
 
        /* crypt_convert should have filled the clone bio */
-       BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+       BUG_ON(io->ctx.iter_out.bi_size);
 
-       clone->bi_sector = cc->start + io->sector;
+       clone->bi_iter.bi_sector = cc->start + io->sector;
 
        if (async)
                kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        struct dm_crypt_io *new_io;
        int crypt_finished;
        unsigned out_of_pages = 0;
-       unsigned remaining = io->base_bio->bi_size;
+       unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;
 
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                }
 
                io->ctx.bio_out = clone;
-               io->ctx.idx_out = 0;
+               io->ctx.iter_out = clone->bi_iter;
 
-               remaining -= clone->bi_size;
+               remaining -= clone->bi_iter.bi_size;
                sector += bio_sectors(clone);
 
                crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                        crypt_inc_pending(new_io);
                        crypt_convert_init(cc, &new_io->ctx, NULL,
                                           io->base_bio, sector);
-                       new_io->ctx.idx_in = io->ctx.idx_in;
-                       new_io->ctx.offset_in = io->ctx.offset_in;
+                       new_io->ctx.iter_in = io->ctx.iter_in;
 
                        /*
                         * Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
        if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
-                       bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+                       bio->bi_iter.bi_sector = cc->start +
+                               dm_target_offset(ti, bio->bi_iter.bi_sector);
                return DM_MAPIO_REMAPPED;
        }
 
-       io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+       io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
index a8a511c053a5d5fda6574933e616719256768d31..42c3a27a14cc3a906b5f892a6206de348b6b58ee 100644 (file)
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
        if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
                bio->bi_bdev = dc->dev_write->bdev;
                if (bio_sectors(bio))
-                       bio->bi_sector = dc->start_write +
-                                        dm_target_offset(ti, bio->bi_sector);
+                       bio->bi_iter.bi_sector = dc->start_write +
+                               dm_target_offset(ti, bio->bi_iter.bi_sector);
 
                return delay_bio(dc, dc->write_delay, bio);
        }
 
        bio->bi_bdev = dc->dev_read->bdev;
-       bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dc->start_read +
+               dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return delay_bio(dc, dc->read_delay, bio);
 }
index c80a0ec5f1269b40be7da133c68c6e789c5e5329..b257e46876d357f831bf1c751010d5b16fa125b8 100644 (file)
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = fc->dev->bdev;
        if (bio_sectors(bio))
-               bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+               bio->bi_iter.bi_sector =
+                       flakey_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
                DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
                        "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
                        bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
-                       bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+                       (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
        }
 }
 
index 2a20986a2fec9701cd25e443c990f2b7a8479f9f..b2b8a10e842784de5454e2639474f1a208b4b3f1 100644 (file)
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
                  struct page **p, unsigned long *len, unsigned *offset)
 {
-       struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-       *p = bvec->bv_page;
-       *len = bvec->bv_len;
-       *offset = bvec->bv_offset;
+       struct bio *bio = dp->context_ptr;
+       struct bio_vec bvec = bio_iovec(bio);
+       *p = bvec.bv_page;
+       *len = bvec.bv_len;
+       *offset = bvec.bv_offset;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-       struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-       dp->context_ptr = bvec + 1;
+       struct bio *bio = dp->context_ptr;
+       struct bio_vec bvec = bio_iovec(bio);
+
+       bio_advance(bio, bvec.bv_len);
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-       dp->get_page = bvec_get_page;
-       dp->next_page = bvec_next_page;
-       dp->context_ptr = bvec;
+       dp->get_page = bio_get_page;
+       dp->next_page = bio_next_page;
+       dp->context_ptr = bio;
 }
 
 /*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                                          dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-               bio->bi_sector = where->sector + (where->count - remaining);
+               bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                store_io_and_region_in_bio(bio, io, region);
 
                if (rw & REQ_DISCARD) {
                        num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-                       bio->bi_size = num_sectors << SECTOR_SHIFT;
+                       bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
                } else if (rw & REQ_WRITE_SAME) {
                        /*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                        dp->get_page(dp, &page, &len, &offset);
                        bio_add_page(bio, page, logical_block_size, offset);
                        num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
-                       bio->bi_size = num_sectors << SECTOR_SHIFT;
+                       bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
                        offset = 0;
                        remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;
 
-       case DM_IO_BVEC:
-               bvec_dp_init(dp, io_req->mem.ptr.bvec);
+       case DM_IO_BIO:
+               bio_dp_init(dp, io_req->mem.ptr.bio);
                break;
 
        case DM_IO_VMA:
index 4f99d267340cdb48c3a7b64edcdd4ed9a7fd48ea..53e848c1093936560a9554c9fdacbec2f6dae5bd 100644 (file)
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = lc->dev->bdev;
        if (bio_sectors(bio))
-               bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+               bio->bi_iter.bi_sector =
+                       linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static int linear_map(struct dm_target *ti, struct bio *bio)
index 9584443c56148608d159ceab1d436fd6bacfda3b..f284e0bfb25fca869855f390f2d8d7a5519a0864 100644 (file)
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
        region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
        if (log->type->in_sync(log, region, 0))
-               return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;
+               return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 
        return 0;
 }
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
-       if (unlikely(!bio->bi_size))
+       if (unlikely(!bio->bi_iter.bi_size))
                return 0;
-       return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+       return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
        bio->bi_bdev = m->dev->bdev;
-       bio->bi_sector = map_sector(m, bio);
+       bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
 static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
        struct dm_io_region io;
        struct dm_io_request io_req = {
                .bi_rw = READ,
-               .mem.type = DM_IO_BVEC,
-               .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+               .mem.type = DM_IO_BIO,
+               .mem.ptr.bio = bio,
                .notify.fn = read_callback,
                .notify.context = bio,
                .client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
                 * We can only read balance if the region is in sync.
                 */
                if (likely(region_in_sync(ms, region, 1)))
-                       m = choose_mirror(ms, bio->bi_sector);
+                       m = choose_mirror(ms, bio->bi_iter.bi_sector);
                else if (m && atomic_read(&m->error_count))
                        m = NULL;
 
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
-               .mem.type = DM_IO_BVEC,
-               .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+               .mem.type = DM_IO_BIO,
+               .mem.ptr.bio = bio,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         * The region is in-sync and we can perform reads directly.
         * Store enough information so we can retry if it fails.
         */
-       m = choose_mirror(ms, bio->bi_sector);
+       m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
                return -EIO;
 
index 69732e03eb3490d636a0183bd22303742ab65c65..b929fd5f4984bb67fbb62474e24e5af425758770 100644 (file)
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
 
 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
 {
-       return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+       return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+                                     rh->target_begin);
 }
 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
 
index 717718558bd9908469b23bbb9b3cd0223ac243f3..ebddef5237e4b28e6254e486b3267dbccca9864e 100644 (file)
@@ -1438,6 +1438,7 @@ out:
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
+               atomic_inc(&full_bio->bi_remaining);
        }
        free_pending_exception(pe);
 
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
 {
        bio->bi_bdev = s->cow->bdev;
-       bio->bi_sector = chunk_to_sector(s->store,
-                                        dm_chunk_number(e->new_chunk) +
-                                        (chunk - e->old_chunk)) +
-                                        (bio->bi_sector &
-                                         s->store->chunk_mask);
+       bio->bi_iter.bi_sector =
+               chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+                               (chunk - e->old_chunk)) +
+               (bio->bi_iter.bi_sector & s->store->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
-       chunk = sector_to_chunk(s->store, bio->bi_sector);
+       chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                r = DM_MAPIO_SUBMITTED;
 
                if (!pe->started &&
-                   bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+                   bio->bi_iter.bi_size ==
+                   (s->store->chunk_size << SECTOR_SHIFT)) {
                        pe->started = 1;
                        up_write(&s->lock);
                        start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
-       chunk = sector_to_chunk(s->store, bio->bi_sector);
+       chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
        down_write(&s->lock);
 
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
-               r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+               r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
        up_read(&_origins_lock);
 
        return r;
index 73c1712dad96d09f2760416852dd0cacd22cbd33..d1600d2aa2e2e6983643ef0ef864195f858d4f9d 100644 (file)
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
 {
        sector_t begin, end;
 
-       stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+       stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+                               target_stripe, &begin);
        stripe_map_range_sector(sc, bio_end_sector(bio),
                                target_stripe, &end);
        if (begin < end) {
                bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
-               bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
-               bio->bi_size = to_bytes(end - begin);
+               bio->bi_iter.bi_sector = begin +
+                       sc->stripe[target_stripe].physical_start;
+               bio->bi_iter.bi_size = to_bytes(end - begin);
                return DM_MAPIO_REMAPPED;
        } else {
                /* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
                return stripe_map_range(sc, bio, target_bio_nr);
        }
 
-       stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+       stripe_map_sector(sc, bio->bi_iter.bi_sector,
+                         &stripe, &bio->bi_iter.bi_sector);
 
-       bio->bi_sector += sc->stripe[stripe].physical_start;
+       bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
        bio->bi_bdev = sc->stripe[stripe].dev->bdev;
 
        return DM_MAPIO_REMAPPED;
index ff9ac4be47210839369e233d1d1dfc161cbfb852..09a688b3d48ca1445e136544321a54b112b280e1 100644 (file)
@@ -311,11 +311,11 @@ error:
 static int switch_map(struct dm_target *ti, struct bio *bio)
 {
        struct switch_ctx *sctx = ti->private;
-       sector_t offset = dm_target_offset(ti, bio->bi_sector);
+       sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
        unsigned path_nr = switch_get_path_nr(sctx, offset);
 
        bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
-       bio->bi_sector = sctx->path_list[path_nr].start + offset;
+       bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
        return DM_MAPIO_REMAPPED;
 }
index 726228b33a012f9994fc2f8843b25a0ca46ef966..faaf944597ab7669b90f3ecb85152fbcd16cbe33 100644 (file)
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
        struct pool *pool = tc->pool;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = tc->pool_dev->bdev;
        if (block_size_is_power_of_two(pool))
-               bio->bi_sector = (block << pool->sectors_per_block_shift) |
-                               (bi_sector & (pool->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (block << pool->sectors_per_block_shift) |
+                       (bi_sector & (pool->sectors_per_block - 1));
        else
-               bio->bi_sector = (block * pool->sectors_per_block) +
+               bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                 sector_div(bi_sector, pool->sectors_per_block);
 }
 
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-       if (m->bio)
+       if (m->bio) {
                m->bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&m->bio->bi_remaining);
+       }
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        int r;
 
        bio = m->bio;
-       if (bio)
+       if (bio) {
                bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&bio->bi_remaining);
+       }
 
        if (m->err) {
                cell_error(pool, m->cell);
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-       return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+       return bio->bi_iter.bi_size ==
+               (pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_detain(pool, &key, bio, &cell))
                return;
 
-       if (bio_data_dir(bio) == WRITE && bio->bi_size)
+       if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);
 
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
-               if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+               if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
                        handle_unserviceable_bio(tc->pool, bio);
                else {
                        inc_all_io_entry(tc->pool, bio);
@@ -2939,7 +2945,7 @@ out_unlock:
 
 static int thin_map(struct dm_target *ti, struct bio *bio)
 {
-       bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return thin_bio_map(ti, bio);
 }
index 4b7941db3aff33223481464f24a162d101ab7698..796007a5e0e1a4b6e83b0871c1fca1ef8c0c461f 100644 (file)
@@ -73,15 +73,10 @@ struct dm_verity_io {
        sector_t block;
        unsigned n_blocks;
 
-       /* saved bio vector */
-       struct bio_vec *io_vec;
-       unsigned io_vec_size;
+       struct bvec_iter iter;
 
        struct work_struct work;
 
-       /* A space for short vectors; longer vectors are allocated separately. */
-       struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
        /*
         * Three variably-size fields follow this struct:
         *
@@ -284,9 +279,10 @@ release_ret_r:
 static int verity_verify_io(struct dm_verity_io *io)
 {
        struct dm_verity *v = io->v;
+       struct bio *bio = dm_bio_from_per_bio_data(io,
+                                                  v->ti->per_bio_data_size);
        unsigned b;
        int i;
-       unsigned vector = 0, offset = 0;
 
        for (b = 0; b < io->n_blocks; b++) {
                struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
                }
 
                todo = 1 << v->data_dev_block_bits;
-               do {
-                       struct bio_vec *bv;
+               while (io->iter.bi_size) {
                        u8 *page;
-                       unsigned len;
-
-                       BUG_ON(vector >= io->io_vec_size);
-                       bv = &io->io_vec[vector];
-                       page = kmap_atomic(bv->bv_page);
-                       len = bv->bv_len - offset;
-                       if (likely(len >= todo))
-                               len = todo;
-                       r = crypto_shash_update(desc,
-                                       page + bv->bv_offset + offset, len);
+                       struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+                       page = kmap_atomic(bv.bv_page);
+                       r = crypto_shash_update(desc, page + bv.bv_offset,
+                                               bv.bv_len);
                        kunmap_atomic(page);
+
                        if (r < 0) {
                                DMERR("crypto_shash_update failed: %d", r);
                                return r;
                        }
-                       offset += len;
-                       if (likely(offset == bv->bv_len)) {
-                               offset = 0;
-                               vector++;
-                       }
-                       todo -= len;
-               } while (todo);
+
+                       bio_advance_iter(bio, &io->iter, bv.bv_len);
+               }
 
                if (!v->version) {
                        r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
                        return -EIO;
                }
        }
-       BUG_ON(vector != io->io_vec_size);
-       BUG_ON(offset);
 
        return 0;
 }
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
        bio->bi_end_io = io->orig_bi_end_io;
        bio->bi_private = io->orig_bi_private;
 
-       if (io->io_vec != io->io_vec_inline)
-               mempool_free(io->io_vec, v->vec_mempool);
-
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        struct dm_verity_io *io;
 
        bio->bi_bdev = v->data_dev->bdev;
-       bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+       bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
-       if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+       if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
                return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        io->v = v;
        io->orig_bi_end_io = bio->bi_end_io;
        io->orig_bi_private = bio->bi_private;
-       io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
-       io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+       io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+       io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
 
        bio->bi_end_io = verity_end_io;
        bio->bi_private = io;
-       io->io_vec_size = bio_segments(bio);
-       if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
-               io->io_vec = io->io_vec_inline;
-       else
-               io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
-       memcpy(io->io_vec, bio_iovec(bio),
-              io->io_vec_size * sizeof(struct bio_vec));
+       io->iter = bio->bi_iter;
 
        verity_submit_prefetch(v, io);
 
index b49c7628424171f0622ed4446e5c4111b00ba418..8c53b09b9a2c5a3050b22f4fba82af5563f1d59a 100644 (file)
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
                atomic_inc_return(&md->pending[rw]));
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
        part_stat_unlock();
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), true, duration, &io->stats_aux);
 
        /*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
                if (io_error == DM_ENDIO_REQUEUE)
                        return;
 
-               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
        struct dm_rq_clone_bio_info *info = clone->bi_private;
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
-       unsigned int nr_bytes = info->orig->bi_size;
+       unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 
        bio_put(clone);
 
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
         * this io.
         */
        atomic_inc(&tio->io->io_count);
-       sector = clone->bi_sector;
+       sector = clone->bi_iter.bi_sector;
        r = ti->type->map(ti, clone);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
-       unsigned short idx;
 };
 
 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
 {
-       bio->bi_sector = sector;
-       bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
-       bio->bi_idx = idx;
-       bio->bi_vcnt = idx + bv_count;
-       bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
-                               unsigned short idx, unsigned len, unsigned offset,
-                               unsigned trim)
-{
-       if (!bio_integrity(bio))
-               return;
-
-       bio_integrity_clone(clone, bio, GFP_NOIO);
-
-       if (trim)
-               bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
-                           sector_t sector, unsigned short idx,
-                           unsigned offset, unsigned len)
-{
-       struct bio *clone = &tio->clone;
-       struct bio_vec *bv = bio->bi_io_vec + idx;
-
-       *clone->bi_io_vec = *bv;
-
-       bio_setup_sector(clone, sector, len);
-
-       clone->bi_bdev = bio->bi_bdev;
-       clone->bi_rw = bio->bi_rw;
-       clone->bi_vcnt = 1;
-       clone->bi_io_vec->bv_offset = offset;
-       clone->bi_io_vec->bv_len = clone->bi_size;
-       clone->bi_flags |= 1 << BIO_CLONED;
-
-       clone_bio_integrity(bio, clone, idx, len, offset, 1);
+       bio->bi_iter.bi_sector = sector;
+       bio->bi_iter.bi_size = to_bytes(len);
 }
 
 /*
  * Creates a bio that consists of range of complete bvecs.
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
-                     sector_t sector, unsigned short idx,
-                     unsigned short bv_count, unsigned len)
+                     sector_t sector, unsigned len)
 {
        struct bio *clone = &tio->clone;
-       unsigned trim = 0;
 
-       __bio_clone(clone, bio);
-       bio_setup_sector(clone, sector, len);
-       bio_setup_bv(clone, idx, bv_count);
+       __bio_clone_fast(clone, bio);
+
+       if (bio_integrity(bio))
+               bio_integrity_clone(clone, bio, GFP_NOIO);
+
+       bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+       clone->bi_iter.bi_size = to_bytes(len);
 
-       if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
-               trim = 1;
-       clone_bio_integrity(bio, clone, idx, len, 0, trim);
+       if (bio_integrity(bio))
+               bio_integrity_trim(clone, 0, len);
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
         * and discard, so no need for concern about wasted bvec allocations.
         */
-        __bio_clone(clone, ci->bio);
+        __bio_clone_fast(clone, ci->bio);
        if (len)
                bio_setup_sector(clone, ci->sector, len);
 
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-                                    sector_t sector, int nr_iovecs,
-                                    unsigned short idx, unsigned short bv_count,
-                                    unsigned offset, unsigned len,
-                                    unsigned split_bvec)
+                                    sector_t sector, unsigned len)
 {
        struct bio *bio = ci->bio;
        struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
                num_target_bios = ti->num_write_bios(ti, bio);
 
        for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
-               tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
-               if (split_bvec)
-                       clone_split_bio(tio, bio, sector, idx, offset, len);
-               else
-                       clone_bio(tio, bio, sector, idx, bv_count, len);
+               tio = alloc_tio(ci, ti, 0, target_bio_nr);
+               clone_bio(tio, bio, sector, len);
                __map_bio(tio);
        }
 }
@@ -1378,60 +1328,6 @@ static int __send_write_same(struct clone_info *ci)
        return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
-/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
-       struct bio *bio = ci->bio;
-       sector_t bv_len, total_len = 0;
-
-       for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
-               bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
-               if (bv_len > max)
-                       break;
-
-               max -= bv_len;
-               total_len += bv_len;
-       }
-
-       return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
-                                      struct dm_target *ti, sector_t max)
-{
-       struct bio *bio = ci->bio;
-       struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-       sector_t remaining = to_sector(bv->bv_len);
-       unsigned offset = 0;
-       sector_t len;
-
-       do {
-               if (offset) {
-                       ti = dm_table_find_target(ci->map, ci->sector);
-                       if (!dm_target_is_valid(ti))
-                               return -EIO;
-
-                       max = max_io_len(ci->sector, ti);
-               }
-
-               len = min(remaining, max);
-
-               __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
-                                        bv->bv_offset + offset, len, 1);
-
-               ci->sector += len;
-               ci->sector_count -= len;
-               offset += to_bytes(len);
-       } while (remaining -= len);
-
-       ci->idx++;
-
-       return 0;
-}
-
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
@@ -1439,8 +1335,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 {
        struct bio *bio = ci->bio;
        struct dm_target *ti;
-       sector_t len, max;
-       int idx;
+       unsigned len;
 
        if (unlikely(bio->bi_rw & REQ_DISCARD))
                return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
        if (!dm_target_is_valid(ti))
                return -EIO;
 
-       max = max_io_len(ci->sector, ti);
-
-       /*
-        * Optimise for the simple case where we can do all of
-        * the remaining io with a single clone.
-        */
-       if (ci->sector_count <= max) {
-               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-                                        ci->idx, bio->bi_vcnt - ci->idx, 0,
-                                        ci->sector_count, 0);
-               ci->sector_count = 0;
-               return 0;
-       }
-
-       /*
-        * There are some bvecs that don't span targets.
-        * Do as many of these as possible.
-        */
-       if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-               len = __len_within_target(ci, max, &idx);
-
-               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-                                        ci->idx, idx - ci->idx, 0, len, 0);
+       len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-               ci->sector += len;
-               ci->sector_count -= len;
-               ci->idx = idx;
+       __clone_and_map_data_bio(ci, ti, ci->sector, len);
 
-               return 0;
-       }
+       ci->sector += len;
+       ci->sector_count -= len;
 
-       /*
-        * Handle a bvec that must be split between two or more targets.
-        */
-       return __split_bvec_across_targets(ci, ti, max);
+       return 0;
 }
 
 /*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
        ci.io->bio = bio;
        ci.io->md = md;
        spin_lock_init(&ci.io->endio_lock);
-       ci.sector = bio->bi_sector;
-       ci.idx = bio->bi_idx;
+       ci.sector = bio->bi_iter.bi_sector;
 
        start_io_acct(ci.io);
 
index 3193aefe982b7b42badf4eba4adc36f89439d70c..e8b4574956c73e500cd634fa0acafad4fad0b93d 100644 (file)
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
 {
        struct bio *b = bio->bi_private;
 
-       b->bi_size = bio->bi_size;
-       b->bi_sector = bio->bi_sector;
+       b->bi_iter.bi_size = bio->bi_iter.bi_size;
+       b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 
        bio_put(bio);
 
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
                        return;
                }
 
-               if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+               if (check_sector(conf, bio->bi_iter.bi_sector,
+                                bio_end_sector(bio), WRITE))
                        failit = 1;
                if (check_mode(conf, WritePersistent)) {
-                       add_sector(conf, bio->bi_sector, WritePersistent);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  WritePersistent);
                        failit = 1;
                }
                if (check_mode(conf, WriteTransient))
                        failit = 1;
        } else {
                /* read request */
-               if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+               if (check_sector(conf, bio->bi_iter.bi_sector,
+                                bio_end_sector(bio), READ))
                        failit = 1;
                if (check_mode(conf, ReadTransient))
                        failit = 1;
                if (check_mode(conf, ReadPersistent)) {
-                       add_sector(conf, bio->bi_sector, ReadPersistent);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  ReadPersistent);
                        failit = 1;
                }
                if (check_mode(conf, ReadFixable)) {
-                       add_sector(conf, bio->bi_sector, ReadFixable);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  ReadFixable);
                        failit = 1;
                }
        }
index f03fabd2b37bacf34a231a0bb034a6d8f2826e68..56f534b4a2d27036b1f820f8bf4bfed56a9b2002 100644 (file)
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
 
 static void linear_make_request(struct mddev *mddev, struct bio *bio)
 {
+       char b[BDEVNAME_SIZE];
        struct dev_info *tmp_dev;
-       sector_t start_sector;
+       struct bio *split;
+       sector_t start_sector, end_sector, data_offset;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
 
-       rcu_read_lock();
-       tmp_dev = which_dev(mddev, bio->bi_sector);
-       start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
-       if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
-                    || (bio->bi_sector < start_sector))) {
-               char b[BDEVNAME_SIZE];
-
-               printk(KERN_ERR
-                      "md/linear:%s: make_request: Sector %llu out of bounds on "
-                      "dev %s: %llu sectors, offset %llu\n",
-                      mdname(mddev),
-                      (unsigned long long)bio->bi_sector,
-                      bdevname(tmp_dev->rdev->bdev, b),
-                      (unsigned long long)tmp_dev->rdev->sectors,
-                      (unsigned long long)start_sector);
-               rcu_read_unlock();
-               bio_io_error(bio);
-               return;
-       }
-       if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
-               /* This bio crosses a device boundary, so we have to
-                * split it.
-                */
-               struct bio_pair *bp;
-               sector_t end_sector = tmp_dev->end_sector;
+       do {
+               rcu_read_lock();
 
-               rcu_read_unlock();
-
-               bp = bio_split(bio, end_sector - bio->bi_sector);
+               tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+               start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+               end_sector = tmp_dev->end_sector;
+               data_offset = tmp_dev->rdev->data_offset;
+               bio->bi_bdev = tmp_dev->rdev->bdev;
 
-               linear_make_request(mddev, &bp->bio1);
-               linear_make_request(mddev, &bp->bio2);
-               bio_pair_release(bp);
-               return;
-       }
-                   
-       bio->bi_bdev = tmp_dev->rdev->bdev;
-       bio->bi_sector = bio->bi_sector - start_sector
-               + tmp_dev->rdev->data_offset;
-       rcu_read_unlock();
+               rcu_read_unlock();
 
-       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-               /* Just ignore it */
-               bio_endio(bio, 0);
-               return;
-       }
+               if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+                            bio->bi_iter.bi_sector < start_sector))
+                       goto out_of_bounds;
+
+               if (unlikely(bio_end_sector(bio) > end_sector)) {
+                       /* This bio crosses a device boundary, so we have to
+                        * split it.
+                        */
+                       split = bio_split(bio, end_sector -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
 
-       generic_make_request(bio);
+               split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+                       start_sector + data_offset;
+
+               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+                       /* Just ignore it */
+                       bio_endio(split, 0);
+               } else
+                       generic_make_request(split);
+       } while (split != bio);
+       return;
+
+out_of_bounds:
+       printk(KERN_ERR
+              "md/linear:%s: make_request: Sector %llu out of bounds on "
+              "dev %s: %llu sectors, offset %llu\n",
+              mdname(mddev),
+              (unsigned long long)bio->bi_iter.bi_sector,
+              bdevname(tmp_dev->rdev->bdev, b),
+              (unsigned long long)tmp_dev->rdev->sectors,
+              (unsigned long long)start_sector);
+       bio_io_error(bio);
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
index 40c531359a15af61ad9c3ba70506d1863085dffe..4ad5cc4e63e8438ca3c32fea1f40f69ec71657fb 100644 (file)
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;
 
-       if (bio->bi_size == 0)
+       if (bio->bi_iter.bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
        bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
        int ret;
 
-       rw |= REQ_SYNC;
-
        bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
                rdev->meta_bdev : rdev->bdev;
        if (metadata_op)
-               bio->bi_sector = sector + rdev->sb_start;
+               bio->bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
                 (rdev->mddev->reshape_backwards ==
                  (sector >= rdev->mddev->reshape_position)))
-               bio->bi_sector = sector + rdev->new_data_offset;
+               bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
        else
-               bio->bi_sector = sector + rdev->data_offset;
+               bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);
        submit_bio_wait(rw, bio);
 
index 1642eae75a3335d1282a4bf53751802e1aeb52db..849ad39f547b9c1fbb8d993e118261a33b242134 100644 (file)
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
                md_error (mp_bh->mddev, rdev);
                printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
                       bdevname(rdev->bdev,b), 
-                      (unsigned long long)bio->bi_sector);
+                      (unsigned long long)bio->bi_iter.bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
                multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
        multipath = conf->multipaths + mp_bh->path;
 
        mp_bh->bio = *bio;
-       mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+       mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
        mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
                spin_unlock_irqrestore(&conf->device_lock, flags);
 
                bio = &mp_bh->bio;
-               bio->bi_sector = mp_bh->master_bio->bi_sector;
+               bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
                
                if ((mp_bh->path = multipath_map (conf))<0) {
                        printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
                                " error for block %llu\n",
                                bdevname(bio->bi_bdev,b),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                        multipath_end_bh_io(mp_bh, -EIO);
                } else {
                        printk(KERN_ERR "multipath: %s: redirecting sector %llu"
                                " to another IO path\n",
                                bdevname(bio->bi_bdev,b),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                        *bio = *(mp_bh->master_bio);
-                       bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+                       bio->bi_iter.bi_sector +=
+                               conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
                        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
index c4d420b7d2f43d0804e1c1a94d88ca63484b5ec3..407a99e46f6993a770c21fdfff1972b7f64063b6 100644 (file)
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                        unsigned int chunk_sects, struct bio *bio)
 {
        if (likely(is_power_of_2(chunk_sects))) {
-               return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+               return chunk_sects >=
+                       ((bio->bi_iter.bi_sector & (chunk_sects-1))
                                        + bio_sectors(bio));
        } else{
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                                + bio_sectors(bio));
        }
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
-       unsigned int chunk_sects;
-       sector_t sector_offset;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;
+       struct bio *split;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
 
-       chunk_sects = mddev->chunk_sectors;
-       if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
-               sector_t sector = bio->bi_sector;
-               struct bio_pair *bp;
-               /* Sanity check -- queue functions should prevent this happening */
-               if (bio_segments(bio) > 1)
-                       goto bad_map;
-               /* This is a one page bio that upper layers
-                * refuse to split for us, so we need to split it.
-                */
-               if (likely(is_power_of_2(chunk_sects)))
-                       bp = bio_split(bio, chunk_sects - (sector &
-                                                          (chunk_sects-1)));
-               else
-                       bp = bio_split(bio, chunk_sects -
-                                      sector_div(sector, chunk_sects));
-               raid0_make_request(mddev, &bp->bio1);
-               raid0_make_request(mddev, &bp->bio2);
-               bio_pair_release(bp);
-               return;
-       }
+       do {
+               sector_t sector = bio->bi_iter.bi_sector;
+               unsigned chunk_sects = mddev->chunk_sectors;
 
-       sector_offset = bio->bi_sector;
-       zone = find_zone(mddev->private, &sector_offset);
-       tmp_dev = map_sector(mddev, zone, bio->bi_sector,
-                            &sector_offset);
-       bio->bi_bdev = tmp_dev->bdev;
-       bio->bi_sector = sector_offset + zone->dev_start +
-               tmp_dev->data_offset;
-
-       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-               /* Just ignore it */
-               bio_endio(bio, 0);
-               return;
-       }
+               unsigned sectors = chunk_sects -
+                       (likely(is_power_of_2(chunk_sects))
+                        ? (sector & (chunk_sects-1))
+                        : sector_div(sector, chunk_sects));
 
-       generic_make_request(bio);
-       return;
-
-bad_map:
-       printk("md/raid0:%s: make_request bug: can't convert block across chunks"
-              " or bigger than %dk %llu %d\n",
-              mdname(mddev), chunk_sects / 2,
-              (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+               if (sectors < bio_sectors(bio)) {
+                       split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
 
-       bio_io_error(bio);
-       return;
+               zone = find_zone(mddev->private, &sector);
+               tmp_dev = map_sector(mddev, zone, sector, &sector);
+               split->bi_bdev = tmp_dev->bdev;
+               split->bi_iter.bi_sector = sector + zone->dev_start +
+                       tmp_dev->data_offset;
+
+               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+                       /* Just ignore it */
+                       bio_endio(split, 0);
+               } else
+                       generic_make_request(split);
+       } while (split != bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
index a49cfcc7a343188a5579350886795ce6fef35c4f..fd3a2a14b587da5e3bb5046b0017ed7bd46f67a1 100644 (file)
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
        int done;
        struct r1conf *conf = r1_bio->mddev->private;
        sector_t start_next_window = r1_bio->start_next_window;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        if (bio->bi_phys_segments) {
                unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
-                        (unsigned long long) bio->bi_sector,
-                        (unsigned long long) bio->bi_sector +
-                        bio_sectors(bio) - 1);
+                        (unsigned long long) bio->bi_iter.bi_sector,
+                        (unsigned long long) bio_end_sector(bio) - 1);
 
                call_bio_endio(r1_bio);
        }
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
                                struct bio *mbio = r1_bio->master_bio;
                                pr_debug("raid1: behind end write sectors"
                                         " %llu-%llu\n",
-                                        (unsigned long long) mbio->bi_sector,
-                                        (unsigned long long) mbio->bi_sector +
-                                        bio_sectors(mbio) - 1);
+                                        (unsigned long long) mbio->bi_iter.bi_sector,
+                                        (unsigned long long) bio_end_sector(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
                else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
                                >= bio_end_sector(bio)) ||
                         (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                               <= bio->bi_sector))
+                               <= bio->bi_iter.bi_sector))
                        wait = false;
                else
                        wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 
        if (bio && bio_data_dir(bio) == WRITE) {
                if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                   <= bio->bi_sector) {
+                   <= bio->bi_iter.bi_sector) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
                                        NEXT_NORMALIO_DISTANCE;
 
                        if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
-                           <= bio->bi_sector)
+                           <= bio->bi_iter.bi_sector)
                                conf->next_window_requests++;
                        else
                                conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
                if (bvecs[i].bv_page)
                        put_page(bvecs[i].bv_page);
        kfree(bvecs);
-       pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+       pr_debug("%dB behind alloc failed, doing sync I/O\n",
+                bio->bi_iter.bi_size);
 }
 
 struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
        if (bio_data_dir(bio) == WRITE &&
            bio_end_sector(bio) > mddev->suspend_lo &&
-           bio->bi_sector < mddev->suspend_hi) {
+           bio->bi_iter.bi_sector < mddev->suspend_hi) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
-                           bio->bi_sector >= mddev->suspend_hi)
+                           bio->bi_iter.bi_sector >= mddev->suspend_hi)
                                break;
                        schedule();
                }
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
-       r1_bio->sector = bio->bi_sector;
+       r1_bio->sector = bio->bi_iter.bi_sector;
 
        /* We might need to issue multiple reads to different
         * devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
                r1_bio->read_disk = rdisk;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+               bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
                         max_sectors);
 
                r1_bio->bios[rdisk] = read_bio;
 
-               read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+               read_bio->bi_iter.bi_sector = r1_bio->sector +
+                       mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
                read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
                         */
 
                        sectors_handled = (r1_bio->sector + max_sectors
-                                          - bio->bi_sector);
+                                          - bio->bi_iter.bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
                        r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
-                       r1_bio->sector = bio->bi_sector + sectors_handled;
+                       r1_bio->sector = bio->bi_iter.bi_sector +
+                               sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
                        if (r1_bio->bios[j])
                                rdev_dec_pending(conf->mirrors[j].rdev, mddev);
                r1_bio->state = 0;
-               allow_barrier(conf, start_next_window, bio->bi_sector);
+               allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                start_next_window = wait_barrier(conf, bio);
                /*
@@ -1348,7 +1349,7 @@ read_again:
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
-       sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+       sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
        atomic_set(&r1_bio->remaining, 1);
        atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 
                if (first_clone) {
                        /* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
 
                r1_bio->bios[i] = mbio;
 
-               mbio->bi_sector = (r1_bio->sector +
+               mbio->bi_iter.bi_sector = (r1_bio->sector +
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
                r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
-               r1_bio->sector = bio->bi_sector + sectors_handled;
+               r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
                goto retry_write;
        }
 
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio)
                /* fixup the bio for reuse */
                bio_reset(b);
                b->bi_vcnt = vcnt;
-               b->bi_size = r1_bio->sectors << 9;
-               b->bi_sector = r1_bio->sector +
+               b->bi_iter.bi_size = r1_bio->sectors << 9;
+               b->bi_iter.bi_sector = r1_bio->sector +
                        conf->mirrors[i].rdev->data_offset;
                b->bi_bdev = conf->mirrors[i].rdev->bdev;
                b->bi_end_io = end_sync_read;
                b->bi_private = r1_bio;
 
-               size = b->bi_size;
+               size = b->bi_iter.bi_size;
                for (j = 0; j < vcnt ; j++) {
                        struct bio_vec *bi;
                        bi = &b->bi_io_vec[j];
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
                }
 
                wbio->bi_rw = WRITE;
-               wbio->bi_sector = r1_bio->sector;
-               wbio->bi_size = r1_bio->sectors << 9;
+               wbio->bi_iter.bi_sector = r1_bio->sector;
+               wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
                bio_trim(wbio, sector - r1_bio->sector, sectors);
-               wbio->bi_sector += rdev->data_offset;
+               wbio->bi_iter.bi_sector += rdev->data_offset;
                wbio->bi_bdev = rdev->bdev;
                if (submit_bio_wait(WRITE, wbio) == 0)
                        /* failure! */
@@ -2338,7 +2339,8 @@ read_more:
                }
                r1_bio->read_disk = disk;
                bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-               bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+               bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+                        max_sectors);
                r1_bio->bios[r1_bio->read_disk] = bio;
                rdev = conf->mirrors[disk].rdev;
                printk_ratelimited(KERN_ERR
@@ -2347,7 +2349,7 @@ read_more:
                                   mdname(mddev),
                                   (unsigned long long)r1_bio->sector,
                                   bdevname(rdev->bdev, b));
-               bio->bi_sector = r1_bio->sector + rdev->data_offset;
+               bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
                bio->bi_bdev = rdev->bdev;
                bio->bi_end_io = raid1_end_read_request;
                bio->bi_rw = READ | do_sync;
@@ -2356,7 +2358,7 @@ read_more:
                        /* Drat - have to split this up more */
                        struct bio *mbio = r1_bio->master_bio;
                        int sectors_handled = (r1_bio->sector + max_sectors
-                                              - mbio->bi_sector);
+                                              - mbio->bi_iter.bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2376,8 @@ read_more:
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
-                       r1_bio->sector = mbio->bi_sector + sectors_handled;
+                       r1_bio->sector = mbio->bi_iter.bi_sector +
+                               sectors_handled;
 
                        goto read_more;
                } else
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                }
                if (bio->bi_end_io) {
                        atomic_inc(&rdev->nr_pending);
-                       bio->bi_sector = sector_nr + rdev->data_offset;
+                       bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
                        bio->bi_bdev = rdev->bdev;
                        bio->bi_private = r1_bio;
                }
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                                                        continue;
                                                /* remove last page from this bio */
                                                bio->bi_vcnt--;
-                                               bio->bi_size -= len;
+                                               bio->bi_iter.bi_size -= len;
                                                bio->bi_flags &= ~(1<< BIO_SEG_VALID);
                                        }
                                        goto bio_full;
index 8d39d63281b9b5441b3ec8e524955356c8690871..33fc408e5eacef0a1dce55fd5c0d578fc244b663 100644 (file)
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
        struct r10bio *r10_bio;
        struct bio *read_bio;
        int i;
-       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
-       int chunk_sects = chunk_mask + 1;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        int max_sectors;
        int sectors;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
-               md_flush_request(mddev, bio);
-               return;
-       }
-
-       /* If this request crosses a chunk boundary, we need to
-        * split it.  This will only happen for 1 PAGE (or less) requests.
-        */
-       if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
-                    > chunk_sects
-                    && (conf->geo.near_copies < conf->geo.raid_disks
-                        || conf->prev.near_copies < conf->prev.raid_disks))) {
-               struct bio_pair *bp;
-               /* Sanity check -- queue functions should prevent this happening */
-               if (bio_segments(bio) > 1)
-                       goto bad_map;
-               /* This is a one page bio that upper layers
-                * refuse to split for us, so we need to split it.
-                */
-               bp = bio_split(bio,
-                              chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
-               /* Each of these 'make_request' calls will call 'wait_barrier'.
-                * If the first succeeds but the second blocks due to the resync
-                * thread raising the barrier, we will deadlock because the
-                * IO to the underlying device will be queued in generic_make_request
-                * and will never complete, so will never reduce nr_pending.
-                * So increment nr_waiting here so no new raise_barriers will
-                * succeed, and so the second wait_barrier cannot block.
-                */
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting++;
-               spin_unlock_irq(&conf->resync_lock);
-
-               make_request(mddev, &bp->bio1);
-               make_request(mddev, &bp->bio2);
-
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting--;
-               wake_up(&conf->wait_barrier);
-               spin_unlock_irq(&conf->resync_lock);
-
-               bio_pair_release(bp);
-               return;
-       bad_map:
-               printk("md/raid10:%s: make_request bug: can't convert block across chunks"
-                      " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-                      (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
-               bio_io_error(bio);
-               return;
-       }
-
-       md_write_start(mddev, bio);
-
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
-
        sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio->bi_sector < conf->reshape_progress &&
-           bio->bi_sector + sectors > conf->reshape_progress) {
+           bio->bi_iter.bi_sector < conf->reshape_progress &&
+           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
                /* IO spans the reshape position.  Need to wait for
                 * reshape to pass
                 */
                allow_barrier(conf);
                wait_event(conf->wait_barrier,
-                          conf->reshape_progress <= bio->bi_sector ||
-                          conf->reshape_progress >= bio->bi_sector + sectors);
+                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                          conf->reshape_progress >= bio->bi_iter.bi_sector +
+                          sectors);
                wait_barrier(conf);
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio_data_dir(bio) == WRITE &&
            (mddev->reshape_backwards
-            ? (bio->bi_sector < conf->reshape_safe &&
-               bio->bi_sector + sectors > conf->reshape_progress)
-            : (bio->bi_sector + sectors > conf->reshape_safe &&
-               bio->bi_sector < conf->reshape_progress))) {
+            ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+               bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+            : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+               bio->bi_iter.bi_sector < conf->reshape_progress))) {
                /* Need to update reshape_position in metadata */
                mddev->reshape_position = conf->reshape_progress;
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r10_bio->sectors = sectors;
 
        r10_bio->mddev = mddev;
-       r10_bio->sector = bio->bi_sector;
+       r10_bio->sector = bio->bi_iter.bi_sector;
        r10_bio->state = 0;
 
        /* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
                slot = r10_bio->read_slot;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+               bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
                         max_sectors);
 
                r10_bio->devs[slot].bio = read_bio;
                r10_bio->devs[slot].rdev = rdev;
 
-               read_bio->bi_sector = r10_bio->devs[slot].addr +
+               read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
                        choose_data_offset(r10_bio, rdev);
                read_bio->bi_bdev = rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
                         * need another r10_bio.
                         */
                        sectors_handled = (r10_bio->sector + max_sectors
-                                          - bio->bi_sector);
+                                          - bio->bi_iter.bi_sector);
                        r10_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
                        r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
-                       r10_bio->sector = bio->bi_sector + sectors_handled;
+                       r10_bio->sector = bio->bi_iter.bi_sector +
+                               sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
-       sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+       sectors_handled = r10_bio->sector + max_sectors -
+               bio->bi_iter.bi_sector;
 
        atomic_set(&r10_bio->remaining, 1);
        bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
                if (r10_bio->devs[i].bio) {
                        struct md_rdev *rdev = conf->mirrors[d].rdev;
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                       bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+                       bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                                 max_sectors);
                        r10_bio->devs[i].bio = mbio;
 
-                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                       mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
                                           choose_data_offset(r10_bio,
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
                                rdev = conf->mirrors[d].rdev;
                        }
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                       bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+                       bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                                 max_sectors);
                        r10_bio->devs[i].repl_bio = mbio;
 
-                       mbio->bi_sector = (r10_bio->devs[i].addr +
+                       mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
                                           choose_data_offset(
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
                r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
                r10_bio->mddev = mddev;
-               r10_bio->sector = bio->bi_sector + sectors_handled;
+               r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
                r10_bio->state = 0;
                goto retry_write;
        }
        one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r10conf *conf = mddev->private;
+       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+       int chunk_sects = chunk_mask + 1;
+
+       struct bio *split;
+
+       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+               md_flush_request(mddev, bio);
+               return;
+       }
+
+       md_write_start(mddev, bio);
+
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
+       do {
+
+               /*
+                * If this request crosses a chunk boundary, we need to split
+                * it.
+                */
+               if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+                            bio_sectors(bio) > chunk_sects
+                            && (conf->geo.near_copies < conf->geo.raid_disks
+                                || conf->prev.near_copies <
+                                conf->prev.raid_disks))) {
+                       split = bio_split(bio, chunk_sects -
+                                         (bio->bi_iter.bi_sector &
+                                          (chunk_sects - 1)),
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               __make_request(mddev, split);
+       } while (split != bio);
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                bio_reset(tbio);
 
                tbio->bi_vcnt = vcnt;
-               tbio->bi_size = r10_bio->sectors << 9;
+               tbio->bi_iter.bi_size = r10_bio->sectors << 9;
                tbio->bi_rw = WRITE;
                tbio->bi_private = r10_bio;
-               tbio->bi_sector = r10_bio->devs[i].addr;
+               tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 
                for (j=0; j < vcnt ; j++) {
                        tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
-               tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+               tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                generic_make_request(tbio);
        }
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(wbio, sector - bio->bi_sector, sectors);
-               wbio->bi_sector = (r10_bio->devs[i].addr+
+               bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+               wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
                                   choose_data_offset(r10_bio, rdev) +
                                   (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
                (unsigned long long)r10_bio->sector);
        bio = bio_clone_mddev(r10_bio->master_bio,
                              GFP_NOIO, mddev);
-       bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+       bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
        r10_bio->devs[slot].bio = bio;
        r10_bio->devs[slot].rdev = rdev;
-       bio->bi_sector = r10_bio->devs[slot].addr
+       bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
                + choose_data_offset(r10_bio, rdev);
        bio->bi_bdev = rdev->bdev;
        bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
                struct bio *mbio = r10_bio->master_bio;
                int sectors_handled =
                        r10_bio->sector + max_sectors
-                       - mbio->bi_sector;
+                       - mbio->bi_iter.bi_sector;
                r10_bio->sectors = max_sectors;
                spin_lock_irq(&conf->device_lock);
                if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
                set_bit(R10BIO_ReadError,
                        &r10_bio->state);
                r10_bio->mddev = mddev;
-               r10_bio->sector = mbio->bi_sector
+               r10_bio->sector = mbio->bi_iter.bi_sector
                        + sectors_handled;
 
                goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio->bi_end_io = end_sync_read;
                                bio->bi_rw = READ;
                                from_addr = r10_bio->devs[j].addr;
-                               bio->bi_sector = from_addr + rdev->data_offset;
+                               bio->bi_iter.bi_sector = from_addr +
+                                       rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                atomic_inc(&rdev->nr_pending);
                                /* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                        bio->bi_private = r10_bio;
                                        bio->bi_end_io = end_sync_write;
                                        bio->bi_rw = WRITE;
-                                       bio->bi_sector = to_addr
+                                       bio->bi_iter.bi_sector = to_addr
                                                + rdev->data_offset;
                                        bio->bi_bdev = rdev->bdev;
                                        atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_write;
                                bio->bi_rw = WRITE;
-                               bio->bi_sector = to_addr + rdev->data_offset;
+                               bio->bi_iter.bi_sector = to_addr +
+                                       rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                atomic_inc(&r10_bio->remaining);
                                break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_read;
                        bio->bi_rw = READ;
-                       bio->bi_sector = sector +
+                       bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].rdev->data_offset;
                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
                        count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_write;
                        bio->bi_rw = WRITE;
-                       bio->bi_sector = sector +
+                       bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].replacement->data_offset;
                        bio->bi_bdev = conf->mirrors[d].replacement->bdev;
                        count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                             bio2 = bio2->bi_next) {
                                /* remove last page from this bio */
                                bio2->bi_vcnt--;
-                               bio2->bi_size -= len;
+                               bio2->bi_iter.bi_size -= len;
                                bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
                        }
                        goto bio_full;
@@ -4418,7 +4405,7 @@ read_more:
        read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
        read_bio->bi_bdev = rdev->bdev;
-       read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+       read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
                               + rdev->data_offset);
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
@@ -4426,7 +4413,7 @@ read_more:
        read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
-       read_bio->bi_size = 0;
+       read_bio->bi_iter.bi_size = 0;
        r10_bio->master_bio = read_bio;
        r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
 
@@ -4452,7 +4439,8 @@ read_more:
 
                bio_reset(b);
                b->bi_bdev = rdev2->bdev;
-               b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+               b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+                       rdev2->new_data_offset;
                b->bi_private = r10_bio;
                b->bi_end_io = end_reshape_write;
                b->bi_rw = WRITE;
@@ -4479,7 +4467,7 @@ read_more:
                             bio2 = bio2->bi_next) {
                                /* Remove last page from this bio */
                                bio2->bi_vcnt--;
-                               bio2->bi_size -= len;
+                               bio2->bi_iter.bi_size -= len;
                                bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
                        }
                        goto bio_full;
index 03f82ab87d9e73eb4fed4ede052c95fa5d891f09..f1feadeb7bb2d1b68a6592d946516e4036c7e939 100644 (file)
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
        int sectors = bio_sectors(bio);
-       if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+       if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
                return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
-               bi->bi_size = 0;
+               bi->bi_iter.bi_size = 0;
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
                bio_endio(bi, 0);
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->new_data_offset);
                        else
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->data_offset);
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                                bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
-                       bi->bi_size = STRIPE_SIZE;
+                       bi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                rbi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->new_data_offset);
                        else
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->data_offset);
                        rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
-                       rbi->bi_size = STRIPE_SIZE;
+                       rbi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page *page,
        sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-       struct bio_vec *bvl;
+       struct bio_vec bvl;
+       struct bvec_iter iter;
        struct page *bio_page;
-       int i;
        int page_offset;
        struct async_submit_ctl submit;
        enum async_tx_flags flags = 0;
 
-       if (bio->bi_sector >= sector)
-               page_offset = (signed)(bio->bi_sector - sector) * 512;
+       if (bio->bi_iter.bi_sector >= sector)
+               page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
        else
-               page_offset = (signed)(sector - bio->bi_sector) * -512;
+               page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
        if (frombio)
                flags |= ASYNC_TX_FENCE;
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-       bio_for_each_segment(bvl, bio, i) {
-               int len = bvl->bv_len;
+       bio_for_each_segment(bvl, bio, iter) {
+               int len = bvl.bv_len;
                int clen;
                int b_offset = 0;
 
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        clen = len;
 
                if (clen > 0) {
-                       b_offset += bvl->bv_offset;
-                       bio_page = bvl->bv_page;
+                       b_offset += bvl.bv_offset;
+                       bio_page = bvl.bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
                        spin_unlock_irq(&sh->stripe_lock);
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
                                        dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
                        wbi = dev->written = chosen;
                        spin_unlock_irq(&sh->stripe_lock);
 
-                       while (wbi && wbi->bi_sector <
+                       while (wbi && wbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                if (wbi->bi_rw & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        int firstwrite=0;
 
        pr_debug("adding bi b#%llu to stripe s#%llu\n",
-               (unsigned long long)bi->bi_sector,
+               (unsigned long long)bi->bi_iter.bi_sector,
                (unsigned long long)sh->sector);
 
        /*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
-       while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-               if (bio_end_sector(*bip) > bi->bi_sector)
+       while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+               if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
                        goto overlap;
                bip = & (*bip)->bi_next;
        }
-       if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+       if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
                goto overlap;
 
        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                sector_t sector = sh->dev[dd_idx].sector;
                for (bi=sh->dev[dd_idx].towrite;
                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-                            bi && bi->bi_sector <= sector;
+                            bi && bi->bi_iter.bi_sector <= sector;
                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
                        if (bio_end_sector(bi) >= sector)
                                sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        }
 
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-               (unsigned long long)(*bip)->bi_sector,
+               (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
        spin_unlock_irq(&sh->stripe_lock);
 
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                        wake_up(&conf->wait_for_overlap);
 
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
                if (bi) bitmap_end = 1;
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
-                       while (bi && bi->bi_sector <
+                       while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                        clear_bit(R5_UPTODATE, &dev->flags);
                                wbi = dev->written;
                                dev->written = NULL;
-                               while (wbi && wbi->bi_sector <
+                               while (wbi && wbi->bi_iter.bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
                                        if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-       sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+       sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
        /*
         *      compute position
         */
-       align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
-                                                   0,
-                                                   &dd_idx, NULL);
+       align_bi->bi_iter.bi_sector =
+               raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+                                    0, &dd_idx, NULL);
 
        end_sector = bio_end_sector(align_bi);
        rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
                if (!bio_fits_rdev(align_bi) ||
-                   is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+                   is_badblock(rdev, align_bi->bi_iter.bi_sector,
+                               bio_sectors(align_bi),
                                &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                }
 
                /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
+               align_bi->bi_iter.bi_sector += rdev->data_offset;
 
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                if (mddev->gendisk)
                        trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
                                              align_bi, disk_devt(mddev->gendisk),
-                                             raid_bio->bi_sector);
+                                             raid_bio->bi_iter.bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                /* Skip discard while reshape is happening */
                return;
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-       last_sector = bi->bi_sector + (bi->bi_size>>9);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                return;
        }
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        int remaining;
        int handled = 0;
 
-       logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = raid_bio->bi_iter.bi_sector &
+               ~((sector_t)STRIPE_SECTORS-1);
        sector = raid5_compute_sector(conf, logical_sector,
                                      0, &dd_idx, NULL);
        last_sector = bio_end_sector(raid_bio);
@@ -6101,6 +6103,7 @@ static int run(struct mddev *mddev)
                blk_queue_io_min(mddev->queue, chunk_size);
                blk_queue_io_opt(mddev->queue, chunk_size *
                                 (conf->raid_disks - conf->max_degraded));
+               mddev->queue->limits.raid_partial_stripes_expensive = 1;
                /*
                 * We can only discard a whole stripe. It doesn't make sense to
                 * discard data disk but write parity disk
index dd239bdbfcb4a0877db2ab49aa3c27a81eec7dd1..00d339c361fc0ecbd0b9b086e8c5756d28983d77 100644 (file)
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 
        /* do we need to support multiple segments? */
-       if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-               printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-                   ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                   bio_segments(rsp->bio), blk_rq_bytes(rsp));
+       if (bio_multiple_segments(req->bio) ||
+           bio_multiple_segments(rsp->bio)) {
+               printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+                   ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
index c169e07654cb918310b61cf0f554ee87971b7e58..f0fa4e8ca124a905844f8f30d705be967d467965 100644 (file)
@@ -3,7 +3,7 @@
  *                           Philip Edelbrock <phil@netroedge.com>
  * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
  * Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 5fab4e6e83013c033c01d2465b4f25c756f39dce..5ebcda39f55407e146ab06d811edd5825939a41d 100644 (file)
@@ -157,10 +157,11 @@ config MTD_BCM47XX_PARTS
 
 comment "User Modules And Translation Layers"
 
+#
+# MTD block device support is select'ed if needed
+#
 config MTD_BLKDEVS
-       tristate "Common interface to block layer for MTD 'translation layers'"
-       depends on BLOCK
-       default n
+       tristate
 
 config MTD_BLOCK
        tristate "Caching block device access to MTD devices"
index 5a3942bf109cd9ccded20d42fc4ee62b36a3ad8e..96a33e3f7b000394c1480260246585531c88b920 100644 (file)
@@ -264,7 +264,8 @@ static struct mtd_part_parser afs_parser = {
 
 static int __init afs_parser_init(void)
 {
-       return register_mtd_parser(&afs_parser);
+       register_mtd_parser(&afs_parser);
+       return 0;
 }
 
 static void __exit afs_parser_exit(void)
index ddc0a4287a4b89c8124dd3629b9647e3ca800b74..7c9172ad26210e1d25c9506889e0cbb54212dbfe 100644 (file)
@@ -139,7 +139,8 @@ static struct mtd_part_parser ar7_parser = {
 
 static int __init ar7_parser_init(void)
 {
-       return register_mtd_parser(&ar7_parser);
+       register_mtd_parser(&ar7_parser);
+       return 0;
 }
 
 static void __exit ar7_parser_exit(void)
index 7a6384b0962a9de2c92dffb81f1fc45d6d89a490..de1eb92e42f57f15f197c3ba21d1831ab6dac06f 100644 (file)
  * Amount of bytes we read when analyzing each block of flash memory.
  * Set it big enough to allow detecting partition and reading important data.
  */
-#define BCM47XXPART_BYTES_TO_READ      0x404
+#define BCM47XXPART_BYTES_TO_READ      0x4e8
 
 /* Magics */
 #define BOARD_DATA_MAGIC               0x5246504D      /* MPFR */
+#define BOARD_DATA_MAGIC2              0xBD0D0BBD
+#define CFE_MAGIC                      0x43464531      /* 1EFC */
 #define FACTORY_MAGIC                  0x59544346      /* FCTY */
 #define POT_MAGIC1                     0x54544f50      /* POTT */
 #define POT_MAGIC2                     0x504f          /* OP */
@@ -102,8 +104,9 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                        continue;
                }
 
-               /* CFE has small NVRAM at 0x400 */
-               if (buf[0x400 / 4] == NVRAM_HEADER) {
+               /* Magic or small NVRAM at 0x400 */
+               if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
+                   (buf[0x400 / 4] == NVRAM_HEADER)) {
                        bcm47xxpart_add_part(&parts[curr_part++], "boot",
                                             offset, MTD_WRITEABLE);
                        continue;
@@ -190,6 +193,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                                             offset, 0);
                        continue;
                }
+
+               /* Read middle of the block */
+               if (mtd_read(master, offset + 0x8000, 0x4,
+                            &bytes_read, (uint8_t *)buf) < 0) {
+                       pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+                              offset);
+                       continue;
+               }
+
+               /* Some devices (e.g. WNDR3700v3) don't have a standard 'MPFR' */
+               if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
+                       bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+                                            offset, MTD_WRITEABLE);
+                       continue;
+               }
        }
 
        /* Look for NVRAM at the end of the last block. */
@@ -243,7 +261,8 @@ static struct mtd_part_parser bcm47xxpart_mtd_parser = {
 
 static int __init bcm47xxpart_init(void)
 {
-       return register_mtd_parser(&bcm47xxpart_mtd_parser);
+       register_mtd_parser(&bcm47xxpart_mtd_parser);
+       return 0;
 }
 
 static void __exit bcm47xxpart_exit(void)
index 5c813907661c3415c979b1176bf937f7c086f2c8..b2443f7031c9afb0bda808a37b6da7e8113d0a61 100644 (file)
@@ -221,7 +221,8 @@ static struct mtd_part_parser bcm63xx_cfe_parser = {
 
 static int __init bcm63xx_cfe_parser_init(void)
 {
-       return register_mtd_parser(&bcm63xx_cfe_parser);
+       register_mtd_parser(&bcm63xx_cfe_parser);
+       return 0;
 }
 
 static void __exit bcm63xx_cfe_parser_exit(void)
index 721caebbc5cc21344a58a710d466205f75f845ef..3e829b37af8d0e0d5c73eff2c03e054ed6516eab 100644 (file)
@@ -395,7 +395,8 @@ static int __init cmdline_parser_init(void)
 {
        if (mtdparts)
                mtdpart_setup(mtdparts);
-       return register_mtd_parser(&cmdline_parser);
+       register_mtd_parser(&cmdline_parser);
+       return 0;
 }
 
 static void __exit cmdline_parser_exit(void)
index 4f091c1a9981c060e3ea2cae43960ade8b4bc2f7..dd5e1018d37b39e9301c9c43e01195f4f363a66c 100644 (file)
@@ -2047,21 +2047,21 @@ static int __init docg3_probe(struct platform_device *pdev)
        ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!ress) {
                dev_err(dev, "No I/O memory resource defined\n");
-               goto noress;
+               return ret;
        }
-       base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+       base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
 
        ret = -ENOMEM;
-       cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
-                         GFP_KERNEL);
+       cascade = devm_kzalloc(dev, sizeof(*cascade) * DOC_MAX_NBFLOORS,
+                              GFP_KERNEL);
        if (!cascade)
-               goto nomem1;
+               return ret;
        cascade->base = base;
        mutex_init(&cascade->lock);
        cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
                             DOC_ECC_BCH_PRIMPOLY);
        if (!cascade->bch)
-               goto nomem2;
+               return ret;
 
        for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
                mtd = doc_probe_device(cascade, floor, dev);
@@ -2101,11 +2101,6 @@ err_probe:
        for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
                if (cascade->floors[floor])
                        doc_release_device(cascade->floors[floor]);
-nomem2:
-       kfree(cascade);
-nomem1:
-       iounmap(base);
-noress:
        return ret;
 }
 
@@ -2119,7 +2114,6 @@ static int __exit docg3_release(struct platform_device *pdev)
 {
        struct docg3_cascade *cascade = platform_get_drvdata(pdev);
        struct docg3 *docg3 = cascade->floors[0]->priv;
-       void __iomem *base = cascade->base;
        int floor;
 
        doc_unregister_sysfs(pdev, cascade);
@@ -2129,8 +2123,6 @@ static int __exit docg3_release(struct platform_device *pdev)
                        doc_release_device(cascade->floors[floor]);
 
        free_bch(docg3->cascade->bch);
-       kfree(cascade);
-       iounmap(base);
        return 0;
 }
 
index 7eda71dbc183b7aa456e60a41f0490c882d94331..ad19139097025b97839fb7cbe123a1261e6cf876 100644 (file)
@@ -41,6 +41,7 @@
 #define        OPCODE_WRSR             0x01    /* Write status register 1 byte */
 #define        OPCODE_NORM_READ        0x03    /* Read data bytes (low frequency) */
 #define        OPCODE_FAST_READ        0x0b    /* Read data bytes (high frequency) */
+#define        OPCODE_QUAD_READ        0x6b    /* Read data bytes */
 #define        OPCODE_PP               0x02    /* Page program (up to 256 bytes) */
 #define        OPCODE_BE_4K            0x20    /* Erase 4KiB block */
 #define        OPCODE_BE_4K_PMC        0xd7    /* Erase 4KiB block on PMC chips */
 #define        OPCODE_CHIP_ERASE       0xc7    /* Erase whole flash chip */
 #define        OPCODE_SE               0xd8    /* Sector erase (usually 64KiB) */
 #define        OPCODE_RDID             0x9f    /* Read JEDEC ID */
+#define        OPCODE_RDCR             0x35    /* Read configuration register */
 
 /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
 #define        OPCODE_NORM_READ_4B     0x13    /* Read data bytes (low frequency) */
 #define        OPCODE_FAST_READ_4B     0x0c    /* Read data bytes (high frequency) */
+#define        OPCODE_QUAD_READ_4B     0x6c    /* Read data bytes */
 #define        OPCODE_PP_4B            0x12    /* Page program (up to 256 bytes) */
 #define        OPCODE_SE_4B            0xdc    /* Sector erase (usually 64KiB) */
 
 #define        SR_BP2                  0x10    /* Block protect 2 */
 #define        SR_SRWD                 0x80    /* SR write protect */
 
+#define SR_QUAD_EN_MX           0x40    /* Macronix Quad I/O */
+
+/* Configuration Register bits. */
+#define CR_QUAD_EN_SPAN                0x2     /* Spansion Quad I/O */
+
 /* Define max times to check status register before we give up. */
 #define        MAX_READY_WAIT_JIFFIES  (40 * HZ)       /* M25P16 specs 40s max chip erase */
 #define        MAX_CMD_SIZE            6
 
 /****************************************************************************/
 
+enum read_type {
+       M25P80_NORMAL = 0,
+       M25P80_FAST,
+       M25P80_QUAD,
+};
+
 struct m25p {
        struct spi_device       *spi;
        struct mutex            lock;
@@ -94,7 +108,7 @@ struct m25p {
        u8                      read_opcode;
        u8                      program_opcode;
        u8                      *command;
-       bool                    fast_read;
+       enum read_type          flash_read;
 };
 
 static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -130,6 +144,26 @@ static int read_sr(struct m25p *flash)
        return val;
 }
 
+/*
+ * Read configuration register, returning its value in the
+ * location. Return the configuration register value.
+ * Returns negative if an error occurred.
+ */
+static int read_cr(struct m25p *flash)
+{
+       u8 code = OPCODE_RDCR;
+       int ret;
+       u8 val;
+
+       ret = spi_write_then_read(flash->spi, &code, 1, &val, 1);
+       if (ret < 0) {
+               dev_err(&flash->spi->dev, "error %d reading CR\n", ret);
+               return ret;
+       }
+
+       return val;
+}
+
 /*
  * Write status register 1 byte
  * Returns negative if error occurred.
@@ -219,6 +253,93 @@ static int wait_till_ready(struct m25p *flash)
        return 1;
 }
 
+/*
+ * Write status Register and configuration register with 2 bytes
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Return negative if an error occurred.
+ */
+static int write_sr_cr(struct m25p *flash, u16 val)
+{
+       flash->command[0] = OPCODE_WRSR;
+       flash->command[1] = val & 0xff;
+       flash->command[2] = (val >> 8);
+
+       return spi_write(flash->spi, flash->command, 3);
+}
+
+static int macronix_quad_enable(struct m25p *flash)
+{
+       int ret, val;
+       u8 cmd[2];
+       cmd[0] = OPCODE_WRSR;
+
+       val = read_sr(flash);
+       cmd[1] = val | SR_QUAD_EN_MX;
+       write_enable(flash);
+
+       spi_write(flash->spi, &cmd, 2);
+
+       if (wait_till_ready(flash))
+               return 1;
+
+       ret = read_sr(flash);
+       if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+               dev_err(&flash->spi->dev, "Macronix Quad bit not set\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int spansion_quad_enable(struct m25p *flash)
+{
+       int ret;
+       int quad_en = CR_QUAD_EN_SPAN << 8;
+
+       write_enable(flash);
+
+       ret = write_sr_cr(flash, quad_en);
+       if (ret < 0) {
+               dev_err(&flash->spi->dev,
+                       "error while writing configuration register\n");
+               return -EINVAL;
+       }
+
+       /* read back and check it */
+       ret = read_cr(flash);
+       if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+               dev_err(&flash->spi->dev, "Spansion Quad bit not set\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int set_quad_mode(struct m25p *flash, u32 jedec_id)
+{
+       int status;
+
+       switch (JEDEC_MFR(jedec_id)) {
+       case CFI_MFR_MACRONIX:
+               status = macronix_quad_enable(flash);
+               if (status) {
+                       dev_err(&flash->spi->dev,
+                               "Macronix quad-read not enabled\n");
+                       return -EINVAL;
+               }
+               return status;
+       default:
+               status = spansion_quad_enable(flash);
+               if (status) {
+                       dev_err(&flash->spi->dev,
+                               "Spansion quad-read not enabled\n");
+                       return -EINVAL;
+               }
+               return status;
+       }
+}
+
 /*
  * Erase the whole flash memory
  *
@@ -349,6 +470,35 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
        return 0;
 }
 
+/*
+ * Dummy Cycle calculation for different type of read.
+ * It can be used to support more commands with
+ * different dummy cycle requirements.
+ */
+static inline int m25p80_dummy_cycles_read(struct m25p *flash)
+{
+       switch (flash->flash_read) {
+       case M25P80_FAST:
+       case M25P80_QUAD:
+               return 1;
+       case M25P80_NORMAL:
+               return 0;
+       default:
+               dev_err(&flash->spi->dev, "No valid read type supported\n");
+               return -1;
+       }
+}
+
+static inline unsigned int m25p80_rx_nbits(const struct m25p *flash)
+{
+       switch (flash->flash_read) {
+       case M25P80_QUAD:
+               return 4;
+       default:
+               return 0;
+       }
+}
+
 /*
  * Read an address range from the flash chip.  The address range
  * may be any size provided it is within the physical boundaries.
@@ -360,6 +510,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
        struct spi_transfer t[2];
        struct spi_message m;
        uint8_t opcode;
+       int dummy;
 
        pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
                        __func__, (u32)from, len);
@@ -367,11 +518,18 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
        spi_message_init(&m);
        memset(t, 0, (sizeof t));
 
+       dummy =  m25p80_dummy_cycles_read(flash);
+       if (dummy < 0) {
+               dev_err(&flash->spi->dev, "No valid read command supported\n");
+               return -EINVAL;
+       }
+
        t[0].tx_buf = flash->command;
-       t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
+       t[0].len = m25p_cmdsz(flash) + dummy;
        spi_message_add_tail(&t[0], &m);
 
        t[1].rx_buf = buf;
+       t[1].rx_nbits = m25p80_rx_nbits(flash);
        t[1].len = len;
        spi_message_add_tail(&t[1], &m);
 
@@ -391,8 +549,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
 
        spi_sync(flash->spi, &m);
 
-       *retlen = m.actual_length - m25p_cmdsz(flash) -
-                       (flash->fast_read ? 1 : 0);
+       *retlen = m.actual_length - m25p_cmdsz(flash) - dummy;
 
        mutex_unlock(&flash->lock);
 
@@ -698,6 +855,7 @@ struct flash_info {
 #define        SST_WRITE       0x04            /* use SST byte programming */
 #define        M25P_NO_FR      0x08            /* Can't do fastread */
 #define        SECT_4K_PMC     0x10            /* OPCODE_BE_4K_PMC works uniformly */
+#define        M25P80_QUAD_READ        0x20    /* Flash supports Quad Read */
 };
 
 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)     \
@@ -775,7 +933,7 @@ static const struct spi_device_id m25p_ids[] = {
        { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
        { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
        { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
-       { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
+       { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) },
 
        /* Micron */
        { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, 0) },
@@ -795,8 +953,8 @@ static const struct spi_device_id m25p_ids[] = {
        { "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, 0) },
        { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, 0) },
        { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
-       { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, 0) },
-       { "s25fl512s",  INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
+       { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, M25P80_QUAD_READ) },
+       { "s25fl512s",  INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_QUAD_READ) },
        { "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
        { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
        { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
@@ -851,6 +1009,7 @@ static const struct spi_device_id m25p_ids[] = {
        { "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
        { "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
 
+       { "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
        { "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
        { "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
        { "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
@@ -937,6 +1096,7 @@ static int m25p_probe(struct spi_device *spi)
        unsigned                        i;
        struct mtd_part_parser_data     ppdata;
        struct device_node *np = spi->dev.of_node;
+       int ret;
 
        /* Platform data helps sort out which chip type we have, as
         * well as how this board partitions it.  If we don't have
@@ -1051,22 +1211,46 @@ static int m25p_probe(struct spi_device *spi)
        flash->page_size = info->page_size;
        flash->mtd.writebufsize = flash->page_size;
 
-       if (np)
+       if (np) {
                /* If we were instantiated by DT, use it */
-               flash->fast_read = of_property_read_bool(np, "m25p,fast-read");
-       else
+               if (of_property_read_bool(np, "m25p,fast-read"))
+                       flash->flash_read = M25P80_FAST;
+               else
+                       flash->flash_read = M25P80_NORMAL;
+       } else {
                /* If we weren't instantiated by DT, default to fast-read */
-               flash->fast_read = true;
+               flash->flash_read = M25P80_FAST;
+       }
 
        /* Some devices cannot do fast-read, no matter what DT tells us */
        if (info->flags & M25P_NO_FR)
-               flash->fast_read = false;
+               flash->flash_read = M25P80_NORMAL;
+
+       /* Quad-read mode takes precedence over fast/normal */
+       if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) {
+               ret = set_quad_mode(flash, info->jedec_id);
+               if (ret) {
+                       dev_err(&flash->spi->dev, "quad mode not supported\n");
+                       return ret;
+               }
+               flash->flash_read = M25P80_QUAD;
+       }
 
        /* Default commands */
-       if (flash->fast_read)
+       switch (flash->flash_read) {
+       case M25P80_QUAD:
+               flash->read_opcode = OPCODE_QUAD_READ;
+               break;
+       case M25P80_FAST:
                flash->read_opcode = OPCODE_FAST_READ;
-       else
+               break;
+       case M25P80_NORMAL:
                flash->read_opcode = OPCODE_NORM_READ;
+               break;
+       default:
+               dev_err(&flash->spi->dev, "No Read opcode defined\n");
+               return -EINVAL;
+       }
 
        flash->program_opcode = OPCODE_PP;
 
@@ -1077,9 +1261,17 @@ static int m25p_probe(struct spi_device *spi)
                flash->addr_width = 4;
                if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
                        /* Dedicated 4-byte command set */
-                       flash->read_opcode = flash->fast_read ?
-                               OPCODE_FAST_READ_4B :
-                               OPCODE_NORM_READ_4B;
+                       switch (flash->flash_read) {
+                       case M25P80_QUAD:
+                               flash->read_opcode = OPCODE_QUAD_READ_4B;
+                               break;
+                       case M25P80_FAST:
+                               flash->read_opcode = OPCODE_FAST_READ_4B;
+                               break;
+                       case M25P80_NORMAL:
+                               flash->read_opcode = OPCODE_NORM_READ_4B;
+                               break;
+                       }
                        flash->program_opcode = OPCODE_PP_4B;
                        /* No small sector erase for 4-byte command set */
                        flash->erase_opcode = OPCODE_SE_4B;
index 182849d39c61acb34c40a79ab6bf04e30162da79..5c8b322ba904b1691bf204efc3f4523106dc8f67 100644 (file)
@@ -205,7 +205,7 @@ static int __init ms02nv_init_one(ulong addr)
        mtd->type = MTD_RAM;
        mtd->flags = MTD_CAP_RAM;
        mtd->size = fixsize;
-       mtd->name = (char *)ms02nv_name;
+       mtd->name = ms02nv_name;
        mtd->owner = THIS_MODULE;
        mtd->_read = ms02nv_read;
        mtd->_write = ms02nv_write;
index 4a47b0266d4e587761a2db5c1cd2e1613fc710bc..624069de4f28c067260fc4fec75c35b713fc5c84 100644 (file)
@@ -669,7 +669,6 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
        if (!err)
                return 0;
 
-       spi_set_drvdata(spi, NULL);
        kfree(priv);
        return err;
 }
@@ -899,10 +898,8 @@ static int dataflash_remove(struct spi_device *spi)
        pr_debug("%s: remove\n", dev_name(&spi->dev));
 
        status = mtd_device_unregister(&flash->mtd);
-       if (status == 0) {
-               spi_set_drvdata(spi, NULL);
+       if (status == 0)
                kfree(flash);
-       }
        return status;
 }
 
index ec59d65897fbe38976112ab071741b6c2d3b9189..8e285089229c364ee29c6b4f7c90a872d6e7c693 100644 (file)
@@ -92,7 +92,7 @@ static void __exit cleanup_mtdram(void)
 }
 
 int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
-               unsigned long size, char *name)
+               unsigned long size, const char *name)
 {
        memset(mtd, 0, sizeof(*mtd));
 
index 2ef19aa0086bee62d51a9986ea50f5983461869c..d38b6460d50565c21fa46978d3f2383d13299e6f 100644 (file)
@@ -388,7 +388,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
        wake_up(&chip->wq);
 }
 
-int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
                        unsigned long adr, const struct kvec **pvec,
                        unsigned long *pvec_seek, int len)
 {
@@ -469,7 +469,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
        return ret;
 }
 
-int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
 {
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
@@ -748,34 +748,6 @@ static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
 }
 
-int word_program(struct map_info *map, loff_t adr, uint32_t curval)
-{
-    int ret;
-       struct lpddr_private *lpddr = map->fldrv_priv;
-       int chipnum = adr >> lpddr->chipshift;
-       struct flchip *chip = &lpddr->chips[chipnum];
-
-       mutex_lock(&chip->mutex);
-       ret = get_chip(map, chip, FL_WRITING);
-       if (ret) {
-               mutex_unlock(&chip->mutex);
-               return ret;
-       }
-
-       send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
-
-       ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
-       if (ret)        {
-               printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
-                       map->name, adr, curval);
-               goto out;
-       }
-
-out:   put_chip(map, chip);
-       mutex_unlock(&chip->mutex);
-       return ret;
-}
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
 MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
index 10debfea81e7147c25fa00e4913bd2d336c24b6a..d6b2451eab1d9f0db5808c3ad837ac2f6f604499 100644 (file)
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -162,13 +163,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
                mtd_device_unregister(info->mtd);
                map_destroy(info->mtd);
        }
-       if (info->map.virt)
-               iounmap(info->map.virt);
-
-       if (info->res) {
-               release_resource(info->res);
-               kfree(info->res);
-       }
 
        if (plat->exit)
                plat->exit();
@@ -194,7 +188,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
                        return err;
        }
 
-       info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+       info = devm_kzalloc(&dev->dev, sizeof(struct ixp4xx_flash_info),
+                           GFP_KERNEL);
        if(!info) {
                err = -ENOMEM;
                goto Error;
@@ -220,20 +215,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
        info->map.write = ixp4xx_probe_write16;
        info->map.copy_from = ixp4xx_copy_from;
 
-       info->res = request_mem_region(dev->resource->start,
-                       resource_size(dev->resource),
-                       "IXP4XXFlash");
-       if (!info->res) {
-               printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
-               err = -ENOMEM;
-               goto Error;
-       }
-
-       info->map.virt = ioremap(dev->resource->start,
-                                resource_size(dev->resource));
-       if (!info->map.virt) {
-               printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
-               err = -EIO;
+       info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource);
+       if (IS_ERR(info->map.virt)) {
+               err = PTR_ERR(info->map.virt);
                goto Error;
        }
 
index d7ac65d1d569dee08e325480501e29cc550ef012..93c507a6f86245024d6b1e79a54ca2f8ad8d78b1 100644 (file)
@@ -123,24 +123,28 @@ ltq_mtd_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
+       ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
+       if (!ltq_mtd)
+               return -ENOMEM;
+
        platform_set_drvdata(pdev, ltq_mtd);
 
        ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!ltq_mtd->res) {
                dev_err(&pdev->dev, "failed to get memory resource\n");
-               err = -ENOENT;
-               goto err_out;
+               return -ENOENT;
        }
 
-       ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
+       ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+                                   GFP_KERNEL);
+       if (!ltq_mtd->map)
+               return -ENOMEM;
+
        ltq_mtd->map->phys = ltq_mtd->res->start;
        ltq_mtd->map->size = resource_size(ltq_mtd->res);
        ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
-       if (IS_ERR(ltq_mtd->map->virt)) {
-               err = PTR_ERR(ltq_mtd->map->virt);
-               goto err_out;
-       }
+       if (IS_ERR(ltq_mtd->map->virt))
+               return PTR_ERR(ltq_mtd->map->virt);
 
        ltq_mtd->map->name = ltq_map_name;
        ltq_mtd->map->bankwidth = 2;
@@ -155,8 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
 
        if (!ltq_mtd->mtd) {
                dev_err(&pdev->dev, "probing failed\n");
-               err = -ENXIO;
-               goto err_free;
+               return -ENXIO;
        }
 
        ltq_mtd->mtd->owner = THIS_MODULE;
@@ -177,10 +180,6 @@ ltq_mtd_probe(struct platform_device *pdev)
 
 err_destroy:
        map_destroy(ltq_mtd->mtd);
-err_free:
-       kfree(ltq_mtd->map);
-err_out:
-       kfree(ltq_mtd);
        return err;
 }
 
@@ -189,13 +188,9 @@ ltq_mtd_remove(struct platform_device *pdev)
 {
        struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
 
-       if (ltq_mtd) {
-               if (ltq_mtd->mtd) {
-                       mtd_device_unregister(ltq_mtd->mtd);
-                       map_destroy(ltq_mtd->mtd);
-               }
-               kfree(ltq_mtd->map);
-               kfree(ltq_mtd);
+       if (ltq_mtd && ltq_mtd->mtd) {
+               mtd_device_unregister(ltq_mtd->mtd);
+               map_destroy(ltq_mtd->mtd);
        }
        return 0;
 }
index 0f55589a56b815af4c00bc521c504db468774fa5..9aad854fe9121aaa821181bffe64e819ec0e8498 100644 (file)
@@ -61,7 +61,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
        if (!info)
                return -ENOMEM;
 
-       info->map.name = (char *) flash->name;
+       info->map.name = flash->name;
        info->map.bankwidth = flash->width;
        info->map.phys = res->start;
        info->map.size = resource_size(res);
index d467f3b11c96c72ded5f3019cc823eb50fc6bd0b..39cc4181f02538a438b8fc427e00e19c1074fde3 100644 (file)
@@ -75,7 +75,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
 
        up->name = of_get_property(dp, "model", NULL);
        if (up->name && 0 < strlen(up->name))
-               up->map.name = (char *)up->name;
+               up->map.name = up->name;
 
        up->map.phys = op->resource[0].start;
 
index 92311a56939fca8d53e05e178054520f80b3bcec..34c0b16aed5c4e2f7d61a22d3fdeaf2ca066a632 100644 (file)
@@ -313,15 +313,7 @@ static struct attribute *mtd_attrs[] = {
        &dev_attr_bitflip_threshold.attr,
        NULL,
 };
-
-static struct attribute_group mtd_group = {
-       .attrs          = mtd_attrs,
-};
-
-static const struct attribute_group *mtd_groups[] = {
-       &mtd_group,
-       NULL,
-};
+ATTRIBUTE_GROUPS(mtd);
 
 static struct device_type mtd_devtype = {
        .name           = "mtd",
index 6e732c3820c14bf9d07b693a0c1de9bfff0088f0..3c7d6d7623c1cd5557b4dfeb38e80e03fc6e2abc 100644 (file)
@@ -534,7 +534,7 @@ out_register:
        return slave;
 }
 
-int mtd_add_partition(struct mtd_info *master, char *name,
+int mtd_add_partition(struct mtd_info *master, const char *name,
                      long long offset, long long length)
 {
        struct mtd_partition part;
@@ -672,22 +672,19 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
 
 #define put_partition_parser(p) do { module_put((p)->owner); } while (0)
 
-int register_mtd_parser(struct mtd_part_parser *p)
+void register_mtd_parser(struct mtd_part_parser *p)
 {
        spin_lock(&part_parser_lock);
        list_add(&p->list, &part_parsers);
        spin_unlock(&part_parser_lock);
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(register_mtd_parser);
 
-int deregister_mtd_parser(struct mtd_part_parser *p)
+void deregister_mtd_parser(struct mtd_part_parser *p)
 {
        spin_lock(&part_parser_lock);
        list_del(&p->list);
        spin_unlock(&part_parser_lock);
-       return 0;
 }
 EXPORT_SYMBOL_GPL(deregister_mtd_parser);
 
index 93ae6a6d94f713d6352abd612efd1fed05f43dfd..90ff447bf0437707fe763133532fbb6b89bae7b7 100644 (file)
@@ -95,7 +95,7 @@ config MTD_NAND_OMAP2
          platforms.
 
 config MTD_NAND_OMAP_BCH
-       depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+       depends on MTD_NAND_OMAP2
        tristate "Support hardware based BCH error correction"
        default n
        select BCH
@@ -326,11 +326,11 @@ config MTD_NAND_ATMEL
          on Atmel AT91 and AVR32 processors.
 
 config MTD_NAND_PXA3xx
-       tristate "Support for NAND flash devices on PXA3xx"
+       tristate "NAND support on PXA3xx and Armada 370/XP"
        depends on PXA3xx || ARCH_MMP || PLAT_ORION
        help
          This enables the driver for the NAND flash device found on
-         PXA3xx processors
+         PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
 
 config MTD_NAND_SLC_LPC32XX
        tristate "NXP LPC32xx SLC Controller"
@@ -458,17 +458,17 @@ config MTD_NAND_MXC
 
 config MTD_NAND_SH_FLCTL
        tristate "Support for NAND on Renesas SuperH FLCTL"
-       depends on SUPERH || ARCH_SHMOBILE
+       depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
        help
          Several Renesas SuperH CPU has FLCTL. This option enables support
          for NAND Flash using FLCTL.
 
 config MTD_NAND_DAVINCI
-        tristate "Support NAND on DaVinci SoC"
-        depends on ARCH_DAVINCI
+        tristate "Support NAND on DaVinci/Keystone SoC"
+        depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
         help
          Enable the driver for NAND flash chips on Texas Instruments
-         DaVinci processors.
+         DaVinci/Keystone processors.
 
 config MTD_NAND_TXX9NDFMC
        tristate "NAND Flash support for TXx9 SoC"
index 59f08c44abdbc9be920ea62974d19bcdc7884889..c36e9b84487cd36b55d96efd1d17c6db9494c0fc 100644 (file)
@@ -1961,10 +1961,8 @@ static int atmel_nand_probe(struct platform_device *pdev)
 
        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-       if (!host) {
-               printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
+       if (!host)
                return -ENOMEM;
-       }
 
        res = platform_driver_register(&atmel_nand_nfc_driver);
        if (res)
@@ -2062,14 +2060,14 @@ static int atmel_nand_probe(struct platform_device *pdev)
                }
 
                if (gpio_get_value(host->board.det_pin)) {
-                       printk(KERN_INFO "No SmartMedia card inserted.\n");
+                       dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
                        res = -ENXIO;
                        goto err_no_card;
                }
        }
 
        if (host->board.on_flash_bbt || on_flash_bbt) {
-               printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+               dev_info(&pdev->dev, "Use On Flash BBT\n");
                nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
        }
 
index ae8dd7c4103922fc760786be079b7576ad648893..2880d888cfc5c260aefcdd5caba743d05a292ae6 100644 (file)
@@ -418,10 +418,8 @@ static int au1550nd_probe(struct platform_device *pdev)
        }
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-       if (!ctx) {
-               dev_err(&pdev->dev, "no memory for NAND context\n");
+       if (!ctx)
                return -ENOMEM;
-       }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -480,6 +478,8 @@ static int au1550nd_probe(struct platform_device *pdev)
 
        mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
 
+       platform_set_drvdata(pdev, ctx);
+
        return 0;
 
 out3:
index 2c42e125720f2141258e064e1fc024fef092057b..94f55dbde995974213b48158048a92553bfc7475 100644 (file)
@@ -745,7 +745,6 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL) {
-               dev_err(&pdev->dev, "no memory for flash info\n");
                err = -ENOMEM;
                goto out_err_kzalloc;
        }
index c34985a55101b0606f76ed580f1e13ac562913e2..f2f64addb5e87119d8b72e9122565ae3ec9150f9 100644 (file)
@@ -640,10 +640,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
        pci_set_master(pdev);
 
        mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
-       if (!mtd) {
-               dev_warn(&pdev->dev, "failed to alloc mtd_info\n");
+       if (!mtd)
                return  -ENOMEM;
-       }
        cafe = (void *)(&mtd[1]);
 
        mtd->dev.parent = &pdev->dev;
index 39b2ef848811a5d1cf1c7fea98ef8f6168e98c5f..66ec95e6ca6c760c2713a621d14ec09a51446d66 100644 (file)
@@ -164,7 +164,6 @@ static int __init cmx270_init(void)
                                  sizeof(struct nand_chip),
                                  GFP_KERNEL);
        if (!cmx270_nand_mtd) {
-               pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
                ret = -ENOMEM;
                goto err_kzalloc;
        }
index d469a9a1dea0de7ea5f31172995c6e79411e7822..88109d375ae7f65546a2225e894e19ade98c41ba 100644 (file)
@@ -199,7 +199,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
        /* Allocate memory for MTD device structure and private data */
        new_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
        if (!new_mtd) {
-               printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
                err = -ENOMEM;
                goto out;
        }
index b77a01efb4837ea325988ee6e58e82bd128d7892..a4989ec6292efa0127ba4cc84d13fd07ad09fe4b 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
+#include <linux/of_mtd.h>
 
 #include <linux/platform_data/mtd-davinci.h>
 #include <linux/platform_data/mtd-davinci-aemif.h>
@@ -487,7 +488,7 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
  * ten ECC bytes plus the manufacturer's bad block marker byte, and
  * and not overlapping the default BBT markers.
  */
-static struct nand_ecclayout hwecc4_small __initconst = {
+static struct nand_ecclayout hwecc4_small = {
        .eccbytes = 10,
        .eccpos = { 0, 1, 2, 3, 4,
                /* offset 5 holds the badblock marker */
@@ -503,7 +504,7 @@ static struct nand_ecclayout hwecc4_small __initconst = {
  * storing ten ECC bytes plus the manufacturer's bad block marker byte,
  * and not overlapping the default BBT markers.
  */
-static struct nand_ecclayout hwecc4_2048 __initconst = {
+static struct nand_ecclayout hwecc4_2048 = {
        .eccbytes = 40,
        .eccpos = {
                /* at the end of spare sector */
@@ -534,17 +535,19 @@ static struct davinci_nand_pdata
                struct davinci_nand_pdata *pdata;
                const char *mode;
                u32 prop;
-               int len;
 
                pdata =  devm_kzalloc(&pdev->dev,
                                sizeof(struct davinci_nand_pdata),
                                GFP_KERNEL);
                pdev->dev.platform_data = pdata;
                if (!pdata)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-chipselect", &prop))
                        pdev->id = prop;
+               else
+                       return ERR_PTR(-EINVAL);
+
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-ale", &prop))
                        pdata->mask_ale = prop;
@@ -555,6 +558,8 @@ static struct davinci_nand_pdata
                        "ti,davinci-mask-chipsel", &prop))
                        pdata->mask_chipsel = prop;
                if (!of_property_read_string(pdev->dev.of_node,
+                       "nand-ecc-mode", &mode) ||
+                   !of_property_read_string(pdev->dev.of_node,
                        "ti,davinci-ecc-mode", &mode)) {
                        if (!strncmp("none", mode, 4))
                                pdata->ecc_mode = NAND_ECC_NONE;
@@ -566,12 +571,16 @@ static struct davinci_nand_pdata
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-ecc-bits", &prop))
                        pdata->ecc_bits = prop;
-               if (!of_property_read_u32(pdev->dev.of_node,
+
+               prop = of_get_nand_bus_width(pdev->dev.of_node);
+               if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-nand-buswidth", &prop))
                        if (prop == 16)
                                pdata->options |= NAND_BUSWIDTH_16;
-               if (of_find_property(pdev->dev.of_node,
-                       "ti,davinci-nand-use-bbt", &len))
+               if (of_property_read_bool(pdev->dev.of_node,
+                       "nand-on-flash-bbt") ||
+                   of_property_read_bool(pdev->dev.of_node,
+                       "ti,davinci-nand-use-bbt"))
                        pdata->bbt_options = NAND_BBT_USE_FLASH;
        }
 
@@ -585,7 +594,7 @@ static struct davinci_nand_pdata
 }
 #endif
 
-static int __init nand_davinci_probe(struct platform_device *pdev)
+static int nand_davinci_probe(struct platform_device *pdev)
 {
        struct davinci_nand_pdata       *pdata;
        struct davinci_nand_info        *info;
@@ -598,6 +607,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
        nand_ecc_modes_t                ecc_mode;
 
        pdata = nand_davinci_get_pdata(pdev);
+       if (IS_ERR(pdata))
+               return PTR_ERR(pdata);
+
        /* insist on board-specific configuration */
        if (!pdata)
                return -ENODEV;
@@ -607,11 +619,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                return -ENODEV;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-       if (!info) {
-               dev_err(&pdev->dev, "unable to allocate memory\n");
-               ret = -ENOMEM;
-               goto err_nomem;
-       }
+       if (!info)
+               return -ENOMEM;
 
        platform_set_drvdata(pdev, info);
 
@@ -619,19 +628,23 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
        res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res1 || !res2) {
                dev_err(&pdev->dev, "resource missing\n");
-               ret = -EINVAL;
-               goto err_nomem;
+               return -EINVAL;
        }
 
        vaddr = devm_ioremap_resource(&pdev->dev, res1);
-       if (IS_ERR(vaddr)) {
-               ret = PTR_ERR(vaddr);
-               goto err_ioremap;
-       }
-       base = devm_ioremap_resource(&pdev->dev, res2);
-       if (IS_ERR(base)) {
-               ret = PTR_ERR(base);
-               goto err_ioremap;
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+
+       /*
+        * This registers range is used to setup NAND settings. In case with
+        * TI AEMIF driver, the same memory address range is requested already
+        * by AEMIF, so we cannot request it twice, just ioremap.
+        * The AEMIF and NAND drivers not use the same registers in this range.
+        */
+       base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+       if (!base) {
+               dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+               return -EADDRNOTAVAIL;
        }
 
        info->dev               = &pdev->dev;
@@ -699,7 +712,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                        spin_unlock_irq(&davinci_nand_lock);
 
                        if (ret == -EBUSY)
-                               goto err_ecc;
+                               return ret;
 
                        info->chip.ecc.calculate = nand_davinci_calculate_4bit;
                        info->chip.ecc.correct = nand_davinci_correct_4bit;
@@ -715,8 +728,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                info->chip.ecc.strength = pdata->ecc_bits;
                break;
        default:
-               ret = -EINVAL;
-               goto err_ecc;
+               return -EINVAL;
        }
        info->chip.ecc.mode = ecc_mode;
 
@@ -724,7 +736,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
        if (IS_ERR(info->clk)) {
                ret = PTR_ERR(info->clk);
                dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
-               goto err_clk;
+               return ret;
        }
 
        ret = clk_prepare_enable(info->clk);
@@ -753,7 +765,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                                                        info->core_chipsel);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
-               goto err_timing;
+               goto err;
        }
 
        spin_lock_irq(&davinci_nand_lock);
@@ -769,7 +781,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
        ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
-               goto err_scan;
+               goto err;
        }
 
        /* Update ECC layout if needed ... for 1-bit HW ECC, the default
@@ -783,7 +795,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                if (!chunks || info->mtd.oobsize < 16) {
                        dev_dbg(&pdev->dev, "too small\n");
                        ret = -EINVAL;
-                       goto err_scan;
+                       goto err;
                }
 
                /* For small page chips, preserve the manufacturer's
@@ -814,7 +826,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "no 4-bit ECC support yet "
                                "for 4KiB-page NAND\n");
                ret = -EIO;
-               goto err_scan;
+               goto err;
 
 syndrome_done:
                info->chip.ecc.layout = &info->ecclayout;
@@ -822,7 +834,7 @@ syndrome_done:
 
        ret = nand_scan_tail(&info->mtd);
        if (ret < 0)
-               goto err_scan;
+               goto err;
 
        if (pdata->parts)
                ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
@@ -835,7 +847,7 @@ syndrome_done:
                                                NULL, 0);
        }
        if (ret < 0)
-               goto err_scan;
+               goto err;
 
        val = davinci_nand_readl(info, NRCSR_OFFSET);
        dev_info(&pdev->dev, "controller rev. %d.%d\n",
@@ -843,8 +855,7 @@ syndrome_done:
 
        return 0;
 
-err_scan:
-err_timing:
+err:
        clk_disable_unprepare(info->clk);
 
 err_clk_enable:
@@ -852,15 +863,10 @@ err_clk_enable:
        if (ecc_mode == NAND_ECC_HW_SYNDROME)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
-
-err_ecc:
-err_clk:
-err_ioremap:
-err_nomem:
        return ret;
 }
 
-static int __exit nand_davinci_remove(struct platform_device *pdev)
+static int nand_davinci_remove(struct platform_device *pdev)
 {
        struct davinci_nand_info *info = platform_get_drvdata(pdev);
 
@@ -877,7 +883,8 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver nand_davinci_driver = {
-       .remove         = __exit_p(nand_davinci_remove),
+       .probe          = nand_davinci_probe,
+       .remove         = nand_davinci_remove,
        .driver         = {
                .name   = "davinci_nand",
                .owner  = THIS_MODULE,
@@ -886,7 +893,7 @@ static struct platform_driver nand_davinci_driver = {
 };
 MODULE_ALIAS("platform:davinci_nand");
 
-module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);
+module_platform_driver(nand_davinci_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Texas Instruments");
index 370b9dd7a2786841f3180d732321c99416030559..c07cd573ad3af0dd4fa1e91f36c4a44259bbd5e7 100644 (file)
@@ -125,7 +125,6 @@ static void reset_buf(struct denali_nand_info *denali)
 
 static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
 {
-       BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
        denali->buf.buf[denali->buf.tail++] = byte;
 }
 
@@ -897,7 +896,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 /* this function examines buffers to see if they contain data that
  * indicate that the buffer is part of an erased region of flash.
  */
-bool is_erased(uint8_t *buf, int len)
+static bool is_erased(uint8_t *buf, int len)
 {
        int i = 0;
        for (i = 0; i < len; i++)
@@ -1429,20 +1428,12 @@ int denali_init(struct denali_nand_info *denali)
                }
        }
 
-       /* Is 32-bit DMA supported? */
-       ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
-       if (ret) {
-               pr_err("Spectra: no usable DMA configuration\n");
-               return ret;
-       }
-       denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
-                                            DENALI_BUF_SIZE,
-                                            DMA_BIDIRECTIONAL);
+       /* allocate a temporary buffer for nand_scan_ident() */
+       denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
+                                       GFP_DMA | GFP_KERNEL);
+       if (!denali->buf.buf)
+               return -ENOMEM;
 
-       if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
-               dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
-               return -EIO;
-       }
        denali->mtd.dev.parent = denali->dev;
        denali_hw_init(denali);
        denali_drv_init(denali);
@@ -1475,12 +1466,29 @@ int denali_init(struct denali_nand_info *denali)
                goto failed_req_irq;
        }
 
-       /* MTD supported page sizes vary by kernel. We validate our
-        * kernel supports the device here.
-        */
-       if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
-               ret = -ENODEV;
-               pr_err("Spectra: device size not supported by this version of MTD.");
+       /* allocate the right size buffer now */
+       devm_kfree(denali->dev, denali->buf.buf);
+       denali->buf.buf = devm_kzalloc(denali->dev,
+                            denali->mtd.writesize + denali->mtd.oobsize,
+                            GFP_KERNEL);
+       if (!denali->buf.buf) {
+               ret = -ENOMEM;
+               goto failed_req_irq;
+       }
+
+       /* Is 32-bit DMA supported? */
+       ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
+       if (ret) {
+               pr_err("Spectra: no usable DMA configuration\n");
+               goto failed_req_irq;
+       }
+
+       denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
+                            denali->mtd.writesize + denali->mtd.oobsize,
+                            DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+               dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+               ret = -EIO;
                goto failed_req_irq;
        }
 
@@ -1602,7 +1610,8 @@ EXPORT_SYMBOL(denali_init);
 void denali_remove(struct denali_nand_info *denali)
 {
        denali_irq_cleanup(denali->irq, denali);
-       dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+       dma_unmap_single(denali->dev, denali->buf.dma_buf,
+                       denali->mtd.writesize + denali->mtd.oobsize,
                        DMA_BIDIRECTIONAL);
 }
 EXPORT_SYMBOL(denali_remove);
index cec5712862c9d01c50c417cad96f0bf3aa9a60ec..96681746242171fcbb5fdb4ab6beb6a77be3e54b 100644 (file)
 
 #define ECC_SECTOR_SIZE     512
 
-#define DENALI_BUF_SIZE                (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-
 struct nand_buf {
        int head;
        int tail;
-       uint8_t buf[DENALI_BUF_SIZE];
+       uint8_t *buf;
        dma_addr_t dma_buf;
 };
 
index 92530244e2cbfdf48ecc2aa2282b90d12a98d0ad..babb02c4b2204ed0c5881a3577084c2ea826d5df 100644 (file)
@@ -108,7 +108,7 @@ static int denali_dt_probe(struct platform_device *ofdev)
                denali->dev->dma_mask = NULL;
        }
 
-       dt->clk = clk_get(&ofdev->dev, NULL);
+       dt->clk = devm_clk_get(&ofdev->dev, NULL);
        if (IS_ERR(dt->clk)) {
                dev_err(&ofdev->dev, "no clk available\n");
                return PTR_ERR(dt->clk);
@@ -124,7 +124,6 @@ static int denali_dt_probe(struct platform_device *ofdev)
 
 out_disable_clk:
        clk_disable_unprepare(dt->clk);
-       clk_put(dt->clk);
 
        return ret;
 }
@@ -135,7 +134,6 @@ static int denali_dt_remove(struct platform_device *ofdev)
 
        denali_remove(&dt->denali);
        clk_disable(dt->clk);
-       clk_put(dt->clk);
 
        return 0;
 }
index 033f177a6369b2d240b499f33df0b6bd925ed4d2..6e2f387b823f694e59e368bc987850a986c7d0c2 100644 (file)
@@ -21,7 +21,7 @@
 #define DENALI_NAND_NAME    "denali-nand-pci"
 
 /* List of platforms this NAND controller has be integrated into */
-static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
+static const struct pci_device_id denali_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
        { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
        { /* end: all zeroes */ }
@@ -131,7 +131,6 @@ static struct pci_driver denali_pci_driver = {
 
 static int denali_init_pci(void)
 {
-       pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
        return pci_register_driver(&denali_pci_driver);
 }
 module_init(denali_init_pci);
index b68a4959f700af3e2768af69e6dd9afd0f94c8e1..fec31d71b84e03d7a84cbaf15b4558c16d413e0a 100644 (file)
@@ -1058,7 +1058,6 @@ static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partitio
 
        buf = kmalloc(mtd->writesize, GFP_KERNEL);
        if (!buf) {
-               printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
                return 0;
        }
        if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
@@ -1166,7 +1165,6 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
 
        buf = kmalloc(mtd->writesize, GFP_KERNEL);
        if (!buf) {
-               printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
                return 0;
        }
 
@@ -1440,10 +1438,13 @@ static int __init doc_probe(unsigned long physadr)
        int reg, len, numchips;
        int ret = 0;
 
+       if (!request_mem_region(physadr, DOC_IOREMAP_LEN, NULL))
+               return -EBUSY;
        virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
        if (!virtadr) {
                printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
-               return -EIO;
+               ret = -EIO;
+               goto error_ioremap;
        }
 
        /* It's not possible to cleanly detect the DiskOnChip - the
@@ -1561,7 +1562,6 @@ static int __init doc_probe(unsigned long physadr)
            sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
        mtd = kzalloc(len, GFP_KERNEL);
        if (!mtd) {
-               printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
                ret = -ENOMEM;
                goto fail;
        }
@@ -1629,6 +1629,10 @@ static int __init doc_probe(unsigned long physadr)
        WriteDOC(save_control, virtadr, DOCControl);
  fail:
        iounmap(virtadr);
+
+error_ioremap:
+       release_mem_region(physadr, DOC_IOREMAP_LEN);
+
        return ret;
 }
 
@@ -1645,6 +1649,7 @@ static void release_nanddoc(void)
                nextmtd = doc->nextdoc;
                nand_release(mtd);
                iounmap(doc->virtadr);
+               release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
                kfree(mtd);
        }
 }
index c966fc7474ced5fc8423b9440340f1d9b5072bbc..bcf60800c3ce7f5e0489972ebe7a3e3f83d225df 100644 (file)
@@ -847,7 +847,6 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
        if (!fsl_lbc_ctrl_dev->nand) {
                elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
                if (!elbc_fcm_ctrl) {
-                       dev_err(dev, "failed to allocate memory\n");
                        mutex_unlock(&fsl_elbc_nand_mutex);
                        ret = -ENOMEM;
                        goto err;
@@ -875,7 +874,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
                goto err;
        }
 
-       priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+       priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
        if (!priv->mtd.name) {
                ret = -ENOMEM;
                goto err;
index 43355779cff583975721e5c7bd7770bc055aa1c3..90ca7e75d6f038e4cefb365ee2a436eac4ac8cc2 100644 (file)
@@ -1060,7 +1060,6 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
        if (!fsl_ifc_ctrl_dev->nand) {
                ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
                if (!ifc_nand_ctrl) {
-                       dev_err(&dev->dev, "failed to allocate memory\n");
                        mutex_unlock(&fsl_ifc_nand_mutex);
                        return -ENOMEM;
                }
@@ -1101,7 +1100,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
                    IFC_NAND_EVTER_INTR_FTOERIR_EN |
                    IFC_NAND_EVTER_INTR_WPERIR_EN,
                    &ifc->ifc_nand.nand_evter_intr_en);
-       priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+       priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
        if (!priv->mtd.name) {
                ret = -ENOMEM;
                goto err;
index 8b2752263db9a5549742bb36c3dcee48999b8b62..1550692973dc2ebcaa6864bc3e2c51607872b818 100644 (file)
@@ -889,10 +889,8 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
 
        pdata->nand_timings = devm_kzalloc(&pdev->dev,
                                sizeof(*pdata->nand_timings), GFP_KERNEL);
-       if (!pdata->nand_timings) {
-               dev_err(&pdev->dev, "no memory for nand_timing\n");
+       if (!pdata->nand_timings)
                return -ENOMEM;
-       }
        of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
                                                sizeof(*pdata->nand_timings));
 
@@ -950,10 +948,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 
        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-       if (!host) {
-               dev_err(&pdev->dev, "failed to allocate device structure\n");
+       if (!host)
                return -ENOMEM;
-       }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
        host->data_va = devm_ioremap_resource(&pdev->dev, res);
@@ -1108,8 +1104,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
                        host->ecc_place = &fsmc_ecc4_lp_place;
                        break;
                default:
-                       printk(KERN_WARNING "No oob scheme defined for "
-                              "oobsize %d\n", mtd->oobsize);
+                       dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+                                mtd->oobsize);
                        BUG();
                }
        } else {
@@ -1124,8 +1120,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
                        nand->ecc.layout = &fsmc_ecc1_128_layout;
                        break;
                default:
-                       printk(KERN_WARNING "No oob scheme defined for "
-                              "oobsize %d\n", mtd->oobsize);
+                       dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+                                mtd->oobsize);
                        BUG();
                }
        }
index e826f898241f92b24704ba7103fc0cdd970a63d1..8e6148aa4539a290b4b3a4cda8883f1bbba58f92 100644 (file)
@@ -132,13 +132,17 @@ static int gpio_nand_get_config_of(const struct device *dev,
 
 static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
 {
-       struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+       struct resource *r;
        u64 addr;
 
-       if (!r || of_property_read_u64(pdev->dev.of_node,
+       if (of_property_read_u64(pdev->dev.of_node,
                                       "gpio-control-nand,io-sync-reg", &addr))
                return NULL;
 
+       r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+       if (!r)
+               return NULL;
+
        r->start = addr;
        r->end = r->start + 0x3;
        r->flags = IORESOURCE_MEM;
@@ -211,10 +215,8 @@ static int gpio_nand_probe(struct platform_device *pdev)
                return -EINVAL;
 
        gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
-       if (!gpiomtd) {
-               dev_err(&pdev->dev, "failed to create NAND MTD\n");
+       if (!gpiomtd)
                return -ENOMEM;
-       }
 
        chip = &gpiomtd->nand_chip;
 
index aaced29727fb0437f6a0c142253b04cbc0ca82d0..dd1df605a1d61ec8a43ffb5515ea50ce2f3c9b69 100644 (file)
@@ -20,6 +20,7 @@
  */
 #include <linux/delay.h>
 #include <linux/clk.h>
+#include <linux/slab.h>
 
 #include "gpmi-nand.h"
 #include "gpmi-regs.h"
@@ -207,30 +208,41 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
        u32 reg;
        int i;
 
-       pr_err("Show GPMI registers :\n");
+       dev_err(this->dev, "Show GPMI registers :\n");
        for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
                reg = readl(r->gpmi_regs + i * 0x10);
-               pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+               dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
        }
 
        /* start to print out the BCH info */
-       pr_err("Show BCH registers :\n");
+       dev_err(this->dev, "Show BCH registers :\n");
        for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
                reg = readl(r->bch_regs + i * 0x10);
-               pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+               dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
        }
-       pr_err("BCH Geometry :\n");
-       pr_err("GF length              : %u\n", geo->gf_len);
-       pr_err("ECC Strength           : %u\n", geo->ecc_strength);
-       pr_err("Page Size in Bytes     : %u\n", geo->page_size);
-       pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
-       pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
-       pr_err("ECC Chunk Count        : %u\n", geo->ecc_chunk_count);
-       pr_err("Payload Size in Bytes  : %u\n", geo->payload_size);
-       pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
-       pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
-       pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
-       pr_err("Block Mark Bit Offset  : %u\n", geo->block_mark_bit_offset);
+       dev_err(this->dev, "BCH Geometry :\n"
+               "GF length              : %u\n"
+               "ECC Strength           : %u\n"
+               "Page Size in Bytes     : %u\n"
+               "Metadata Size in Bytes : %u\n"
+               "ECC Chunk Size in Bytes: %u\n"
+               "ECC Chunk Count        : %u\n"
+               "Payload Size in Bytes  : %u\n"
+               "Auxiliary Size in Bytes: %u\n"
+               "Auxiliary Status Offset: %u\n"
+               "Block Mark Byte Offset : %u\n"
+               "Block Mark Bit Offset  : %u\n",
+               geo->gf_len,
+               geo->ecc_strength,
+               geo->page_size,
+               geo->metadata_size,
+               geo->ecc_chunk_size,
+               geo->ecc_chunk_count,
+               geo->payload_size,
+               geo->auxiliary_size,
+               geo->auxiliary_status_offset,
+               geo->block_mark_byte_offset,
+               geo->block_mark_bit_offset);
 }
 
 /* Configures the geometry for BCH.  */
@@ -265,8 +277,8 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
        * On the other hand, the MX28 needs the reset, because one case has been
        * seen where the BCH produced ECC errors constantly after 10000
-       * consecutive reboots. The latter case has not been seen on the MX23 yet,
-       * still we don't know if it could happen there as well.
+       * consecutive reboots. The latter case has not been seen on the MX23
+       * yet, still we don't know if it could happen there as well.
        */
        ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
        if (ret)
@@ -353,7 +365,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
        improved_timing_is_available =
                (target.tREA_in_ns  >= 0) &&
                (target.tRLOH_in_ns >= 0) &&
-               (target.tRHOH_in_ns >= 0) ;
+               (target.tRHOH_in_ns >= 0);
 
        /* Inspect the clock. */
        nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
@@ -911,10 +923,14 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
        struct resources  *r = &this->resources;
        struct nand_chip *nand = &this->nand;
        struct mtd_info  *mtd = &this->mtd;
-       uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+       uint8_t *feature;
        unsigned long rate;
        int ret;
 
+       feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
+       if (!feature)
+               return -ENOMEM;
+
        nand->select_chip(mtd, 0);
 
        /* [1] send SET FEATURE commond to NAND */
@@ -942,11 +958,13 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
 
        this->flags |= GPMI_ASYNC_EDO_ENABLED;
        this->timing_mode = mode;
+       kfree(feature);
        dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
        return 0;
 
 err_out:
        nand->select_chip(mtd, -1);
+       kfree(feature);
        dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
        return -EINVAL;
 }
@@ -986,7 +1004,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
        /* Enable the clock. */
        ret = gpmi_enable_clk(this);
        if (ret) {
-               pr_err("We failed in enable the clk\n");
+               dev_err(this->dev, "We failed in enable the clk\n");
                goto err_out;
        }
 
@@ -1003,7 +1021,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
        /* [1] Set HW_GPMI_TIMING0 */
        reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
                BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
-               BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)       ;
+               BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
 
        writel(reg, gpmi_regs + HW_GPMI_TIMING0);
 
@@ -1090,7 +1108,7 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
                mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
                reg = readl(r->gpmi_regs + HW_GPMI_STAT);
        } else
-               pr_err("unknow arch.\n");
+               dev_err(this->dev, "unknow arch.\n");
        return reg & mask;
 }
 
@@ -1121,10 +1139,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)
        desc = dmaengine_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-       if (!desc) {
-               pr_err("step 1 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
        sgl = &this->cmd_sgl;
@@ -1134,11 +1150,8 @@ int gpmi_send_command(struct gpmi_nand_data *this)
        desc = dmaengine_prep_slave_sg(channel,
                                sgl, 1, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-       if (!desc) {
-               pr_err("step 2 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [3] submit the DMA */
        set_dma_type(this, DMA_FOR_COMMAND);
@@ -1167,20 +1180,17 @@ int gpmi_send_data(struct gpmi_nand_data *this)
        pio[1] = 0;
        desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-       if (!desc) {
-               pr_err("step 1 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [2] send DMA request */
        prepare_data_dma(this, DMA_TO_DEVICE);
        desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
                                        1, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc) {
-               pr_err("step 2 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
+
        /* [3] submit the DMA */
        set_dma_type(this, DMA_FOR_WRITE_DATA);
        return start_dma_without_bch_irq(this, desc);
@@ -1204,20 +1214,16 @@ int gpmi_read_data(struct gpmi_nand_data *this)
        desc = dmaengine_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-       if (!desc) {
-               pr_err("step 1 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [2] : send DMA request */
        prepare_data_dma(this, DMA_FROM_DEVICE);
        desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
                                        1, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc) {
-               pr_err("step 2 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [3] : submit the DMA */
        set_dma_type(this, DMA_FOR_READ_DATA);
@@ -1262,10 +1268,9 @@ int gpmi_send_page(struct gpmi_nand_data *this,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE,
                                        DMA_CTRL_ACK);
-       if (!desc) {
-               pr_err("step 2 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
+
        set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
        return start_dma_with_bch_irq(this, desc);
 }
@@ -1297,10 +1302,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
        desc = dmaengine_prep_slave_sg(channel,
                                (struct scatterlist *)pio, 2,
                                DMA_TRANS_NONE, 0);
-       if (!desc) {
-               pr_err("step 1 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [2] Enable the BCH block and read. */
        command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
@@ -1327,10 +1330,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_TRANS_NONE,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc) {
-               pr_err("step 2 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [3] Disable the BCH block */
        command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
@@ -1348,10 +1349,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
                                (struct scatterlist *)pio, 3,
                                DMA_TRANS_NONE,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-       if (!desc) {
-               pr_err("step 3 error\n");
-               return -1;
-       }
+       if (!desc)
+               return -EINVAL;
 
        /* [4] submit the DMA */
        set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
index dabbc14db5630d8592268012fc067559179aa0f8..ca6369fe91ff31fc89bd382dca62261774310ff0 100644 (file)
@@ -18,9 +18,6 @@
  * with this program; if not, write to the Free Software Foundation, Inc.,
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
@@ -352,6 +349,9 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
 
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
+       if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
+               && set_geometry_by_ecc_info(this))
+               return 0;
        return legacy_set_geometry(this);
 }
 
@@ -367,25 +367,28 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
        struct scatterlist *sgl = &this->data_sgl;
        int ret;
 
-       this->direct_dma_map_ok = true;
-
        /* first try to map the upper buffer directly */
-       sg_init_one(sgl, this->upper_buf, this->upper_len);
-       ret = dma_map_sg(this->dev, sgl, 1, dr);
-       if (ret == 0) {
-               /* We have to use our own DMA buffer. */
-               sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
-
-               if (dr == DMA_TO_DEVICE)
-                       memcpy(this->data_buffer_dma, this->upper_buf,
-                               this->upper_len);
-
+       if (virt_addr_valid(this->upper_buf) &&
+               !object_is_on_stack(this->upper_buf)) {
+               sg_init_one(sgl, this->upper_buf, this->upper_len);
                ret = dma_map_sg(this->dev, sgl, 1, dr);
                if (ret == 0)
-                       pr_err("DMA mapping failed.\n");
+                       goto map_fail;
 
-               this->direct_dma_map_ok = false;
+               this->direct_dma_map_ok = true;
+               return;
        }
+
+map_fail:
+       /* We have to use our own DMA buffer. */
+       sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+
+       if (dr == DMA_TO_DEVICE)
+               memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+
+       dma_map_sg(this->dev, sgl, 1, dr);
+
+       this->direct_dma_map_ok = false;
 }
 
 /* This will be called after the DMA operation is finished. */
@@ -416,7 +419,7 @@ static void dma_irq_callback(void *param)
                break;
 
        default:
-               pr_err("in wrong DMA operation.\n");
+               dev_err(this->dev, "in wrong DMA operation.\n");
        }
 
        complete(dma_c);
@@ -438,7 +441,8 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
        /* Wait for the interrupt from the DMA block. */
        err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
        if (!err) {
-               pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
+               dev_err(this->dev, "DMA timeout, last DMA :%d\n",
+                       this->last_dma_type);
                gpmi_dump_info(this);
                return -ETIMEDOUT;
        }
@@ -467,7 +471,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
        /* Wait for the interrupt from the BCH block. */
        err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
        if (!err) {
-               pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
+               dev_err(this->dev, "BCH timeout, last DMA :%d\n",
+                       this->last_dma_type);
                gpmi_dump_info(this);
                return -ETIMEDOUT;
        }
@@ -483,70 +488,38 @@ static int acquire_register_block(struct gpmi_nand_data *this,
        void __iomem *p;
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
-       if (!r) {
-               pr_err("Can't get resource for %s\n", res_name);
-               return -ENODEV;
-       }
-
-       p = ioremap(r->start, resource_size(r));
-       if (!p) {
-               pr_err("Can't remap %s\n", res_name);
-               return -ENOMEM;
-       }
+       p = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(p))
+               return PTR_ERR(p);
 
        if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
                res->gpmi_regs = p;
        else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
                res->bch_regs = p;
        else
-               pr_err("unknown resource name : %s\n", res_name);
+               dev_err(this->dev, "unknown resource name : %s\n", res_name);
 
        return 0;
 }
 
-static void release_register_block(struct gpmi_nand_data *this)
-{
-       struct resources *res = &this->resources;
-       if (res->gpmi_regs)
-               iounmap(res->gpmi_regs);
-       if (res->bch_regs)
-               iounmap(res->bch_regs);
-       res->gpmi_regs = NULL;
-       res->bch_regs = NULL;
-}
-
 static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
 {
        struct platform_device *pdev = this->pdev;
-       struct resources *res = &this->resources;
        const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
        struct resource *r;
        int err;
 
        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
        if (!r) {
-               pr_err("Can't get resource for %s\n", res_name);
+               dev_err(this->dev, "Can't get resource for %s\n", res_name);
                return -ENODEV;
        }
 
-       err = request_irq(r->start, irq_h, 0, res_name, this);
-       if (err) {
-               pr_err("Can't own %s\n", res_name);
-               return err;
-       }
-
-       res->bch_low_interrupt = r->start;
-       res->bch_high_interrupt = r->end;
-       return 0;
-}
-
-static void release_bch_irq(struct gpmi_nand_data *this)
-{
-       struct resources *res = &this->resources;
-       int i = res->bch_low_interrupt;
+       err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+       if (err)
+               dev_err(this->dev, "error requesting BCH IRQ\n");
 
-       for (; i <= res->bch_high_interrupt; i++)
-               free_irq(i, this);
+       return err;
 }
 
 static void release_dma_channels(struct gpmi_nand_data *this)
@@ -567,7 +540,7 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
        /* request dma channel */
        dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
        if (!dma_chan) {
-               pr_err("Failed to request DMA channel.\n");
+               dev_err(this->dev, "Failed to request DMA channel.\n");
                goto acquire_err;
        }
 
@@ -579,21 +552,6 @@ acquire_err:
        return -EINVAL;
 }
 
-static void gpmi_put_clks(struct gpmi_nand_data *this)
-{
-       struct resources *r = &this->resources;
-       struct clk *clk;
-       int i;
-
-       for (i = 0; i < GPMI_CLK_MAX; i++) {
-               clk = r->clock[i];
-               if (clk) {
-                       clk_put(clk);
-                       r->clock[i] = NULL;
-               }
-       }
-}
-
 static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
        "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
 };
@@ -606,7 +564,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
        int err, i;
 
        /* The main clock is stored in the first. */
-       r->clock[0] = clk_get(this->dev, "gpmi_io");
+       r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
        if (IS_ERR(r->clock[0])) {
                err = PTR_ERR(r->clock[0]);
                goto err_clock;
@@ -622,7 +580,7 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
                if (extra_clks[i - 1] == NULL)
                        break;
 
-               clk = clk_get(this->dev, extra_clks[i - 1]);
+               clk = devm_clk_get(this->dev, extra_clks[i - 1]);
                if (IS_ERR(clk)) {
                        err = PTR_ERR(clk);
                        goto err_clock;
@@ -644,7 +602,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
 
 err_clock:
        dev_dbg(this->dev, "failed in finding the clocks.\n");
-       gpmi_put_clks(this);
        return err;
 }
 
@@ -666,7 +623,7 @@ static int acquire_resources(struct gpmi_nand_data *this)
 
        ret = acquire_dma_channels(this);
        if (ret)
-               goto exit_dma_channels;
+               goto exit_regs;
 
        ret = gpmi_get_clks(this);
        if (ret)
@@ -675,18 +632,12 @@ static int acquire_resources(struct gpmi_nand_data *this)
 
 exit_clock:
        release_dma_channels(this);
-exit_dma_channels:
-       release_bch_irq(this);
 exit_regs:
-       release_register_block(this);
        return ret;
 }
 
 static void release_resources(struct gpmi_nand_data *this)
 {
-       gpmi_put_clks(this);
-       release_register_block(this);
-       release_bch_irq(this);
        release_dma_channels(this);
 }
 
@@ -732,8 +683,7 @@ static int read_page_prepare(struct gpmi_nand_data *this,
                                                length, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dest_phys)) {
                        if (alt_size < length) {
-                               pr_err("%s, Alternate buffer is too small\n",
-                                       __func__);
+                               dev_err(dev, "Alternate buffer is too small\n");
                                return -ENOMEM;
                        }
                        goto map_failed;
@@ -783,8 +733,7 @@ static int send_page_prepare(struct gpmi_nand_data *this,
                                                DMA_TO_DEVICE);
                if (dma_mapping_error(dev, source_phys)) {
                        if (alt_size < length) {
-                               pr_err("%s, Alternate buffer is too small\n",
-                                       __func__);
+                               dev_err(dev, "Alternate buffer is too small\n");
                                return -ENOMEM;
                        }
                        goto map_failed;
@@ -837,14 +786,23 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
 {
        struct bch_geometry *geo = &this->bch_geometry;
        struct device *dev = this->dev;
+       struct mtd_info *mtd = &this->mtd;
 
        /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
        this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
        if (this->cmd_buffer == NULL)
                goto error_alloc;
 
-       /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
-       this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+       /*
+        * [2] Allocate a read/write data buffer.
+        *     The gpmi_alloc_dma_buffer can be called twice.
+        *     We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer
+        *     is called before the nand_scan_ident; and we allocate a buffer
+        *     of the real NAND page size when the gpmi_alloc_dma_buffer is
+        *     called after the nand_scan_ident.
+        */
+       this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+                                       GFP_DMA | GFP_KERNEL);
        if (this->data_buffer_dma == NULL)
                goto error_alloc;
 
@@ -872,7 +830,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
 
 error_alloc:
        gpmi_free_dma_buffer(this);
-       pr_err("Error allocating DMA buffers!\n");
        return -ENOMEM;
 }
 
@@ -904,7 +861,8 @@ static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
 
        ret = gpmi_send_command(this);
        if (ret)
-               pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
+               dev_err(this->dev, "Chip: %u, Error %d\n",
+                       this->current_chip, ret);
 
        this->command_length = 0;
 }
@@ -935,7 +893,7 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
        struct nand_chip *chip = mtd->priv;
        struct gpmi_nand_data *this = chip->priv;
 
-       pr_debug("len is %d\n", len);
+       dev_dbg(this->dev, "len is %d\n", len);
        this->upper_buf = buf;
        this->upper_len = len;
 
@@ -947,7 +905,7 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
        struct nand_chip *chip = mtd->priv;
        struct gpmi_nand_data *this = chip->priv;
 
-       pr_debug("len is %d\n", len);
+       dev_dbg(this->dev, "len is %d\n", len);
        this->upper_buf = (uint8_t *)buf;
        this->upper_len = len;
 
@@ -1026,13 +984,13 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
        unsigned int  max_bitflips = 0;
        int           ret;
 
-       pr_debug("page number is : %d\n", page);
+       dev_dbg(this->dev, "page number is : %d\n", page);
        ret = read_page_prepare(this, buf, mtd->writesize,
                                        this->payload_virt, this->payload_phys,
                                        nfc_geo->payload_size,
                                        &payload_virt, &payload_phys);
        if (ret) {
-               pr_err("Inadequate DMA buffer\n");
+               dev_err(this->dev, "Inadequate DMA buffer\n");
                ret = -ENOMEM;
                return ret;
        }
@@ -1046,7 +1004,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                        nfc_geo->payload_size,
                        payload_virt, payload_phys);
        if (ret) {
-               pr_err("Error in ECC-based read: %d\n", ret);
+               dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
                return ret;
        }
 
@@ -1102,7 +1060,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
        dma_addr_t auxiliary_phys;
        int        ret;
 
-       pr_debug("ecc write page.\n");
+       dev_dbg(this->dev, "ecc write page.\n");
        if (this->swap_block_mark) {
                /*
                 * If control arrives here, we're doing block mark swapping.
@@ -1132,7 +1090,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                                nfc_geo->payload_size,
                                &payload_virt, &payload_phys);
                if (ret) {
-                       pr_err("Inadequate payload DMA buffer\n");
+                       dev_err(this->dev, "Inadequate payload DMA buffer\n");
                        return 0;
                }
 
@@ -1142,7 +1100,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                                nfc_geo->auxiliary_size,
                                &auxiliary_virt, &auxiliary_phys);
                if (ret) {
-                       pr_err("Inadequate auxiliary DMA buffer\n");
+                       dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
                        goto exit_auxiliary;
                }
        }
@@ -1150,7 +1108,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
        /* Ask the NFC. */
        ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
        if (ret)
-               pr_err("Error in ECC-based write: %d\n", ret);
+               dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
 
        if (!this->swap_block_mark) {
                send_page_end(this, chip->oob_poi, mtd->oobsize,
@@ -1240,7 +1198,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
 {
        struct gpmi_nand_data *this = chip->priv;
 
-       pr_debug("page number is %d\n", page);
+       dev_dbg(this->dev, "page number is %d\n", page);
        /* clear the OOB buffer */
        memset(chip->oob_poi, ~0, mtd->oobsize);
 
@@ -1453,7 +1411,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
 
        /* Write the NCB fingerprint into the page buffer. */
        memset(buffer, ~0, mtd->writesize);
-       memset(chip->oob_poi, ~0, mtd->oobsize);
        memcpy(buffer + 12, fingerprint, strlen(fingerprint));
 
        /* Loop through the first search area, writing NCB fingerprints. */
@@ -1568,7 +1525,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
        /* Set up the NFC geometry which is used by BCH. */
        ret = bch_set_geometry(this);
        if (ret) {
-               pr_err("Error setting BCH geometry : %d\n", ret);
+               dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
                return ret;
        }
 
@@ -1576,20 +1533,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
        return gpmi_alloc_dma_buffer(this);
 }
 
-static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
-{
-       /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
-       if (GPMI_IS_MX23(this))
-               this->swap_block_mark = false;
-       else
-               this->swap_block_mark = true;
-
-       /* Set up the medium geometry */
-       return gpmi_set_geometry(this);
-
-}
-
-static void gpmi_nfc_exit(struct gpmi_nand_data *this)
+static void gpmi_nand_exit(struct gpmi_nand_data *this)
 {
        nand_release(&this->mtd);
        gpmi_free_dma_buffer(this);
@@ -1603,8 +1547,11 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
        struct bch_geometry *bch_geo = &this->bch_geometry;
        int ret;
 
-       /* Prepare for the BBT scan. */
-       ret = gpmi_pre_bbt_scan(this);
+       /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+       this->swap_block_mark = !GPMI_IS_MX23(this);
+
+       /* Set up the medium geometry */
+       ret = gpmi_set_geometry(this);
        if (ret)
                return ret;
 
@@ -1629,7 +1576,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
        return 0;
 }
 
-static int gpmi_nfc_init(struct gpmi_nand_data *this)
+static int gpmi_nand_init(struct gpmi_nand_data *this)
 {
        struct mtd_info  *mtd = &this->mtd;
        struct nand_chip *chip = &this->nand;
@@ -1693,7 +1640,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
        return 0;
 
 err_out:
-       gpmi_nfc_exit(this);
+       gpmi_nand_exit(this);
        return ret;
 }
 
@@ -1728,15 +1675,13 @@ static int gpmi_nand_probe(struct platform_device *pdev)
        if (of_id) {
                pdev->id_entry = of_id->data;
        } else {
-               pr_err("Failed to find the right device id.\n");
+               dev_err(&pdev->dev, "Failed to find the right device id.\n");
                return -ENODEV;
        }
 
        this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
-       if (!this) {
-               pr_err("Failed to allocate per-device memory\n");
+       if (!this)
                return -ENOMEM;
-       }
 
        platform_set_drvdata(pdev, this);
        this->pdev  = pdev;
@@ -1750,7 +1695,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
        if (ret)
                goto exit_nfc_init;
 
-       ret = gpmi_nfc_init(this);
+       ret = gpmi_nand_init(this);
        if (ret)
                goto exit_nfc_init;
 
@@ -1770,7 +1715,7 @@ static int gpmi_nand_remove(struct platform_device *pdev)
 {
        struct gpmi_nand_data *this = platform_get_drvdata(pdev);
 
-       gpmi_nfc_exit(this);
+       gpmi_nand_exit(this);
        release_resources(this);
        return 0;
 }
index a7685e3a87486b86713fef19da82e9bbad3c6253..4c801fa1872530e681e801d2adaee333d4acffb4 100644 (file)
@@ -26,8 +26,6 @@
 struct resources {
        void __iomem  *gpmi_regs;
        void __iomem  *bch_regs;
-       unsigned int  bch_low_interrupt;
-       unsigned int  bch_high_interrupt;
        unsigned int  dma_low_channel;
        unsigned int  dma_high_channel;
        struct clk    *clock[GPMI_CLK_MAX];
index a264b888c66cc9153e0f160af0c50e4675826a93..a2c804de156bc71d31407020b9ffb133a408b4f1 100644 (file)
@@ -416,10 +416,8 @@ static int jz_nand_probe(struct platform_device *pdev)
        uint8_t nand_maf_id = 0, nand_dev_id = 0;
 
        nand = kzalloc(sizeof(*nand), GFP_KERNEL);
-       if (!nand) {
-               dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+       if (!nand)
                return -ENOMEM;
-       }
 
        ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
        if (ret)
index 327d96c035050ce178711f03ee8ff4dd527a1fb3..687478c9f09c9a92aee00bdc595f6b5a8757bc0c 100644 (file)
@@ -539,20 +539,6 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
        return 0;
 }
 
-static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-                       uint32_t offset, int data_len, const uint8_t *buf,
-                       int oob_required, int page, int cached, int raw)
-{
-       int res;
-
-       chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-       res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
-       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-       lpc32xx_waitfunc(mtd, chip);
-
-       return res;
-}
-
 static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
 {
@@ -627,10 +613,8 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
        struct device_node *np = dev->of_node;
 
        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
-       if (!ncfg) {
-               dev_err(dev, "could not allocate memory for platform data\n");
+       if (!ncfg)
                return NULL;
-       }
 
        of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
        of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
@@ -666,10 +650,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 
        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-       if (!host) {
-               dev_err(&pdev->dev, "failed to allocate device structure.\n");
+       if (!host)
                return -ENOMEM;
-       }
 
        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
@@ -732,9 +714,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        nand_chip->ecc.write_oob = lpc32xx_write_oob;
        nand_chip->ecc.read_oob = lpc32xx_read_oob;
        nand_chip->ecc.strength = 4;
-       nand_chip->write_page = lpc32xx_write_page;
        nand_chip->waitfunc = lpc32xx_waitfunc;
 
+       nand_chip->options = NAND_NO_SUBPAGE_WRITE;
        nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
        nand_chip->bbt_td = &lpc32xx_nand_bbt;
        nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
@@ -764,14 +746,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 
        host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dma_buf) {
-               dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
                res = -ENOMEM;
                goto err_exit3;
        }
 
        host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dummy_buf) {
-               dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
                res = -ENOMEM;
                goto err_exit3;
        }
index 23e6974ccd205ec23a7bf19adcea54ffb3304bfa..53a6742e3da39a2e1e43be81cf11be943c07a886 100644 (file)
@@ -725,10 +725,8 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
        struct device_node *np = dev->of_node;
 
        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
-       if (!ncfg) {
-               dev_err(dev, "could not allocate memory for NAND config\n");
+       if (!ncfg)
                return NULL;
-       }
 
        of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
        of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
@@ -772,10 +770,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 
        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-       if (!host) {
-               dev_err(&pdev->dev, "failed to allocate device structure\n");
+       if (!host)
                return -ENOMEM;
-       }
        host->io_base_dma = rc->start;
 
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
@@ -791,8 +787,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;
-       if (gpio_is_valid(host->ncfg->wp_gpio) &&
-                       gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+       if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+                       host->ncfg->wp_gpio, "NAND WP")) {
                dev_err(&pdev->dev, "GPIO not available\n");
                return -EBUSY;
        }
@@ -808,7 +804,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        mtd->dev.parent = &pdev->dev;
 
        /* Get NAND clock */
-       host->clk = clk_get(&pdev->dev, NULL);
+       host->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                dev_err(&pdev->dev, "Clock failure\n");
                res = -ENOENT;
@@ -858,7 +854,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
                                      GFP_KERNEL);
        if (host->data_buf == NULL) {
-               dev_err(&pdev->dev, "Error allocating memory\n");
                res = -ENOMEM;
                goto err_exit2;
        }
@@ -927,10 +922,8 @@ err_exit3:
        dma_release_channel(host->dma_chan);
 err_exit2:
        clk_disable(host->clk);
-       clk_put(host->clk);
 err_exit1:
        lpc32xx_wp_enable(host);
-       gpio_free(host->ncfg->wp_gpio);
 
        return res;
 }
@@ -953,9 +946,7 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
        writel(tmp, SLC_CTRL(host->io_base));
 
        clk_disable(host->clk);
-       clk_put(host->clk);
        lpc32xx_wp_enable(host);
-       gpio_free(host->ncfg->wp_gpio);
 
        return 0;
 }
index 779e60d12f896f67196bbdd5f7b3338ccccd6742..31ee7cfbc12b628c9599f3d0c78b3de23185dc6c 100644 (file)
@@ -653,10 +653,8 @@ static int mpc5121_nfc_probe(struct platform_device *op)
        }
 
        prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
-       if (!prv) {
-               dev_err(dev, "Memory exhausted!\n");
+       if (!prv)
                return -ENOMEM;
-       }
 
        mtd = &prv->mtd;
        chip = &prv->chip;
@@ -786,7 +784,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
        /* Detect NAND chips */
        if (nand_scan(mtd, be32_to_cpup(chips_no))) {
                dev_err(dev, "NAND Flash not found !\n");
-               devm_free_irq(dev, prv->irq, mtd);
                retval = -ENXIO;
                goto error;
        }
@@ -811,7 +808,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
 
        default:
                dev_err(dev, "Unsupported NAND flash!\n");
-               devm_free_irq(dev, prv->irq, mtd);
                retval = -ENXIO;
                goto error;
        }
@@ -822,7 +818,6 @@ static int mpc5121_nfc_probe(struct platform_device *op)
        retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
        if (retval) {
                dev_err(dev, "Error adding MTD device!\n");
-               devm_free_irq(dev, prv->irq, mtd);
                goto error;
        }
 
@@ -836,11 +831,8 @@ static int mpc5121_nfc_remove(struct platform_device *op)
 {
        struct device *dev = &op->dev;
        struct mtd_info *mtd = dev_get_drvdata(dev);
-       struct nand_chip *chip = mtd->priv;
-       struct mpc5121_nfc_prv *prv = chip->priv;
 
        nand_release(mtd);
-       devm_free_irq(dev, prv->irq, mtd);
        mpc5121_nfc_free(dev, mtd);
 
        return 0;
index 9dfdb06c508b05439cd93a71ce5c30ef8558335e..e9a4835c4dd9887f8546c5e95ce298674d5182a3 100644 (file)
@@ -677,7 +677,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
                ecc_stat >>= 4;
        } while (--no_subpages);
 
-       mtd->ecc_stats.corrected += ret;
        pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
 
        return ret;
@@ -1400,12 +1399,15 @@ static int mxcnd_probe(struct platform_device *pdev)
        int err = 0;
 
        /* Allocate memory for MTD device structure and private data */
-       host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
-                       NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
+       host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+                       GFP_KERNEL);
        if (!host)
                return -ENOMEM;
 
-       host->data_buf = (uint8_t *)(host + 1);
+       /* allocate a temporary buffer for the nand_scan_ident() */
+       host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+       if (!host->data_buf)
+               return -ENOMEM;
 
        host->dev = &pdev->dev;
        /* structures must be linked */
@@ -1512,7 +1514,9 @@ static int mxcnd_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       clk_prepare_enable(host->clk);
+       err = clk_prepare_enable(host->clk);
+       if (err)
+               return err;
        host->clk_act = 1;
 
        /*
@@ -1531,6 +1535,15 @@ static int mxcnd_probe(struct platform_device *pdev)
                goto escan;
        }
 
+       /* allocate the right size buffer now */
+       devm_kfree(&pdev->dev, (void *)host->data_buf);
+       host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
+                                       GFP_KERNEL);
+       if (!host->data_buf) {
+               err = -ENOMEM;
+               goto escan;
+       }
+
        /* Call preset again, with correct writesize this time */
        host->devtype_data->preset(mtd);
 
@@ -1576,6 +1589,8 @@ static int mxcnd_remove(struct platform_device *pdev)
        struct mxc_nand_host *host = platform_get_drvdata(pdev);
 
        nand_release(&host->mtd);
+       if (host->clk_act)
+               clk_disable_unprepare(host->clk);
 
        return 0;
 }
index bd39f7b67906f65db526cbd0d1d27bb84ad93277..59eba5d2c68574fae787d36d2bd9e5ed29eb9f25 100644 (file)
@@ -29,6 +29,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
@@ -201,6 +203,51 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
        }
 }
 
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+       struct nand_chip *chip = mtd->priv;
+
+       chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+       struct nand_chip *chip = mtd->priv;
+       uint16_t word = byte;
+
+       /*
+        * It's not entirely clear what should happen to I/O[15:8] when writing
+        * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+        *
+        *    When the host supports a 16-bit bus width, only data is
+        *    transferred at the 16-bit width. All address and command line
+        *    transfers shall use only the lower 8-bits of the data bus. During
+        *    command transfers, the host may place any value on the upper
+        *    8-bits of the data bus. During address transfers, the host shall
+        *    set the upper 8-bits of the data bus to 00h.
+        *
+        * One user of the write_byte callback is nand_onfi_set_features. The
+        * four parameters are specified to be written to I/O[7:0], but this is
+        * neither an address nor a command transfer. Let's assume a 0 on the
+        * upper I/O lines is OK.
+        */
+       chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
 /**
  * nand_write_buf - [DEFAULT] write buffer to chip
  * @mtd: MTD device structure
@@ -1407,6 +1454,30 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
        return NULL;
 }
 
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+       struct nand_chip *chip = mtd->priv;
+
+       pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+       if (retry_mode >= chip->read_retries)
+               return -EINVAL;
+
+       if (!chip->setup_read_retry)
+               return -EOPNOTSUPP;
+
+       return chip->setup_read_retry(mtd, retry_mode);
+}
+
 /**
  * nand_do_read_ops - [INTERN] Read data with ECC
  * @mtd: MTD device structure
@@ -1420,7 +1491,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 {
        int chipnr, page, realpage, col, bytes, aligned, oob_required;
        struct nand_chip *chip = mtd->priv;
-       struct mtd_ecc_stats stats;
        int ret = 0;
        uint32_t readlen = ops->len;
        uint32_t oobreadlen = ops->ooblen;
@@ -1429,8 +1499,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 
        uint8_t *bufpoi, *oob, *buf;
        unsigned int max_bitflips = 0;
-
-       stats = mtd->ecc_stats;
+       int retry_mode = 0;
+       bool ecc_fail = false;
 
        chipnr = (int)(from >> chip->chip_shift);
        chip->select_chip(mtd, chipnr);
@@ -1445,6 +1515,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
        oob_required = oob ? 1 : 0;
 
        while (1) {
+               unsigned int ecc_failures = mtd->ecc_stats.failed;
+
                bytes = min(mtd->writesize - col, readlen);
                aligned = (bytes == mtd->writesize);
 
@@ -1452,6 +1524,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                if (realpage != chip->pagebuf || oob) {
                        bufpoi = aligned ? buf : chip->buffers->databuf;
 
+read_retry:
                        chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
 
                        /*
@@ -1481,7 +1554,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                        /* Transfer not aligned data */
                        if (!aligned) {
                                if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
-                                   !(mtd->ecc_stats.failed - stats.failed) &&
+                                   !(mtd->ecc_stats.failed - ecc_failures) &&
                                    (ops->mode != MTD_OPS_RAW)) {
                                        chip->pagebuf = realpage;
                                        chip->pagebuf_bitflips = ret;
@@ -1492,8 +1565,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                                memcpy(buf, chip->buffers->databuf + col, bytes);
                        }
 
-                       buf += bytes;
-
                        if (unlikely(oob)) {
                                int toread = min(oobreadlen, max_oobsize);
 
@@ -1511,6 +1582,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                                else
                                        nand_wait_ready(mtd);
                        }
+
+                       if (mtd->ecc_stats.failed - ecc_failures) {
+                               if (retry_mode + 1 <= chip->read_retries) {
+                                       retry_mode++;
+                                       ret = nand_setup_read_retry(mtd,
+                                                       retry_mode);
+                                       if (ret < 0)
+                                               break;
+
+                                       /* Reset failures; retry */
+                                       mtd->ecc_stats.failed = ecc_failures;
+                                       goto read_retry;
+                               } else {
+                                       /* No more retry modes; real failure */
+                                       ecc_fail = true;
+                               }
+                       }
+
+                       buf += bytes;
                } else {
                        memcpy(buf, chip->buffers->databuf + col, bytes);
                        buf += bytes;
@@ -1520,6 +1610,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 
                readlen -= bytes;
 
+               /* Reset to retry mode 0 */
+               if (retry_mode) {
+                       ret = nand_setup_read_retry(mtd, 0);
+                       if (ret < 0)
+                               break;
+                       retry_mode = 0;
+               }
+
                if (!readlen)
                        break;
 
@@ -1545,7 +1643,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
        if (ret < 0)
                return ret;
 
-       if (mtd->ecc_stats.failed - stats.failed)
+       if (ecc_fail)
                return -EBADMSG;
 
        return max_bitflips;
@@ -2716,6 +2814,7 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
                        int addr, uint8_t *subfeature_param)
 {
        int status;
+       int i;
 
        if (!chip->onfi_version ||
            !(le16_to_cpu(chip->onfi_params.opt_cmd)
@@ -2723,7 +2822,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
                return -EINVAL;
 
        chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
-       chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+       for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+               chip->write_byte(mtd, subfeature_param[i]);
+
        status = chip->waitfunc(mtd, chip);
        if (status & NAND_STATUS_FAIL)
                return -EIO;
@@ -2740,6 +2841,8 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
                        int addr, uint8_t *subfeature_param)
 {
+       int i;
+
        if (!chip->onfi_version ||
            !(le16_to_cpu(chip->onfi_params.opt_cmd)
              & ONFI_OPT_CMD_SET_GET_FEATURES))
@@ -2749,7 +2852,8 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
        memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
 
        chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
-       chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+       for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+               *subfeature_param++ = chip->read_byte(mtd);
        return 0;
 }
 
@@ -2812,6 +2916,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
                chip->block_markbad = nand_default_block_markbad;
        if (!chip->write_buf || chip->write_buf == nand_write_buf)
                chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+       if (!chip->write_byte || chip->write_byte == nand_write_byte)
+               chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
        if (!chip->read_buf || chip->read_buf == nand_read_buf)
                chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
        if (!chip->scan_bbt)
@@ -2926,6 +3032,30 @@ ext_out:
        return ret;
 }
 
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+       struct nand_chip *chip = mtd->priv;
+       uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+       return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+                       feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+               struct nand_onfi_params *p)
+{
+       struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+       if (le16_to_cpu(p->vendor_revision) < 1)
+               return;
+
+       chip->read_retries = micron->read_retry_options;
+       chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
 /*
  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  */
@@ -2979,7 +3109,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
                chip->onfi_version = 10;
 
        if (!chip->onfi_version) {
-               pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
+               pr_info("unsupported ONFI version: %d\n", val);
                return 0;
        }
 
@@ -3032,6 +3162,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
                pr_warn("Could not retrieve ONFI ECC requirements\n");
        }
 
+       if (p->jedec_id == NAND_MFR_MICRON)
+               nand_onfi_detect_micron(chip, p);
+
        return 1;
 }
 
@@ -3152,9 +3285,12 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
                        mtd->oobsize = 512;
                        break;
                case 6:
-               default: /* Other cases are "reserved" (unknown) */
                        mtd->oobsize = 640;
                        break;
+               case 7:
+               default: /* Other cases are "reserved" (unknown) */
+                       mtd->oobsize = 1024;
+                       break;
                }
                extid >>= 2;
                /* Calc blocksize */
@@ -3325,6 +3461,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
 
                *busw = type->options & NAND_BUSWIDTH_16;
 
+               if (!mtd->name)
+                       mtd->name = type->name;
+
                return true;
        }
        return false;
@@ -3372,8 +3511,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
                id_data[i] = chip->read_byte(mtd);
 
        if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
-               pr_info("%s: second ID read did not match "
-                       "%02x,%02x against %02x,%02x\n", __func__,
+               pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
                        *maf_id, *dev_id, id_data[0], id_data[1]);
                return ERR_PTR(-ENODEV);
        }
@@ -3440,10 +3578,10 @@ ident_done:
                 * Check, if buswidth is correct. Hardware drivers should set
                 * chip correct!
                 */
-               pr_info("NAND device: Manufacturer ID:"
-                       " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
-                       *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
-               pr_warn("NAND bus width %d instead %d bit\n",
+               pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+                       *maf_id, *dev_id);
+               pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+               pr_warn("bus width %d instead %d bit\n",
                           (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
                           busw ? 16 : 8);
                return ERR_PTR(-EINVAL);
@@ -3472,14 +3610,13 @@ ident_done:
        if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
                chip->cmdfunc = nand_command_lp;
 
-       pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
-               *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+       pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+               *maf_id, *dev_id);
+       pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
                chip->onfi_version ? chip->onfi_params.model : type->name);
-
-       pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+       pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
                (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
                mtd->writesize, mtd->oobsize);
-
        return type;
 }
 
@@ -3535,7 +3672,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
                chip->select_chip(mtd, -1);
        }
        if (i > 1)
-               pr_info("%d NAND chips detected\n", i);
+               pr_info("%d chips detected\n", i);
 
        /* Store the number of chips and calc total size for mtd */
        chip->numchips = i;
index a87b0a3afa351a1b8f8991836cb99af930d858da..daa2faacd7d09f99422fa3f64fc6947a7f4f92cc 100644 (file)
@@ -169,6 +169,8 @@ struct nand_manufacturers nand_manuf_ids[] = {
        {NAND_MFR_AMD, "AMD/Spansion"},
        {NAND_MFR_MACRONIX, "Macronix"},
        {NAND_MFR_EON, "Eon"},
+       {NAND_MFR_SANDISK, "SanDisk"},
+       {NAND_MFR_INTEL, "Intel"},
        {0x0, "Unknown"}
 };
 
index 52115151e4a7f325491a3dbf6f16ccd87221b37a..9ee09a8177c67feae055da60dcb033e9bf76f4c4 100644 (file)
@@ -241,12 +241,10 @@ static int nuc900_nand_probe(struct platform_device *pdev)
 {
        struct nuc900_nand *nuc900_nand;
        struct nand_chip *chip;
-       int retval;
        struct resource *res;
 
-       retval = 0;
-
-       nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+       nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
+                                  GFP_KERNEL);
        if (!nuc900_nand)
                return -ENOMEM;
        chip = &(nuc900_nand->chip);
@@ -255,11 +253,9 @@ static int nuc900_nand_probe(struct platform_device *pdev)
        nuc900_nand->mtd.owner  = THIS_MODULE;
        spin_lock_init(&nuc900_nand->lock);
 
-       nuc900_nand->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(nuc900_nand->clk)) {
-               retval = -ENOENT;
-               goto fail1;
-       }
+       nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(nuc900_nand->clk))
+               return -ENOENT;
        clk_enable(nuc900_nand->clk);
 
        chip->cmdfunc           = nuc900_nand_command_lp;
@@ -272,57 +268,29 @@ static int nuc900_nand_probe(struct platform_device *pdev)
        chip->ecc.mode          = NAND_ECC_SOFT;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               retval = -ENXIO;
-               goto fail1;
-       }
-
-       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-               retval = -EBUSY;
-               goto fail1;
-       }
-
-       nuc900_nand->reg = ioremap(res->start, resource_size(res));
-       if (!nuc900_nand->reg) {
-               retval = -ENOMEM;
-               goto fail2;
-       }
+       nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(nuc900_nand->reg))
+               return PTR_ERR(nuc900_nand->reg);
 
        nuc900_nand_enable(nuc900_nand);
 
-       if (nand_scan(&(nuc900_nand->mtd), 1)) {
-               retval = -ENXIO;
-               goto fail3;
-       }
+       if (nand_scan(&(nuc900_nand->mtd), 1))
+               return -ENXIO;
 
        mtd_device_register(&(nuc900_nand->mtd), partitions,
                            ARRAY_SIZE(partitions));
 
        platform_set_drvdata(pdev, nuc900_nand);
 
-       return retval;
-
-fail3: iounmap(nuc900_nand->reg);
-fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(nuc900_nand);
-       return retval;
+       return 0;
 }
 
 static int nuc900_nand_remove(struct platform_device *pdev)
 {
        struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
-       struct resource *res;
 
        nand_release(&nuc900_nand->mtd);
-       iounmap(nuc900_nand->reg);
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
        clk_disable(nuc900_nand->clk);
-       clk_put(nuc900_nand->clk);
-
-       kfree(nuc900_nand);
 
        return 0;
 }
index f77725009907751e62963f02a6ce52b10e41e366..ef4190a02b7bd591af7c97be8ee1e3caecf6f83c 100644 (file)
@@ -1730,13 +1730,7 @@ static int omap_nand_probe(struct platform_device *pdev)
                break;
 
        case NAND_OMAP_POLLED:
-               if (nand_chip->options & NAND_BUSWIDTH_16) {
-                       nand_chip->read_buf   = omap_read_buf16;
-                       nand_chip->write_buf  = omap_write_buf16;
-               } else {
-                       nand_chip->read_buf   = omap_read_buf8;
-                       nand_chip->write_buf  = omap_write_buf8;
-               }
+               /* Use nand_base defaults for {read,write}_buf */
                break;
 
        case NAND_OMAP_PREFETCH_DMA:
index a393a5b6ce1e5028155a415ae33db88067a7bfff..dd7fe817eafb319ebc9ae92b2cbbac8613b38add 100644 (file)
@@ -87,7 +87,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 
        nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
        if (!nc) {
-               printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
                ret = -ENOMEM;
                goto no_res;
        }
@@ -101,7 +100,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 
        io_base = ioremap(res->start, resource_size(res));
        if (!io_base) {
-               printk(KERN_ERR "orion_nand: ioremap failed\n");
+               dev_err(&pdev->dev, "ioremap failed\n");
                ret = -EIO;
                goto no_res;
        }
@@ -110,7 +109,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
                board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
                                        GFP_KERNEL);
                if (!board) {
-                       printk(KERN_ERR "orion_nand: failed to allocate board structure.\n");
                        ret = -ENOMEM;
                        goto no_res;
                }
index 4d174366a0f09ec2ebecd99fc8241fc978a05caa..90f871acb0efec737c918532bbdd350ca7969220 100644 (file)
@@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
 static struct platform_driver pasemi_nand_driver =
 {
        .driver = {
-               .name = (char*)driver_name,
+               .name = driver_name,
                .owner = THIS_MODULE,
                .of_match_table = pasemi_nand_match,
        },
index cad4cdc9df399a70c77640be5a634b837996790d..0b068a5c0bff3c05aa2eba8e1ae93a01f6b8db98 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -47,30 +48,16 @@ static int plat_nand_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENXIO;
-
        /* Allocate memory for the device structure (and zero it) */
-       data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
-       if (!data) {
-               dev_err(&pdev->dev, "failed to allocate device structure.\n");
+       data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+                           GFP_KERNEL);
+       if (!data)
                return -ENOMEM;
-       }
-
-       if (!request_mem_region(res->start, resource_size(res),
-                               dev_name(&pdev->dev))) {
-               dev_err(&pdev->dev, "request_mem_region failed\n");
-               err = -EBUSY;
-               goto out_free;
-       }
 
-       data->io_base = ioremap(res->start, resource_size(res));
-       if (data->io_base == NULL) {
-               dev_err(&pdev->dev, "ioremap failed\n");
-               err = -EIO;
-               goto out_release_io;
-       }
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       data->io_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(data->io_base))
+               return PTR_ERR(data->io_base);
 
        data->chip.priv = &data;
        data->mtd.priv = &data->chip;
@@ -122,11 +109,6 @@ static int plat_nand_probe(struct platform_device *pdev)
 out:
        if (pdata->ctrl.remove)
                pdata->ctrl.remove(pdev);
-       iounmap(data->io_base);
-out_release_io:
-       release_mem_region(res->start, resource_size(res));
-out_free:
-       kfree(data);
        return err;
 }
 
@@ -137,16 +119,10 @@ static int plat_nand_remove(struct platform_device *pdev)
 {
        struct plat_nand_data *data = platform_get_drvdata(pdev);
        struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
        nand_release(&data->mtd);
        if (pdata->ctrl.remove)
                pdata->ctrl.remove(pdev);
-       iounmap(data->io_base);
-       release_mem_region(res->start, resource_size(res));
-       kfree(data);
 
        return 0;
 }
index 4b3aaa898a8b6b3a4975d2218370711981cb59c4..2a7a0b27ac38dccc511abd296742a767ef183133 100644 (file)
@@ -7,6 +7,8 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
  */
 
 #include <linux/kernel.h>
@@ -24,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_mtd.h>
 
 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 #define ARCH_HAS_DMA
@@ -35,6 +38,7 @@
 
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
+#define NAND_DEV_READY_TIMEOUT  50
 #define        CHIP_DELAY_TIMEOUT      (2 * HZ/10)
 #define NAND_STOP_DELAY                (2 * HZ/50)
 #define PAGE_CHUNK_SIZE                (2048)
@@ -54,6 +58,7 @@
 #define NDPCR          (0x18) /* Page Count Register */
 #define NDBDR0         (0x1C) /* Bad Block Register 0 */
 #define NDBDR1         (0x20) /* Bad Block Register 1 */
+#define NDECCCTRL      (0x28) /* ECC control */
 #define NDDB           (0x40) /* Data Buffer */
 #define NDCB0          (0x48) /* Command Buffer0 */
 #define NDCB1          (0x4C) /* Command Buffer1 */
@@ -80,6 +85,9 @@
 #define NDCR_INT_MASK           (0xFFF)
 
 #define NDSR_MASK              (0xfff)
+#define NDSR_ERR_CNT_OFF       (16)
+#define NDSR_ERR_CNT_MASK       (0x1f)
+#define NDSR_ERR_CNT(sr)       ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
 #define NDSR_RDY                (0x1 << 12)
 #define NDSR_FLASH_RDY          (0x1 << 11)
 #define NDSR_CS0_PAGED         (0x1 << 10)
@@ -88,8 +96,8 @@
 #define NDSR_CS1_CMDD          (0x1 << 7)
 #define NDSR_CS0_BBD           (0x1 << 6)
 #define NDSR_CS1_BBD           (0x1 << 5)
-#define NDSR_DBERR             (0x1 << 4)
-#define NDSR_SBERR             (0x1 << 3)
+#define NDSR_UNCORERR          (0x1 << 4)
+#define NDSR_CORERR            (0x1 << 3)
 #define NDSR_WRDREQ            (0x1 << 2)
 #define NDSR_RDDREQ            (0x1 << 1)
 #define NDSR_WRCMDREQ          (0x1)
 #define NDCB0_ST_ROW_EN         (0x1 << 26)
 #define NDCB0_AUTO_RS          (0x1 << 25)
 #define NDCB0_CSEL             (0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK        (0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x)  (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
 #define NDCB0_CMD_TYPE_MASK    (0x7 << 21)
 #define NDCB0_CMD_TYPE(x)      (((x) << 21) & NDCB0_CMD_TYPE_MASK)
 #define NDCB0_NC               (0x1 << 20)
 #define NDCB0_CMD1_MASK                (0xff)
 #define NDCB0_ADDR_CYC_SHIFT   (16)
 
+#define EXT_CMD_TYPE_DISPATCH  6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW  5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ      4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR   4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL     3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW   1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO      0 /* Monolithic read/write */
+
 /* macros for registers read/write */
 #define nand_writel(info, off, val)    \
        __raw_writel((val), (info)->mmio_base + (off))
@@ -120,9 +138,9 @@ enum {
        ERR_NONE        = 0,
        ERR_DMABUSERR   = -1,
        ERR_SENDCMD     = -2,
-       ERR_DBERR       = -3,
+       ERR_UNCORERR    = -3,
        ERR_BBERR       = -4,
-       ERR_SBERR       = -5,
+       ERR_CORERR      = -5,
 };
 
 enum {
@@ -149,7 +167,6 @@ struct pxa3xx_nand_host {
        void                    *info_data;
 
        /* page size of attached chip */
-       unsigned int            page_size;
        int                     use_ecc;
        int                     cs;
 
@@ -167,11 +184,13 @@ struct pxa3xx_nand_info {
        struct clk              *clk;
        void __iomem            *mmio_base;
        unsigned long           mmio_phys;
-       struct completion       cmd_complete;
+       struct completion       cmd_complete, dev_ready;
 
        unsigned int            buf_start;
        unsigned int            buf_count;
        unsigned int            buf_size;
+       unsigned int            data_buff_pos;
+       unsigned int            oob_buff_pos;
 
        /* DMA information */
        int                     drcmr_dat;
@@ -195,13 +214,18 @@ struct pxa3xx_nand_info {
 
        int                     cs;
        int                     use_ecc;        /* use HW ECC ? */
+       int                     ecc_bch;        /* using BCH ECC? */
        int                     use_dma;        /* use DMA ? */
        int                     use_spare;      /* use spare ? */
-       int                     is_ready;
+       int                     need_wait;
 
-       unsigned int            page_size;      /* page size of attached chip */
-       unsigned int            data_size;      /* data size in FIFO */
+       unsigned int            data_size;      /* data to be read from FIFO */
+       unsigned int            chunk_size;     /* split commands chunk size */
        unsigned int            oob_size;
+       unsigned int            spare_size;
+       unsigned int            ecc_size;
+       unsigned int            ecc_err_cnt;
+       unsigned int            max_bitflips;
        int                     retcode;
 
        /* cached register value */
@@ -239,6 +263,64 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
 { "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
 };
 
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+       .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+               | NAND_BBT_2BIT | NAND_BBT_VERSION,
+       .offs = 8,
+       .len = 6,
+       .veroffs = 14,
+       .maxblocks = 8,         /* Last 8 blocks in each chip */
+       .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+       .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+               | NAND_BBT_2BIT | NAND_BBT_VERSION,
+       .offs = 8,
+       .len = 6,
+       .veroffs = 14,
+       .maxblocks = 8,         /* Last 8 blocks in each chip */
+       .pattern = bbt_mirror_pattern
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+       .eccbytes = 32,
+       .eccpos = {
+               32, 33, 34, 35, 36, 37, 38, 39,
+               40, 41, 42, 43, 44, 45, 46, 47,
+               48, 49, 50, 51, 52, 53, 54, 55,
+               56, 57, 58, 59, 60, 61, 62, 63},
+       .oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+       .eccbytes = 64,
+       .eccpos = {
+               32,  33,  34,  35,  36,  37,  38,  39,
+               40,  41,  42,  43,  44,  45,  46,  47,
+               48,  49,  50,  51,  52,  53,  54,  55,
+               56,  57,  58,  59,  60,  61,  62,  63,
+               96,  97,  98,  99,  100, 101, 102, 103,
+               104, 105, 106, 107, 108, 109, 110, 111,
+               112, 113, 114, 115, 116, 117, 118, 119,
+               120, 121, 122, 123, 124, 125, 126, 127},
+       /* Bootrom looks in bytes 0 & 5 for bad blocks */
+       .oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+       .eccbytes = 128,
+       .eccpos = {
+               32,  33,  34,  35,  36,  37,  38,  39,
+               40,  41,  42,  43,  44,  45,  46,  47,
+               48,  49,  50,  51,  52,  53,  54,  55,
+               56,  57,  58,  59,  60,  61,  62,  63},
+       .oobfree = { }
+};
+
 /* Define a default flash type setting serve as flash detecting only */
 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
 
@@ -256,6 +338,29 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
 /* convert nano-seconds to nand flash controller clock cycles */
 #define ns2cycle(ns, clk)      (int)((ns) * (clk / 1000000) / 1000)
 
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+       {
+               .compatible = "marvell,pxa3xx-nand",
+               .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
+       },
+       {
+               .compatible = "marvell,armada370-nand",
+               .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+       const struct of_device_id *of_id =
+                       of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+       if (!of_id)
+               return PXA3XX_NAND_VARIANT_PXA;
+       return (enum pxa3xx_nand_variant)of_id->data;
+}
+
 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
                                   const struct pxa3xx_nand_timing *t)
 {
@@ -280,25 +385,23 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
        nand_writel(info, NDTR1CS0, ndtr1);
 }
 
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
+/*
+ * Set the data and OOB size, depending on the selected
+ * spare and ECC configuration.
+ * Only applicable to READ0, READOOB and PAGEPROG commands.
+ */
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+                               struct mtd_info *mtd)
 {
-       struct pxa3xx_nand_host *host = info->host[info->cs];
        int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
 
-       info->data_size = host->page_size;
-       if (!oob_enable) {
-               info->oob_size = 0;
+       info->data_size = mtd->writesize;
+       if (!oob_enable)
                return;
-       }
 
-       switch (host->page_size) {
-       case 2048:
-               info->oob_size = (info->use_ecc) ? 40 : 64;
-               break;
-       case 512:
-               info->oob_size = (info->use_ecc) ? 8 : 16;
-               break;
-       }
+       info->oob_size = info->spare_size;
+       if (!info->use_ecc)
+               info->oob_size += info->ecc_size;
 }
 
 /**
@@ -313,10 +416,15 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
 
        ndcr = info->reg_ndcr;
 
-       if (info->use_ecc)
+       if (info->use_ecc) {
                ndcr |= NDCR_ECC_EN;
-       else
+               if (info->ecc_bch)
+                       nand_writel(info, NDECCCTRL, 0x1);
+       } else {
                ndcr &= ~NDCR_ECC_EN;
+               if (info->ecc_bch)
+                       nand_writel(info, NDECCCTRL, 0x0);
+       }
 
        if (info->use_dma)
                ndcr |= NDCR_DMA_EN;
@@ -375,26 +483,39 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
 
 static void handle_data_pio(struct pxa3xx_nand_info *info)
 {
+       unsigned int do_bytes = min(info->data_size, info->chunk_size);
+
        switch (info->state) {
        case STATE_PIO_WRITING:
-               __raw_writesl(info->mmio_base + NDDB, info->data_buff,
-                               DIV_ROUND_UP(info->data_size, 4));
+               __raw_writesl(info->mmio_base + NDDB,
+                             info->data_buff + info->data_buff_pos,
+                             DIV_ROUND_UP(do_bytes, 4));
+
                if (info->oob_size > 0)
-                       __raw_writesl(info->mmio_base + NDDB, info->oob_buff,
-                                       DIV_ROUND_UP(info->oob_size, 4));
+                       __raw_writesl(info->mmio_base + NDDB,
+                                     info->oob_buff + info->oob_buff_pos,
+                                     DIV_ROUND_UP(info->oob_size, 4));
                break;
        case STATE_PIO_READING:
-               __raw_readsl(info->mmio_base + NDDB, info->data_buff,
-                               DIV_ROUND_UP(info->data_size, 4));
+               __raw_readsl(info->mmio_base + NDDB,
+                            info->data_buff + info->data_buff_pos,
+                            DIV_ROUND_UP(do_bytes, 4));
+
                if (info->oob_size > 0)
-                       __raw_readsl(info->mmio_base + NDDB, info->oob_buff,
-                                       DIV_ROUND_UP(info->oob_size, 4));
+                       __raw_readsl(info->mmio_base + NDDB,
+                                    info->oob_buff + info->oob_buff_pos,
+                                    DIV_ROUND_UP(info->oob_size, 4));
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }
+
+       /* Update buffer pointers for multi-page read/write */
+       info->data_buff_pos += do_bytes;
+       info->oob_buff_pos += info->oob_size;
+       info->data_size -= do_bytes;
 }
 
 #ifdef ARCH_HAS_DMA
@@ -452,7 +573,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
 {
        struct pxa3xx_nand_info *info = devid;
-       unsigned int status, is_completed = 0;
+       unsigned int status, is_completed = 0, is_ready = 0;
        unsigned int ready, cmd_done;
 
        if (info->cs == 0) {
@@ -465,10 +586,25 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
 
        status = nand_readl(info, NDSR);
 
-       if (status & NDSR_DBERR)
-               info->retcode = ERR_DBERR;
-       if (status & NDSR_SBERR)
-               info->retcode = ERR_SBERR;
+       if (status & NDSR_UNCORERR)
+               info->retcode = ERR_UNCORERR;
+       if (status & NDSR_CORERR) {
+               info->retcode = ERR_CORERR;
+               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+                   info->ecc_bch)
+                       info->ecc_err_cnt = NDSR_ERR_CNT(status);
+               else
+                       info->ecc_err_cnt = 1;
+
+               /*
+                * Each chunk composing a page is corrected independently,
+                * and we need to store maximum number of corrected bitflips
+                * to return it to the MTD layer in ecc.read_page().
+                */
+               info->max_bitflips = max_t(unsigned int,
+                                          info->max_bitflips,
+                                          info->ecc_err_cnt);
+       }
        if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
                /* whether use dma to transfer data */
                if (info->use_dma) {
@@ -488,8 +624,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
                is_completed = 1;
        }
        if (status & ready) {
-               info->is_ready = 1;
                info->state = STATE_READY;
+               is_ready = 1;
        }
 
        if (status & NDSR_WRCMDREQ) {
@@ -518,6 +654,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
        nand_writel(info, NDSR, status);
        if (is_completed)
                complete(&info->cmd_complete);
+       if (is_ready)
+               complete(&info->dev_ready);
 NORMAL_IRQ_EXIT:
        return IRQ_HANDLED;
 }
@@ -530,51 +668,94 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
        return 1;
 }
 
-static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
-               uint16_t column, int page_addr)
+static void set_command_address(struct pxa3xx_nand_info *info,
+               unsigned int page_size, uint16_t column, int page_addr)
 {
-       int addr_cycle, exec_cmd;
-       struct pxa3xx_nand_host *host;
-       struct mtd_info *mtd;
+       /* small page addr setting */
+       if (page_size < PAGE_CHUNK_SIZE) {
+               info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+                               | (column & 0xFF);
 
-       host = info->host[info->cs];
-       mtd = host->mtd;
-       addr_cycle = 0;
-       exec_cmd = 1;
+               info->ndcb2 = 0;
+       } else {
+               info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+                               | (column & 0xFFFF);
+
+               if (page_addr & 0xFF0000)
+                       info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+               else
+                       info->ndcb2 = 0;
+       }
+}
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+       struct pxa3xx_nand_host *host = info->host[info->cs];
+       struct mtd_info *mtd = host->mtd;
 
        /* reset data and oob column point to handle data */
        info->buf_start         = 0;
        info->buf_count         = 0;
        info->oob_size          = 0;
+       info->data_buff_pos     = 0;
+       info->oob_buff_pos      = 0;
        info->use_ecc           = 0;
        info->use_spare         = 1;
-       info->is_ready          = 0;
        info->retcode           = ERR_NONE;
-       if (info->cs != 0)
-               info->ndcb0 = NDCB0_CSEL;
-       else
-               info->ndcb0 = 0;
+       info->ecc_err_cnt       = 0;
+       info->ndcb3             = 0;
+       info->need_wait         = 0;
 
        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_PAGEPROG:
                info->use_ecc = 1;
        case NAND_CMD_READOOB:
-               pxa3xx_set_datasize(info);
+               pxa3xx_set_datasize(info, mtd);
                break;
        case NAND_CMD_PARAM:
                info->use_spare = 0;
                break;
-       case NAND_CMD_SEQIN:
-               exec_cmd = 0;
-               break;
        default:
                info->ndcb1 = 0;
                info->ndcb2 = 0;
-               info->ndcb3 = 0;
                break;
        }
 
+       /*
+        * If we are about to issue a read command, or about to set
+        * the write address, then clean the data buffer.
+        */
+       if (command == NAND_CMD_READ0 ||
+           command == NAND_CMD_READOOB ||
+           command == NAND_CMD_SEQIN) {
+
+               info->buf_count = mtd->writesize + mtd->oobsize;
+               memset(info->data_buff, 0xFF, info->buf_count);
+       }
+
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+               int ext_cmd_type, uint16_t column, int page_addr)
+{
+       int addr_cycle, exec_cmd;
+       struct pxa3xx_nand_host *host;
+       struct mtd_info *mtd;
+
+       host = info->host[info->cs];
+       mtd = host->mtd;
+       addr_cycle = 0;
+       exec_cmd = 1;
+
+       if (info->cs != 0)
+               info->ndcb0 = NDCB0_CSEL;
+       else
+               info->ndcb0 = 0;
+
+       if (command == NAND_CMD_SEQIN)
+               exec_cmd = 0;
+
        addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
                                    + host->col_addr_cycles);
 
@@ -589,30 +770,42 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
                if (command == NAND_CMD_READOOB)
                        info->buf_start += mtd->writesize;
 
-               /* Second command setting for large pages */
-               if (host->page_size >= PAGE_CHUNK_SIZE)
+               /*
+                * Multiple page read needs an 'extended command type' field,
+                * which is either naked-read or last-read according to the
+                * state.
+                */
+               if (mtd->writesize == PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+               } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+                       info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+                                       | NDCB0_LEN_OVRD
+                                       | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+                       info->ndcb3 = info->chunk_size +
+                                     info->oob_size;
+               }
+
+               set_command_address(info, mtd->writesize, column, page_addr);
+               break;
 
        case NAND_CMD_SEQIN:
-               /* small page addr setting */
-               if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
-                       info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
-                                       | (column & 0xFF);
 
-                       info->ndcb2 = 0;
-               } else {
-                       info->ndcb1 = ((page_addr & 0xFFFF) << 16)
-                                       | (column & 0xFFFF);
+               info->buf_start = column;
+               set_command_address(info, mtd->writesize, 0, page_addr);
 
-                       if (page_addr & 0xFF0000)
-                               info->ndcb2 = (page_addr & 0xFF0000) >> 16;
-                       else
-                               info->ndcb2 = 0;
+               /*
+                * Multiple page programming needs to execute the initial
+                * SEQIN command that sets the page address.
+                */
+               if (mtd->writesize > PAGE_CHUNK_SIZE) {
+                       info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+                               | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+                               | addr_cycle
+                               | command;
+                       /* No data transfer in this case */
+                       info->data_size = 0;
+                       exec_cmd = 1;
                }
-
-               info->buf_count = mtd->writesize + mtd->oobsize;
-               memset(info->data_buff, 0xFF, info->buf_count);
-
                break;
 
        case NAND_CMD_PAGEPROG:
@@ -622,13 +815,40 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
                        break;
                }
 
-               info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
-                               | NDCB0_AUTO_RS
-                               | NDCB0_ST_ROW_EN
-                               | NDCB0_DBC
-                               | (NAND_CMD_PAGEPROG << 8)
-                               | NAND_CMD_SEQIN
-                               | addr_cycle;
+               /* Second command setting for large pages */
+               if (mtd->writesize > PAGE_CHUNK_SIZE) {
+                       /*
+                        * Multiple page write uses the 'extended command'
+                        * field. This can be used to issue a command dispatch
+                        * or a naked-write depending on the current stage.
+                        */
+                       info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+                                       | NDCB0_LEN_OVRD
+                                       | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+                       info->ndcb3 = info->chunk_size +
+                                     info->oob_size;
+
+                       /*
+                        * This is the command dispatch that completes a chunked
+                        * page program operation.
+                        */
+                       if (info->data_size == 0) {
+                               info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+                                       | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+                                       | command;
+                               info->ndcb1 = 0;
+                               info->ndcb2 = 0;
+                               info->ndcb3 = 0;
+                       }
+               } else {
+                       info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+                                       | NDCB0_AUTO_RS
+                                       | NDCB0_ST_ROW_EN
+                                       | NDCB0_DBC
+                                       | (NAND_CMD_PAGEPROG << 8)
+                                       | NAND_CMD_SEQIN
+                                       | addr_cycle;
+               }
                break;
 
        case NAND_CMD_PARAM:
@@ -691,8 +911,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
        return exec_cmd;
 }
 
-static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
-                               int column, int page_addr)
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+                        int column, int page_addr)
 {
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
@@ -717,10 +937,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }
 
+       prepare_start_command(info, command);
+
        info->state = STATE_PREPARED;
-       exec_cmd = prepare_command_pool(info, command, column, page_addr);
+       exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
        if (exec_cmd) {
                init_completion(&info->cmd_complete);
+               init_completion(&info->dev_ready);
+               info->need_wait = 1;
                pxa3xx_nand_start(info);
 
                ret = wait_for_completion_timeout(&info->cmd_complete,
@@ -734,6 +959,117 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
        info->state = STATE_IDLE;
 }
 
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+                                 const unsigned command,
+                                 int column, int page_addr)
+{
+       struct pxa3xx_nand_host *host = mtd->priv;
+       struct pxa3xx_nand_info *info = host->info_data;
+       int ret, exec_cmd, ext_cmd_type;
+
+       /*
+        * if this is a x16 device then convert the input
+        * "byte" address into a "word" address appropriate
+        * for indexing a word-oriented device
+        */
+       if (info->reg_ndcr & NDCR_DWIDTH_M)
+               column /= 2;
+
+       /*
+        * There may be different NAND chip hooked to
+        * different chip select, so check whether
+        * chip select has been changed, if yes, reset the timing
+        */
+       if (info->cs != host->cs) {
+               info->cs = host->cs;
+               nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+               nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+       }
+
+       /* Select the extended command for the first command */
+       switch (command) {
+       case NAND_CMD_READ0:
+       case NAND_CMD_READOOB:
+               ext_cmd_type = EXT_CMD_TYPE_MONO;
+               break;
+       case NAND_CMD_SEQIN:
+               ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+               break;
+       case NAND_CMD_PAGEPROG:
+               ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+               break;
+       default:
+               ext_cmd_type = 0;
+               break;
+       }
+
+       prepare_start_command(info, command);
+
+       /*
+        * Prepare the "is ready" completion before starting a command
+        * transaction sequence. If the command is not executed the
+        * completion will be completed, see below.
+        *
+        * We can do that inside the loop because the command variable
+        * is invariant and thus so is the exec_cmd.
+        */
+       info->need_wait = 1;
+       init_completion(&info->dev_ready);
+       do {
+               info->state = STATE_PREPARED;
+               exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+                                              column, page_addr);
+               if (!exec_cmd) {
+                       info->need_wait = 0;
+                       complete(&info->dev_ready);
+                       break;
+               }
+
+               init_completion(&info->cmd_complete);
+               pxa3xx_nand_start(info);
+
+               ret = wait_for_completion_timeout(&info->cmd_complete,
+                               CHIP_DELAY_TIMEOUT);
+               if (!ret) {
+                       dev_err(&info->pdev->dev, "Wait time out!!!\n");
+                       /* Stop State Machine for next command cycle */
+                       pxa3xx_nand_stop(info);
+                       break;
+               }
+
+               /* Check if the sequence is complete */
+               if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+                       break;
+
+               /*
+                * After a split program command sequence has issued
+                * the command dispatch, the command sequence is complete.
+                */
+               if (info->data_size == 0 &&
+                   command == NAND_CMD_PAGEPROG &&
+                   ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+                       break;
+
+               if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+                       /* Last read: issue a 'last naked read' */
+                       if (info->data_size == info->chunk_size)
+                               ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+                       else
+                               ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+               /*
+                * If a split program command has no more data to transfer,
+                * the command dispatch must be issued to complete.
+                */
+               } else if (command == NAND_CMD_PAGEPROG &&
+                          info->data_size == 0) {
+                               ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+               }
+       } while (1);
+
+       info->state = STATE_IDLE;
+}
+
 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
@@ -753,20 +1089,14 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
        chip->read_buf(mtd, buf, mtd->writesize);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
-       if (info->retcode == ERR_SBERR) {
-               switch (info->use_ecc) {
-               case 1:
-                       mtd->ecc_stats.corrected++;
-                       break;
-               case 0:
-               default:
-                       break;
-               }
-       } else if (info->retcode == ERR_DBERR) {
+       if (info->retcode == ERR_CORERR && info->use_ecc) {
+               mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+       } else if (info->retcode == ERR_UNCORERR) {
                /*
                 * for blank page (all 0xff), HW will calculate its ECC as
                 * 0, which is different from the ECC information within
-                * OOB, ignore such double bit errors
+                * OOB, ignore such uncorrectable errors
                 */
                if (is_buf_blank(buf, mtd->writesize))
                        info->retcode = ERR_NONE;
@@ -774,7 +1104,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
                        mtd->ecc_stats.failed++;
        }
 
-       return 0;
+       return info->max_bitflips;
 }
 
 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
@@ -833,21 +1163,27 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
 {
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
+       int ret;
+
+       if (info->need_wait) {
+               ret = wait_for_completion_timeout(&info->dev_ready,
+                               CHIP_DELAY_TIMEOUT);
+               info->need_wait = 0;
+               if (!ret) {
+                       dev_err(&info->pdev->dev, "Ready time out!!!\n");
+                       return NAND_STATUS_FAIL;
+               }
+       }
 
        /* pxa3xx_nand_send_command has waited for command complete */
        if (this->state == FL_WRITING || this->state == FL_ERASING) {
                if (info->retcode == ERR_NONE)
                        return 0;
-               else {
-                       /*
-                        * any error make it return 0x01 which will tell
-                        * the caller the erase and write fail
-                        */
-                       return 0x01;
-               }
+               else
+                       return NAND_STATUS_FAIL;
        }
 
-       return 0;
+       return NAND_STATUS_READY;
 }
 
 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
@@ -869,7 +1205,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
        }
 
        /* calculate flash information */
-       host->page_size = f->page_size;
        host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
 
        /* calculate addressing information */
@@ -906,13 +1241,15 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
        uint32_t ndcr = nand_readl(info, NDCR);
 
        if (ndcr & NDCR_PAGE_SZ) {
-               host->page_size = 2048;
+               /* Controller's FIFO size */
+               info->chunk_size = 2048;
                host->read_id_bytes = 4;
        } else {
-               host->page_size = 512;
+               info->chunk_size = 512;
                host->read_id_bytes = 2;
        }
 
+       /* Set an initial chunk size */
        info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
        info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
        info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
@@ -988,18 +1325,89 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
 {
        struct mtd_info *mtd;
+       struct nand_chip *chip;
        int ret;
+
        mtd = info->host[info->cs]->mtd;
+       chip = mtd->priv;
+
        /* use the common timing to make a try */
        ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
        if (ret)
                return ret;
 
-       pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
-       if (info->is_ready)
-               return 0;
+       chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+       ret = chip->waitfunc(mtd, chip);
+       if (ret & NAND_STATUS_FAIL)
+               return -ENODEV;
+
+       return 0;
+}
 
-       return -ENODEV;
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+                       struct nand_ecc_ctrl *ecc,
+                       int strength, int ecc_stepsize, int page_size)
+{
+       if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+               info->chunk_size = 2048;
+               info->spare_size = 40;
+               info->ecc_size = 24;
+               ecc->mode = NAND_ECC_HW;
+               ecc->size = 512;
+               ecc->strength = 1;
+               return 1;
+
+       } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+               info->chunk_size = 512;
+               info->spare_size = 8;
+               info->ecc_size = 8;
+               ecc->mode = NAND_ECC_HW;
+               ecc->size = 512;
+               ecc->strength = 1;
+               return 1;
+
+       /*
+        * Required ECC: 4-bit correction per 512 bytes
+        * Select: 16-bit correction per 2048 bytes
+        */
+       } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+               info->ecc_bch = 1;
+               info->chunk_size = 2048;
+               info->spare_size = 32;
+               info->ecc_size = 32;
+               ecc->mode = NAND_ECC_HW;
+               ecc->size = info->chunk_size;
+               ecc->layout = &ecc_layout_2KB_bch4bit;
+               ecc->strength = 16;
+               return 1;
+
+       } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+               info->ecc_bch = 1;
+               info->chunk_size = 2048;
+               info->spare_size = 32;
+               info->ecc_size = 32;
+               ecc->mode = NAND_ECC_HW;
+               ecc->size = info->chunk_size;
+               ecc->layout = &ecc_layout_4KB_bch4bit;
+               ecc->strength = 16;
+               return 1;
+
+       /*
+        * Required ECC: 8-bit correction per 512 bytes
+        * Select: 16-bit correction per 1024 bytes
+        */
+       } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+               info->ecc_bch = 1;
+               info->chunk_size = 1024;
+               info->spare_size = 0;
+               info->ecc_size = 32;
+               ecc->mode = NAND_ECC_HW;
+               ecc->size = info->chunk_size;
+               ecc->layout = &ecc_layout_4KB_bch8bit;
+               ecc->strength = 16;
+               return 1;
+       }
+       return 0;
 }
 
 static int pxa3xx_nand_scan(struct mtd_info *mtd)
@@ -1014,6 +1422,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
        uint32_t id = -1;
        uint64_t chipsize;
        int i, ret, num;
+       uint16_t ecc_strength, ecc_step;
 
        if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
                goto KEEP_CONFIG;
@@ -1072,15 +1481,60 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
        pxa3xx_flash_ids[1].name = NULL;
        def = pxa3xx_flash_ids;
 KEEP_CONFIG:
-       chip->ecc.mode = NAND_ECC_HW;
-       chip->ecc.size = host->page_size;
-       chip->ecc.strength = 1;
-
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                chip->options |= NAND_BUSWIDTH_16;
 
+       /* Device detection must be done with ECC disabled */
+       if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+               nand_writel(info, NDECCCTRL, 0x0);
+
        if (nand_scan_ident(mtd, 1, def))
                return -ENODEV;
+
+       if (pdata->flash_bbt) {
+               /*
+                * We'll use a bad block table stored in-flash and don't
+                * allow writing the bad block marker to the flash.
+                */
+               chip->bbt_options |= NAND_BBT_USE_FLASH |
+                                    NAND_BBT_NO_OOB_BBM;
+               chip->bbt_td = &bbt_main_descr;
+               chip->bbt_md = &bbt_mirror_descr;
+       }
+
+       /*
+        * If the page size is bigger than the FIFO size, let's check
+        * we are given the right variant and then switch to the extended
+        * (aka split) command handling.
+        */
+       if (mtd->writesize > PAGE_CHUNK_SIZE) {
+               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+                       chip->cmdfunc = nand_cmdfunc_extended;
+               } else {
+                       dev_err(&info->pdev->dev,
+                               "unsupported page size on this variant\n");
+                       return -ENODEV;
+               }
+       }
+
+       ecc_strength = chip->ecc_strength_ds;
+       ecc_step = chip->ecc_step_ds;
+
+       /* Set default ECC strength requirements on non-ONFI devices */
+       if (ecc_strength < 1 && ecc_step < 1) {
+               ecc_strength = 1;
+               ecc_step = 512;
+       }
+
+       ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+                          ecc_step, mtd->writesize);
+       if (!ret) {
+               dev_err(&info->pdev->dev,
+                       "ECC strength %d at page size %d is not supported\n",
+                       chip->ecc_strength_ds, mtd->writesize);
+               return -ENODEV;
+       }
+
        /* calculate addressing information */
        if (mtd->writesize >= 2048)
                host->col_addr_cycles = 2;
@@ -1121,6 +1575,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
                return -ENOMEM;
 
        info->pdev = pdev;
+       info->variant = pxa3xx_nand_get_variant(pdev);
        for (cs = 0; cs < pdata->num_cs; cs++) {
                mtd = (struct mtd_info *)((unsigned int)&info[1] +
                      (sizeof(*mtd) + sizeof(*host)) * cs);
@@ -1138,11 +1593,12 @@ static int alloc_nand_resource(struct platform_device *pdev)
                chip->controller        = &info->controller;
                chip->waitfunc          = pxa3xx_nand_waitfunc;
                chip->select_chip       = pxa3xx_nand_select_chip;
-               chip->cmdfunc           = pxa3xx_nand_cmdfunc;
                chip->read_word         = pxa3xx_nand_read_word;
                chip->read_byte         = pxa3xx_nand_read_byte;
                chip->read_buf          = pxa3xx_nand_read_buf;
                chip->write_buf         = pxa3xx_nand_write_buf;
+               chip->options           |= NAND_NO_SUBPAGE_WRITE;
+               chip->cmdfunc           = nand_cmdfunc;
        }
 
        spin_lock_init(&chip->controller->lock);
@@ -1254,25 +1710,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id pxa3xx_nand_dt_ids[] = {
-       {
-               .compatible = "marvell,pxa3xx-nand",
-               .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
-       },
-       {}
-};
-MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
-
-static enum pxa3xx_nand_variant
-pxa3xx_nand_get_variant(struct platform_device *pdev)
-{
-       const struct of_device_id *of_id =
-                       of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
-       if (!of_id)
-               return PXA3XX_NAND_VARIANT_PXA;
-       return (enum pxa3xx_nand_variant)of_id->data;
-}
-
 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
 {
        struct pxa3xx_nand_platform_data *pdata;
@@ -1292,6 +1729,7 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
        if (of_get_property(np, "marvell,nand-keep-config", NULL))
                pdata->keep_config = 1;
        of_property_read_u32(np, "num-cs", &pdata->num_cs);
+       pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
 
        pdev->dev.platform_data = pdata;
 
@@ -1329,7 +1767,6 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        }
 
        info = platform_get_drvdata(pdev);
-       info->variant = pxa3xx_nand_get_variant(pdev);
        probe_success = 0;
        for (cs = 0; cs < pdata->num_cs; cs++) {
                struct mtd_info *mtd = info->host[cs]->mtd;
index d65cbe903d4015e37076f0505db987ee4383e0d5..f0918e7411d99147fd67c26c48a8672cb237437f 100644 (file)
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
 
-#include <plat/regs-nand.h>
 #include <linux/platform_data/mtd-nand-s3c2410.h>
 
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF         S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD          S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR         S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA         S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT         S3C2410_NFREG(0x10)
+#define S3C2410_NFECC          S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT         S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD          S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR         S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA         S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT         S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0                S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT         S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0                S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN              (1<<15)
+#define S3C2410_NFCONF_INITECC         (1<<12)
+#define S3C2410_NFCONF_nFCE            (1<<11)
+#define S3C2410_NFCONF_TACLS(x)                ((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x)       ((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x)       ((x)<<0)
+#define S3C2410_NFSTAT_BUSY            (1<<0)
+#define S3C2440_NFCONF_TACLS(x)                ((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x)       ((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x)       ((x)<<4)
+#define S3C2440_NFCONT_INITECC         (1<<4)
+#define S3C2440_NFCONT_nFCE            (1<<1)
+#define S3C2440_NFCONT_ENABLE          (1<<0)
+#define S3C2440_NFSTAT_READY           (1<<0)
+#define S3C2412_NFCONF_NANDBOOT                (1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC   (1<<5)
+#define S3C2412_NFCONT_nFCE0           (1<<1)
+#define S3C2412_NFSTAT_READY           (1<<0)
+
 /* new oob placement block for use with hardware ecc generation
  */
 
@@ -919,7 +953,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (info == NULL) {
-               dev_err(&pdev->dev, "no memory for flash info\n");
                err = -ENOMEM;
                goto exit_error;
        }
@@ -974,7 +1007,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
        size = nr_sets * sizeof(*info->mtds);
        info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (info->mtds == NULL) {
-               dev_err(&pdev->dev, "failed to allocate mtd storage\n");
                err = -ENOMEM;
                goto exit_error;
        }
index a3c84ebbe39227dac06bae3d0f9014849cf48b17..d72783dd7b962f798f1256067e3d4d9f5a1c5366 100644 (file)
@@ -151,7 +151,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
        dma_cap_set(DMA_SLAVE, mask);
 
        flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
-                                           (void *)pdata->slave_id_fifo0_tx);
+                               (void *)(uintptr_t)pdata->slave_id_fifo0_tx);
        dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
                flctl->chan_fifo0_tx);
 
@@ -168,7 +168,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
                goto err;
 
        flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
-                                           (void *)pdata->slave_id_fifo0_rx);
+                               (void *)(uintptr_t)pdata->slave_id_fifo0_rx);
        dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
                flctl->chan_fifo0_rx);
 
@@ -1021,7 +1021,6 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_OF
 struct flctl_soc_config {
        unsigned long flcmncr_val;
        unsigned has_hwecc:1;
@@ -1059,10 +1058,8 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
 
        pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
                                                                GFP_KERNEL);
-       if (!pdata) {
-               dev_err(dev, "%s: failed to allocate config data\n", __func__);
+       if (!pdata)
                return NULL;
-       }
 
        /* set SoC specific options */
        pdata->flcmncr_val = config->flcmncr_val;
@@ -1080,12 +1077,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
 
        return pdata;
 }
-#else /* CONFIG_OF */
-static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
-{
-       return NULL;
-}
-#endif /* CONFIG_OF */
 
 static int flctl_probe(struct platform_device *pdev)
 {
@@ -1094,38 +1085,30 @@ static int flctl_probe(struct platform_device *pdev)
        struct mtd_info *flctl_mtd;
        struct nand_chip *nand;
        struct sh_flctl_platform_data *pdata;
-       int ret = -ENXIO;
+       int ret;
        int irq;
        struct mtd_part_parser_data ppdata = {};
 
-       flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
-       if (!flctl) {
-               dev_err(&pdev->dev, "failed to allocate driver data\n");
+       flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+       if (!flctl)
                return -ENOMEM;
-       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to get I/O memory\n");
-               goto err_iomap;
-       }
-
-       flctl->reg = ioremap(res->start, resource_size(res));
-       if (flctl->reg == NULL) {
-               dev_err(&pdev->dev, "failed to remap I/O memory\n");
-               goto err_iomap;
-       }
+       flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(flctl->reg))
+               return PTR_ERR(flctl->reg);
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get flste irq data\n");
-               goto err_flste;
+               return -ENXIO;
        }
 
-       ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
+       ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+                              "flste", flctl);
        if (ret) {
                dev_err(&pdev->dev, "request interrupt failed.\n");
-               goto err_flste;
+               return ret;
        }
 
        if (pdev->dev.of_node)
@@ -1135,8 +1118,7 @@ static int flctl_probe(struct platform_device *pdev)
 
        if (!pdata) {
                dev_err(&pdev->dev, "no setup data defined\n");
-               ret = -EINVAL;
-               goto err_pdata;
+               return -EINVAL;
        }
 
        platform_set_drvdata(pdev, flctl);
@@ -1190,12 +1172,6 @@ static int flctl_probe(struct platform_device *pdev)
 err_chip:
        flctl_release_dma(flctl);
        pm_runtime_disable(&pdev->dev);
-err_pdata:
-       free_irq(irq, flctl);
-err_flste:
-       iounmap(flctl->reg);
-err_iomap:
-       kfree(flctl);
        return ret;
 }
 
@@ -1206,9 +1182,6 @@ static int flctl_remove(struct platform_device *pdev)
        flctl_release_dma(flctl);
        nand_release(&flctl->mtd);
        pm_runtime_disable(&pdev->dev);
-       free_irq(platform_get_irq(pdev, 0), flctl);
-       iounmap(flctl->reg);
-       kfree(flctl);
 
        return 0;
 }
index 87908d760feb067c3cb179b35bfa5a3c462ab737..e81059b58382f23af69848211415c4095ea36edf 100644 (file)
@@ -121,10 +121,8 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
 
        /* Allocate memory for MTD device structure and private data */
        sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
-       if (!sharpsl) {
-               printk("Unable to allocate SharpSL NAND MTD device structure.\n");
+       if (!sharpsl)
                return -ENOMEM;
-       }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -136,7 +134,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
        /* map physical address */
        sharpsl->io = ioremap(r->start, resource_size(r));
        if (!sharpsl->io) {
-               printk("ioremap to access Sharp SL NAND chip failed\n");
+               dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
                err = -EIO;
                goto err_ioremap;
        }
index a3747c914d5718abd342f53f8253e2d4c0ea3353..fb8fd35fa668516dcb763037e164691482eca9ea 100644 (file)
@@ -371,11 +371,9 @@ static int tmio_probe(struct platform_device *dev)
        if (data == NULL)
                dev_warn(&dev->dev, "NULL platform data!\n");
 
-       tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
-       if (!tmio) {
-               retval = -ENOMEM;
-               goto err_kzalloc;
-       }
+       tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+       if (!tmio)
+               return -ENOMEM;
 
        tmio->dev = dev;
 
@@ -385,22 +383,18 @@ static int tmio_probe(struct platform_device *dev)
        mtd->priv = nand_chip;
        mtd->name = "tmio-nand";
 
-       tmio->ccr = ioremap(ccr->start, resource_size(ccr));
-       if (!tmio->ccr) {
-               retval = -EIO;
-               goto err_iomap_ccr;
-       }
+       tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+       if (!tmio->ccr)
+               return -EIO;
 
        tmio->fcr_base = fcr->start & 0xfffff;
-       tmio->fcr = ioremap(fcr->start, resource_size(fcr));
-       if (!tmio->fcr) {
-               retval = -EIO;
-               goto err_iomap_fcr;
-       }
+       tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+       if (!tmio->fcr)
+               return -EIO;
 
        retval = tmio_hw_init(dev, tmio);
        if (retval)
-               goto err_hwinit;
+               return retval;
 
        /* Set address of NAND IO lines */
        nand_chip->IO_ADDR_R = tmio->fcr;
@@ -428,7 +422,8 @@ static int tmio_probe(struct platform_device *dev)
        /* 15 us command delay time */
        nand_chip->chip_delay = 15;
 
-       retval = request_irq(irq, &tmio_irq, 0, dev_name(&dev->dev), tmio);
+       retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+                                 dev_name(&dev->dev), tmio);
        if (retval) {
                dev_err(&dev->dev, "request_irq error %d\n", retval);
                goto err_irq;
@@ -440,7 +435,7 @@ static int tmio_probe(struct platform_device *dev)
        /* Scan to find existence of the device */
        if (nand_scan(mtd, 1)) {
                retval = -ENODEV;
-               goto err_scan;
+               goto err_irq;
        }
        /* Register the partitions */
        retval = mtd_device_parse_register(mtd, NULL, NULL,
@@ -451,18 +446,8 @@ static int tmio_probe(struct platform_device *dev)
 
        nand_release(mtd);
 
-err_scan:
-       if (tmio->irq)
-               free_irq(tmio->irq, tmio);
 err_irq:
        tmio_hw_stop(dev, tmio);
-err_hwinit:
-       iounmap(tmio->fcr);
-err_iomap_fcr:
-       iounmap(tmio->ccr);
-err_iomap_ccr:
-       kfree(tmio);
-err_kzalloc:
        return retval;
 }
 
@@ -471,12 +456,7 @@ static int tmio_remove(struct platform_device *dev)
        struct tmio_nand *tmio = platform_get_drvdata(dev);
 
        nand_release(&tmio->mtd);
-       if (tmio->irq)
-               free_irq(tmio->irq, tmio);
        tmio_hw_stop(dev, tmio);
-       iounmap(tmio->fcr);
-       iounmap(tmio->ccr);
-       kfree(tmio);
        return 0;
 }
 
index 235714a421dd6770851f2f5026522123f46dbefc..c1622a5ba8140710bf7807a31a622c283c45ad4b 100644 (file)
@@ -319,11 +319,8 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
                        continue;
                txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
                                    GFP_KERNEL);
-               if (!txx9_priv) {
-                       dev_err(&dev->dev, "Unable to allocate "
-                               "TXx9 NDFMC MTD device structure.\n");
+               if (!txx9_priv)
                        continue;
-               }
                chip = &txx9_priv->chip;
                mtd = &txx9_priv->mtd;
                mtd->owner = THIS_MODULE;
index d64f8c30945fbcd8e2d09a5b8342e75c081cd343..aa26c32e1bc28943426992eba09fddc207cf104d 100644 (file)
@@ -81,7 +81,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
                partname = of_get_property(pp, "label", &len);
                if (!partname)
                        partname = of_get_property(pp, "name", &len);
-               (*pparts)[i].name = (char *)partname;
+               (*pparts)[i].name = partname;
 
                if (of_get_property(pp, "read-only", &len))
                        (*pparts)[i].mask_flags |= MTD_WRITEABLE;
@@ -152,7 +152,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
                if (names && (plen > 0)) {
                        int len = strlen(names) + 1;
 
-                       (*pparts)[i].name = (char *)names;
+                       (*pparts)[i].name = names;
                        plen -= len;
                        names += len;
                } else {
@@ -173,18 +173,9 @@ static struct mtd_part_parser ofoldpart_parser = {
 
 static int __init ofpart_parser_init(void)
 {
-       int rc;
-       rc = register_mtd_parser(&ofpart_parser);
-       if (rc)
-               goto out;
-
-       rc = register_mtd_parser(&ofoldpart_parser);
-       if (!rc)
-               return 0;
-
-       deregister_mtd_parser(&ofoldpart_parser);
-out:
-       return rc;
+       register_mtd_parser(&ofpart_parser);
+       register_mtd_parser(&ofoldpart_parser);
+       return 0;
 }
 
 static void __exit ofpart_parser_exit(void)
index 63699fffc96de24b3098f629ea495184103386c8..8e1919b6f07449f007c0b1766f4ffcde5db4c0f1 100644 (file)
@@ -58,7 +58,7 @@ static int generic_onenand_probe(struct platform_device *pdev)
                goto out_release_mem_region;
        }
 
-       info->onenand.mmcontrol = pdata ? pdata->mmcontrol : 0;
+       info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
        info->onenand.irq = platform_get_irq(pdev, 0);
 
        info->mtd.name = dev_name(&pdev->dev);
index 580035c803d693eb38b8f37750a85d57f70cadfa..5da911ebdf495152c68c52af72d0784f0a8ede54 100644 (file)
@@ -300,7 +300,8 @@ MODULE_ALIAS("RedBoot");
 
 static int __init redboot_parser_init(void)
 {
-       return register_mtd_parser(&redboot_parser);
+       register_mtd_parser(&redboot_parser);
+       return 0;
 }
 
 static void __exit redboot_parser_exit(void)
index 70106607c247289b6328c9ff99321d64ffc49c81..e579f9027c47d82bb1e4d577ddd13128324a0075 100644 (file)
@@ -19,7 +19,7 @@
  * or detected.
  */
 
-#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND)
 
 struct nand_ecc_test {
        const char *name;
index a7db819bca9278356598152060c4684bd6f615c7..4c08018d7333138a95d0d6a3c72c67131f842ed5 100644 (file)
@@ -2346,7 +2346,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
        struct list_head *iter;
-       int do_failover = 0;
+       int do_failover = 0, slave_state_changed = 0;
 
        if (!bond_has_slaves(bond))
                goto re_arm;
@@ -2370,7 +2370,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                            bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
 
                                slave->link  = BOND_LINK_UP;
-                               bond_set_active_slave(slave);
+                               slave_state_changed = 1;
 
                                /* primary_slave has no meaning in round-robin
                                 * mode. the window of a slave being up and
@@ -2399,7 +2399,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                            !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
 
                                slave->link  = BOND_LINK_DOWN;
-                               bond_set_backup_slave(slave);
+                               slave_state_changed = 1;
 
                                if (slave->link_failure_count < UINT_MAX)
                                        slave->link_failure_count++;
@@ -2426,19 +2426,24 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 
        rcu_read_unlock();
 
-       if (do_failover) {
-               /* the bond_select_active_slave must hold RTNL
-                * and curr_slave_lock for write.
-                */
+       if (do_failover || slave_state_changed) {
                if (!rtnl_trylock())
                        goto re_arm;
-               block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
 
-               bond_select_active_slave(bond);
+               if (slave_state_changed) {
+                       bond_slave_state_change(bond);
+               } else if (do_failover) {
+                       /* the bond_select_active_slave must hold RTNL
+                        * and curr_slave_lock for write.
+                        */
+                       block_netpoll_tx();
+                       write_lock_bh(&bond->curr_slave_lock);
 
-               write_unlock_bh(&bond->curr_slave_lock);
-               unblock_netpoll_tx();
+                       bond_select_active_slave(bond);
+
+                       write_unlock_bh(&bond->curr_slave_lock);
+                       unblock_netpoll_tx();
+               }
                rtnl_unlock();
        }
 
@@ -2599,45 +2604,51 @@ do_failover:
 
 /*
  * Send ARP probes for active-backup mode ARP monitor.
- *
- * Called with rcu_read_lock hold.
  */
-static void bond_ab_arp_probe(struct bonding *bond)
+static bool bond_ab_arp_probe(struct bonding *bond)
 {
        struct slave *slave, *before = NULL, *new_slave = NULL,
-                    *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+                    *curr_arp_slave, *curr_active_slave;
        struct list_head *iter;
        bool found = false;
 
-       read_lock(&bond->curr_slave_lock);
+       rcu_read_lock();
+       curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+       curr_active_slave = rcu_dereference(bond->curr_active_slave);
 
-       if (curr_arp_slave && bond->curr_active_slave)
+       if (curr_arp_slave && curr_active_slave)
                pr_info("PROBE: c_arp %s && cas %s BAD\n",
                        curr_arp_slave->dev->name,
-                       bond->curr_active_slave->dev->name);
+                       curr_active_slave->dev->name);
 
-       if (bond->curr_active_slave) {
-               bond_arp_send_all(bond, bond->curr_active_slave);
-               read_unlock(&bond->curr_slave_lock);
-               return;
+       if (curr_active_slave) {
+               bond_arp_send_all(bond, curr_active_slave);
+               rcu_read_unlock();
+               return true;
        }
-
-       read_unlock(&bond->curr_slave_lock);
+       rcu_read_unlock();
 
        /* if we don't have a curr_active_slave, search for the next available
         * backup slave from the current_arp_slave and make it the candidate
         * for becoming the curr_active_slave
         */
 
+       if (!rtnl_trylock())
+               return false;
+       /* curr_arp_slave might have gone away */
+       curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
+
        if (!curr_arp_slave) {
-               curr_arp_slave = bond_first_slave_rcu(bond);
-               if (!curr_arp_slave)
-                       return;
+               curr_arp_slave = bond_first_slave(bond);
+               if (!curr_arp_slave) {
+                       rtnl_unlock();
+                       return true;
+               }
        }
 
        bond_set_slave_inactive_flags(curr_arp_slave);
 
-       bond_for_each_slave_rcu(bond, slave, iter) {
+       bond_for_each_slave(bond, slave, iter) {
                if (!found && !before && IS_UP(slave->dev))
                        before = slave;
 
@@ -2667,21 +2678,26 @@ static void bond_ab_arp_probe(struct bonding *bond)
        if (!new_slave && before)
                new_slave = before;
 
-       if (!new_slave)
-               return;
+       if (!new_slave) {
+               rtnl_unlock();
+               return true;
+       }
 
        new_slave->link = BOND_LINK_BACK;
        bond_set_slave_active_flags(new_slave);
        bond_arp_send_all(bond, new_slave);
        new_slave->jiffies = jiffies;
        rcu_assign_pointer(bond->current_arp_slave, new_slave);
+       rtnl_unlock();
+
+       return true;
 }
 
 static void bond_activebackup_arp_mon(struct work_struct *work)
 {
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
-       bool should_notify_peers = false;
+       bool should_notify_peers = false, should_commit = false;
        int delta_in_ticks;
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2690,12 +2706,11 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
                goto re_arm;
 
        rcu_read_lock();
-
        should_notify_peers = bond_should_notify_peers(bond);
+       should_commit = bond_ab_arp_inspect(bond);
+       rcu_read_unlock();
 
-       if (bond_ab_arp_inspect(bond)) {
-               rcu_read_unlock();
-
+       if (should_commit) {
                /* Race avoidance with bond_close flush of workqueue */
                if (!rtnl_trylock()) {
                        delta_in_ticks = 1;
@@ -2704,13 +2719,14 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
                }
 
                bond_ab_arp_commit(bond);
-
                rtnl_unlock();
-               rcu_read_lock();
        }
 
-       bond_ab_arp_probe(bond);
-       rcu_read_unlock();
+       if (!bond_ab_arp_probe(bond)) {
+               /* rtnl locking failed, re-arm */
+               delta_in_ticks = 1;
+               should_notify_peers = false;
+       }
 
 re_arm:
        if (bond->params.arp_interval)
index 1a9062f4e0d6d4429d6cfb230867905b446615c5..86ccfb9f71cc4dd8c843f40f5eee38b7c0c033ab 100644 (file)
@@ -303,6 +303,19 @@ static inline void bond_set_backup_slave(struct slave *slave)
        }
 }
 
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+       struct list_head *iter;
+       struct slave *tmp;
+
+       bond_for_each_slave(bond, tmp, iter) {
+               if (tmp->link == BOND_LINK_UP)
+                       bond_set_active_slave(tmp);
+               else if (tmp->link == BOND_LINK_DOWN)
+                       bond_set_backup_slave(tmp);
+       }
+}
+
 static inline int bond_slave_state(struct slave *slave)
 {
        return slave->backup;
index 811fa5d5c6971c0a71f55d12e420fb38691bb9f7..30104b60da85a2df50259d0216991d0004e06069 100644 (file)
@@ -212,7 +212,6 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
     int neX000, ctron;
 #endif
     static unsigned version_printed;
-    struct ei_device *ei_local = netdev_priv(dev);
 
     if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
                netdev_info(dev, version);
index 92a467ff4104da3bedd8aa6762c51c19ea977a22..38fc794c1655d9d011d425ccce35cfa41ee43889 100644 (file)
@@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        cfg_idx = bnx2x_get_link_cfg_idx(bp);
        old_multi_phy_config = bp->link_params.multi_phy_config;
-       switch (cmd->port) {
-       case PORT_TP:
-               if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
-                       break; /* no port change */
-
-               if (!(bp->port.supported[0] & SUPPORTED_TP ||
-                     bp->port.supported[1] & SUPPORTED_TP)) {
-                       DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-                       return -EINVAL;
-               }
-               bp->link_params.multi_phy_config &=
-                       ~PORT_HW_CFG_PHY_SELECTION_MASK;
-               if (bp->link_params.multi_phy_config &
-                   PORT_HW_CFG_PHY_SWAPPED_ENABLED)
-                       bp->link_params.multi_phy_config |=
-                       PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
-               else
-                       bp->link_params.multi_phy_config |=
-                       PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
-               break;
-       case PORT_FIBRE:
-       case PORT_DA:
-               if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
-                       break; /* no port change */
-
-               if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
-                     bp->port.supported[1] & SUPPORTED_FIBRE)) {
+       if (cmd->port != bnx2x_get_port_type(bp)) {
+               switch (cmd->port) {
+               case PORT_TP:
+                       if (!(bp->port.supported[0] & SUPPORTED_TP ||
+                             bp->port.supported[1] & SUPPORTED_TP)) {
+                               DP(BNX2X_MSG_ETHTOOL,
+                                  "Unsupported port type\n");
+                               return -EINVAL;
+                       }
+                       bp->link_params.multi_phy_config &=
+                               ~PORT_HW_CFG_PHY_SELECTION_MASK;
+                       if (bp->link_params.multi_phy_config &
+                           PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+                               bp->link_params.multi_phy_config |=
+                               PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+                       else
+                               bp->link_params.multi_phy_config |=
+                               PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+                       break;
+               case PORT_FIBRE:
+               case PORT_DA:
+                       if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+                             bp->port.supported[1] & SUPPORTED_FIBRE)) {
+                               DP(BNX2X_MSG_ETHTOOL,
+                                  "Unsupported port type\n");
+                               return -EINVAL;
+                       }
+                       bp->link_params.multi_phy_config &=
+                               ~PORT_HW_CFG_PHY_SELECTION_MASK;
+                       if (bp->link_params.multi_phy_config &
+                           PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+                               bp->link_params.multi_phy_config |=
+                               PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+                       else
+                               bp->link_params.multi_phy_config |=
+                               PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+                       break;
+               default:
                        DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
                        return -EINVAL;
                }
-               bp->link_params.multi_phy_config &=
-                       ~PORT_HW_CFG_PHY_SELECTION_MASK;
-               if (bp->link_params.multi_phy_config &
-                   PORT_HW_CFG_PHY_SWAPPED_ENABLED)
-                       bp->link_params.multi_phy_config |=
-                       PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
-               else
-                       bp->link_params.multi_phy_config |=
-                       PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
-               break;
-       default:
-               DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-               return -EINVAL;
        }
        /* Save new config in case command complete successfully */
        new_multi_phy_config = bp->link_params.multi_phy_config;
index e118a3ec62bc263fcf380aa9832c7b251bf59460..c9c445e7b4a5ab0d756880926fc529ed538a70a6 100644 (file)
@@ -13102,9 +13102,9 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 
                if (atomic_read(&pdev->enable_cnt) == 1)
                        pci_release_regions(pdev);
-       }
 
-       pci_disable_device(pdev);
+               pci_disable_device(pdev);
+       }
 }
 
 static void bnx2x_remove_one(struct pci_dev *pdev)
index a4b940862b83b49b392d23bffb2b0d0f922d2af0..b901371ca361a1e6eafa411f5b144f6e8866aa9c 100644 (file)
@@ -4440,9 +4440,10 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
        /* Check if APP Table has changed */
        if (memcmp(&new_cfg->app,
                   &old_cfg->app,
-                  sizeof(new_cfg->app)))
+                  sizeof(new_cfg->app))) {
                need_reconfig = true;
                dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+       }
 
        return need_reconfig;
 }
index 6509935d145e8930ad7594b0583f31e872a4aa69..55a37ae11440791d78e5dd69bf486cb5b4f976f3 100644 (file)
@@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
+       netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netif_carrier_off(dev);
 
-       netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
-
        sky2_show_addr(dev);
 
        if (hw->ports > 1) {
index 30874cda84764996735d2a070ab45516311e8def..54ebf300332a353246066e5cf62f1d097355e1e5 100644 (file)
@@ -683,12 +683,17 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
                adapter->ahw->linkup = 0;
                netif_carrier_off(netdev);
        } else if (!adapter->ahw->linkup && linkup) {
-               /* Do not advertise Link up if the port is in loopback mode */
-               if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+               adapter->ahw->linkup = 1;
+
+               /* Do not advertise Link up to the stack if device
+                * is in loopback mode
+                */
+               if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+                       netdev_info(netdev, "NIC Link is up for loopback test\n");
                        return;
+               }
 
                netdev_info(netdev, "NIC Link is up\n");
-               adapter->ahw->linkup = 1;
                netif_carrier_on(netdev);
        }
 }
@@ -1150,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
        u32 seq_number;
 
-       if (unlikely(ring > adapter->max_rds_rings))
+       if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
 
        rds_ring = &recv_ctx->rds_rings[ring];
 
        index = qlcnic_get_lro_sts_refhandle(sts_data0);
-       if (unlikely(index > rds_ring->num_desc))
+       if (unlikely(index >= rds_ring->num_desc))
                return NULL;
 
        buffer = &rds_ring->rx_buf_arr[index];
@@ -1662,13 +1667,13 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
        u16 vid = 0xffff;
        int err;
 
-       if (unlikely(ring > adapter->max_rds_rings))
+       if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
 
        rds_ring = &recv_ctx->rds_rings[ring];
 
        index = qlcnic_83xx_hndl(sts_data[0]);
-       if (unlikely(index > rds_ring->num_desc))
+       if (unlikely(index >= rds_ring->num_desc))
                return NULL;
 
        buffer = &rds_ring->rx_buf_arr[index];
index 1f79d47c45fa3c9ec9b747958c71428cd0850ce5..ba78c7481fa3432f32fd7d5a5e7c56b66423b801 100644 (file)
@@ -1837,6 +1837,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
        qlcnic_linkevent_request(adapter, 1);
 
        adapter->ahw->reset_context = 0;
+       netif_tx_start_all_queues(netdev);
        return 0;
 }
 
@@ -2704,14 +2705,8 @@ static int qlcnic_open(struct net_device *netdev)
 
        err = __qlcnic_up(adapter, netdev);
        if (err)
-               goto err_out;
-
-       netif_tx_start_all_queues(netdev);
-
-       return 0;
+               qlcnic_detach(adapter);
 
-err_out:
-       qlcnic_detach(adapter);
        return err;
 }
 
index 17a1ca2050f4eaa9f47f9394d43849ba6f97cfba..0638c1810d54547df9eafb961439085dff4364a2 100644 (file)
@@ -448,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
        return 0;
 }
 
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
-                                  struct qlcnic_info *info)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_cmd_args cmd;
@@ -495,10 +494,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
        if (err)
                return -EIO;
 
-       err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
-       if (err)
-               return err;
-
        if (qlcnic_83xx_get_port_info(adapter))
                return -EIO;
 
@@ -555,6 +550,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
        if (err)
                goto err_out_send_channel_term;
 
+       err = qlcnic_sriov_get_vf_acl(adapter);
+       if (err)
+               goto err_out_send_channel_term;
+
        err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
        if (err)
                goto err_out_send_channel_term;
index d93aa87408c222760cc0ce3c40030917051b7b0c..a2e7d2c96e3678c309377d3e4dbb416feb5fbc38 100644 (file)
@@ -1524,9 +1524,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
                                             priv->dev->dev_addr, 0);
                if (!is_valid_ether_addr(priv->dev->dev_addr))
                        eth_hw_addr_random(priv->dev);
+               pr_info("%s: device MAC address %pM\n", priv->dev->name,
+                       priv->dev->dev_addr);
        }
-       pr_warn("%s: device MAC address %pM\n", priv->dev->name,
-               priv->dev->dev_addr);
 }
 
 /**
@@ -1635,7 +1635,7 @@ static int stmmac_hw_setup(struct net_device *dev)
        stmmac_mmc_setup(priv);
 
        ret = stmmac_init_ptp(priv);
-       if (ret)
+       if (ret && ret != -EOPNOTSUPP)
                pr_warn("%s: failed PTP initialisation\n", __func__);
 
 #ifdef CONFIG_STMMAC_DEBUG_FS
index a26eecb1212ce735a2de234e31a12ae9a4e90b37..7b594ce3f21db2102139e8802d4d9e69bb61a3e8 100644 (file)
@@ -462,7 +462,7 @@ struct nvsp_message {
 
 #define NETVSC_MTU 65536
 
-#define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*2)   /* 2MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
 
 #define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
index 93b485b96249b32347586f6494b7e1d15ab3cc2a..03a2c6e171584ff5f639d686458fbb75b7dcfb47 100644 (file)
@@ -136,8 +136,7 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
 
        if (net_device->recv_buf) {
                /* Free up the receive buffer */
-               free_pages((unsigned long)net_device->recv_buf,
-                       get_order(net_device->recv_buf_size));
+               vfree(net_device->recv_buf);
                net_device->recv_buf = NULL;
        }
 
@@ -163,9 +162,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
                return -ENODEV;
        ndev = net_device->ndev;
 
-       net_device->recv_buf =
-               (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-                               get_order(net_device->recv_buf_size));
+       net_device->recv_buf = vzalloc(net_device->recv_buf_size);
        if (!net_device->recv_buf) {
                netdev_err(ndev, "unable to allocate receive "
                        "buffer of size %d\n", net_device->recv_buf_size);
index bcf01af4b879252dbe5d6339dd7134cb848280f4..44c4db8450f0347b4ea6ce861da7f05524ebdb5d 100644 (file)
@@ -69,6 +69,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 
@@ -2228,6 +2229,27 @@ static int tun_chr_close(struct inode *inode, struct file *file)
        return 0;
 }
 
+#ifdef CONFIG_PROC_FS
+static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+{
+       struct tun_struct *tun;
+       struct ifreq ifr;
+
+       memset(&ifr, 0, sizeof(ifr));
+
+       rtnl_lock();
+       tun = tun_get(f);
+       if (tun)
+               tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+       rtnl_unlock();
+
+       if (tun)
+               tun_put(tun);
+
+       return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+}
+#endif
+
 static const struct file_operations tun_fops = {
        .owner  = THIS_MODULE,
        .llseek = no_llseek,
@@ -2242,7 +2264,10 @@ static const struct file_operations tun_fops = {
 #endif
        .open   = tun_chr_open,
        .release = tun_chr_close,
-       .fasync = tun_chr_fasync
+       .fasync = tun_chr_fasync,
+#ifdef CONFIG_PROC_FS
+       .show_fdinfo = tun_chr_show_fdinfo,
+#endif
 };
 
 static struct miscdevice tun_miscdev = {
index e955c569298626c924f6cec16c3d32de8bec54d6..ff04d4f95baa3561fbf42899bf95f69eab28412f 100644 (file)
@@ -117,6 +117,7 @@ struct netfront_info {
        } tx_skbs[NET_TX_RING_SIZE];
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+       struct page *grant_tx_page[NET_TX_RING_SIZE];
        unsigned tx_skb_freelist;
 
        spinlock_t   rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
                        gnttab_release_grant_reference(
                                &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_INVALID_REF;
+                       np->grant_tx_page[id] = NULL;
                        add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
                        dev_kfree_skb_irq(skb);
                }
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                                                mfn, GNTMAP_readonly);
 
+               np->grant_tx_page[id] = virt_to_page(data);
                tx->gref = np->grant_tx_ref[id] = ref;
                tx->offset = offset;
                tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                                                        np->xbdev->otherend_id,
                                                        mfn, GNTMAP_readonly);
 
+                       np->grant_tx_page[id] = page;
                        tx->gref = np->grant_tx_ref[id] = ref;
                        tx->offset = offset;
                        tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        mfn = virt_to_mfn(data);
        gnttab_grant_foreign_access_ref(
                ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+       np->grant_tx_page[id] = virt_to_page(data);
        tx->gref = np->grant_tx_ref[id] = ref;
        tx->offset = offset;
        tx->size = len;
@@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
                        continue;
 
                skb = np->tx_skbs[i].skb;
-               gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-                                             GNTMAP_readonly);
-               gnttab_release_grant_reference(&np->gref_tx_head,
-                                              np->grant_tx_ref[i]);
+               get_page(np->grant_tx_page[i]);
+               gnttab_end_foreign_access(np->grant_tx_ref[i],
+                                         GNTMAP_readonly,
+                                         (unsigned long)page_address(np->grant_tx_page[i]));
+               np->grant_tx_page[i] = NULL;
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
                dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-       struct mmu_update      *mmu = np->rx_mmu;
-       struct multicall_entry *mcl = np->rx_mcl;
-       struct sk_buff_head free_list;
-       struct sk_buff *skb;
-       unsigned long mfn;
-       int xfer = 0, noxfer = 0, unused = 0;
        int id, ref;
 
-       dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-                        __func__);
-       return;
-
-       skb_queue_head_init(&free_list);
-
        spin_lock_bh(&np->rx_lock);
 
        for (id = 0; id < NET_RX_RING_SIZE; id++) {
-               ref = np->grant_rx_ref[id];
-               if (ref == GRANT_INVALID_REF) {
-                       unused++;
-                       continue;
-               }
+               struct sk_buff *skb;
+               struct page *page;
 
                skb = np->rx_skbs[id];
-               mfn = gnttab_end_foreign_transfer_ref(ref);
-               gnttab_release_grant_reference(&np->gref_rx_head, ref);
-               np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
-               if (0 == mfn) {
-                       skb_shinfo(skb)->nr_frags = 0;
-                       dev_kfree_skb(skb);
-                       noxfer++;
+               if (!skb)
                        continue;
-               }
 
-               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                       /* Remap the page. */
-                       const struct page *page =
-                               skb_frag_page(&skb_shinfo(skb)->frags[0]);
-                       unsigned long pfn = page_to_pfn(page);
-                       void *vaddr = page_address(page);
+               ref = np->grant_rx_ref[id];
+               if (ref == GRANT_INVALID_REF)
+                       continue;
 
-                       MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-                                               mfn_pte(mfn, PAGE_KERNEL),
-                                               0);
-                       mcl++;
-                       mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-                               | MMU_MACHPHYS_UPDATE;
-                       mmu->val = pfn;
-                       mmu++;
+               page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-                       set_phys_to_machine(pfn, mfn);
-               }
-               __skb_queue_tail(&free_list, skb);
-               xfer++;
-       }
-
-       dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-                __func__, xfer, noxfer, unused);
+               /* gnttab_end_foreign_access() needs a page ref until
+                * foreign access is ended (which may be deferred).
+                */
+               get_page(page);
+               gnttab_end_foreign_access(ref, 0,
+                                         (unsigned long)page_address(page));
+               np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-       if (xfer) {
-               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                       /* Do all the remapping work and M2P updates. */
-                       MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-                                        NULL, DOMID_SELF);
-                       mcl++;
-                       HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-               }
+               kfree_skb(skb);
        }
 
-       __skb_queue_purge(&free_list);
-
        spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                np->rx_skbs[i] = NULL;
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
+               np->grant_tx_page[i] = NULL;
        }
 
        /* A grant for every tx ring slot */
index b13303e75a34ea1b4f3d563eac5bb88b2c1d1a8d..440ed776efd4c97d015cd1ebef066d2a2f528719 100644 (file)
@@ -25,4 +25,18 @@ config CHROMEOS_LAPTOP
          If you have a supported Chromebook, choose Y or M here.
          The module will be called chromeos_laptop.
 
+config CHROMEOS_PSTORE
+       tristate "Chrome OS pstore support"
+       ---help---
+         This module instantiates the persistent storage on x86 ChromeOS
+         devices. It can be used to store away console logs and crash
+         information across reboots.
+
+         The range of memory used is 0xf00000-0x1000000, traditionally
+         the memory used to back VGA controller memory.
+
+         If you have a supported Chromebook, choose Y or M here.
+         The module will be called chromeos_pstore.
+
+
 endif # CHROMEOS_PLATFORMS
index 015e9195e2266a62838d6dde999bdd547610a807..2b860ca7450fd57bee3c70c82de4a24d53c480f0 100644 (file)
@@ -1,2 +1,3 @@
 
 obj-$(CONFIG_CHROMEOS_LAPTOP)  += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE)  += chromeos_pstore.o
index 3e5b4497a1d02010b4c63bdc939f67127bb19976..7f3aad0e115c49f4c2ea85c149dace0e901620ce 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 
 #define ATMEL_TP_I2C_ADDR      0x4b
 #define ATMEL_TP_I2C_BL_ADDR   0x25
@@ -40,7 +41,7 @@ static struct i2c_client *als;
 static struct i2c_client *tp;
 static struct i2c_client *ts;
 
-const char *i2c_adapter_names[] = {
+static const char *i2c_adapter_names[] = {
        "SMBus I801 adapter",
        "i915 gmbus vga",
        "i915 gmbus panel",
@@ -53,20 +54,33 @@ enum i2c_adapter_type {
        I2C_ADAPTER_PANEL,
 };
 
-static struct i2c_board_info __initdata cyapa_device = {
+struct i2c_peripheral {
+       int (*add)(enum i2c_adapter_type type);
+       enum i2c_adapter_type type;
+};
+
+#define MAX_I2C_PERIPHERALS 3
+
+struct chromeos_laptop {
+       struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
+};
+
+static struct chromeos_laptop *cros_laptop;
+
+static struct i2c_board_info cyapa_device = {
        I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
        .flags          = I2C_CLIENT_WAKE,
 };
 
-static struct i2c_board_info __initdata isl_als_device = {
+static struct i2c_board_info isl_als_device = {
        I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
 };
 
-static struct i2c_board_info __initdata tsl2583_als_device = {
+static struct i2c_board_info tsl2583_als_device = {
        I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
 };
 
-static struct i2c_board_info __initdata tsl2563_als_device = {
+static struct i2c_board_info tsl2563_als_device = {
        I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
 };
 
@@ -89,7 +103,7 @@ static struct mxt_platform_data atmel_224s_tp_platform_data = {
        .config_length          = 0,
 };
 
-static struct i2c_board_info __initdata atmel_224s_tp_device = {
+static struct i2c_board_info atmel_224s_tp_device = {
        I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
        .platform_data = &atmel_224s_tp_platform_data,
        .flags          = I2C_CLIENT_WAKE,
@@ -110,13 +124,13 @@ static struct mxt_platform_data atmel_1664s_platform_data = {
        .config_length          = 0,
 };
 
-static struct i2c_board_info __initdata atmel_1664s_device = {
+static struct i2c_board_info atmel_1664s_device = {
        I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
        .platform_data = &atmel_1664s_platform_data,
        .flags          = I2C_CLIENT_WAKE,
 };
 
-static struct i2c_client __init *__add_probed_i2c_device(
+static struct i2c_client *__add_probed_i2c_device(
                const char *name,
                int bus,
                struct i2c_board_info *info,
@@ -169,7 +183,7 @@ static struct i2c_client __init *__add_probed_i2c_device(
        return client;
 }
 
-static int __init __find_i2c_adap(struct device *dev, void *data)
+static int __find_i2c_adap(struct device *dev, void *data)
 {
        const char *name = data;
        static const char *prefix = "i2c-";
@@ -180,7 +194,7 @@ static int __init __find_i2c_adap(struct device *dev, void *data)
        return (strncmp(adapter->name, name, strlen(name)) == 0);
 }
 
-static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+static int find_i2c_adapter_num(enum i2c_adapter_type type)
 {
        struct device *dev = NULL;
        struct i2c_adapter *adapter;
@@ -189,8 +203,9 @@ static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
        dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
                              __find_i2c_adap);
        if (!dev) {
-               pr_err("%s: i2c adapter %s not found on system.\n", __func__,
-                      name);
+               /* Adapters may appear later. Deferred probing will retry */
+               pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
+                         name);
                return -ENODEV;
        }
        adapter = to_i2c_adapter(dev);
@@ -205,7 +220,7 @@ static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
  * Returns NULL if no devices found.
  * See Documentation/i2c/instantiating-devices for more information.
  */
-static __init struct i2c_client *add_probed_i2c_device(
+static struct i2c_client *add_probed_i2c_device(
                const char *name,
                enum i2c_adapter_type type,
                struct i2c_board_info *info,
@@ -222,7 +237,7 @@ static __init struct i2c_client *add_probed_i2c_device(
  * info->addr.
  * Returns NULL if no device found.
  */
-static __init struct i2c_client *add_i2c_device(const char *name,
+static struct i2c_client *add_i2c_device(const char *name,
                                                enum i2c_adapter_type type,
                                                struct i2c_board_info *info)
 {
@@ -233,161 +248,259 @@ static __init struct i2c_client *add_i2c_device(const char *name,
                                       addr_list);
 }
 
-
-static struct i2c_client __init *add_smbus_device(const char *name,
-                                                 struct i2c_board_info *info)
+static int setup_cyapa_tp(enum i2c_adapter_type type)
 {
-       return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
-}
+       if (tp)
+               return 0;
 
-static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
-{
-       /* add cyapa touchpad on smbus */
-       tp = add_smbus_device("trackpad", &cyapa_device);
-       return 0;
+       /* add cyapa touchpad */
+       tp = add_i2c_device("trackpad", type, &cyapa_device);
+       return (!tp) ? -EAGAIN : 0;
 }
 
-static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+static int setup_atmel_224s_tp(enum i2c_adapter_type type)
 {
        const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
                                             ATMEL_TP_I2C_ADDR,
                                             I2C_CLIENT_END };
+       if (tp)
+               return 0;
 
-       /* add atmel mxt touchpad on VGA DDC GMBus */
-       tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+       /* add atmel mxt touchpad */
+       tp = add_probed_i2c_device("trackpad", type,
                                   &atmel_224s_tp_device, addr_list);
-       return 0;
+       return (!tp) ? -EAGAIN : 0;
 }
 
-static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
 {
        const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
                                             ATMEL_TS_I2C_ADDR,
                                             I2C_CLIENT_END };
+       if (ts)
+               return 0;
 
-       /* add atmel mxt touch device on PANEL GMBus */
-       ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+       /* add atmel mxt touch device */
+       ts = add_probed_i2c_device("touchscreen", type,
                                   &atmel_1664s_device, addr_list);
-       return 0;
+       return (!ts) ? -EAGAIN : 0;
 }
 
-
-static int __init setup_isl29018_als(const struct dmi_system_id *id)
+static int setup_isl29018_als(enum i2c_adapter_type type)
 {
+       if (als)
+               return 0;
+
        /* add isl29018 light sensor */
-       als = add_smbus_device("lightsensor", &isl_als_device);
-       return 0;
+       als = add_i2c_device("lightsensor", type, &isl_als_device);
+       return (!als) ? -EAGAIN : 0;
 }
 
-static int __init setup_isl29023_als(const struct dmi_system_id *id)
+static int setup_tsl2583_als(enum i2c_adapter_type type)
 {
-       /* add isl29023 light sensor on Panel GMBus */
-       als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
-                            &isl_als_device);
-       return 0;
+       if (als)
+               return 0;
+
+       /* add tsl2583 light sensor */
+       als = add_i2c_device(NULL, type, &tsl2583_als_device);
+       return (!als) ? -EAGAIN : 0;
 }
 
-static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+static int setup_tsl2563_als(enum i2c_adapter_type type)
 {
-       /* add tsl2583 light sensor on smbus */
-       als = add_smbus_device(NULL, &tsl2583_als_device);
-       return 0;
+       if (als)
+               return 0;
+
+       /* add tsl2563 light sensor */
+       als = add_i2c_device(NULL, type, &tsl2563_als_device);
+       return (!als) ? -EAGAIN : 0;
 }
 
-static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+static int __init chromeos_laptop_dmi_matched(const struct dmi_system_id *id)
 {
-       /* add tsl2563 light sensor on smbus */
-       als = add_smbus_device(NULL, &tsl2563_als_device);
-       return 0;
+       cros_laptop = (void *)id->driver_data;
+       pr_debug("DMI Matched %s.\n", id->ident);
+
+       /* Indicate to dmi_scan that processing is done. */
+       return 1;
 }
 
-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
-       {
-               .ident = "Samsung Series 5 550 - Touchpad",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
-               },
-               .callback = setup_cyapa_smbus_tp,
+static int chromeos_laptop_probe(struct platform_device *pdev)
+{
+       int i;
+       int ret = 0;
+
+       for (i = 0; i < MAX_I2C_PERIPHERALS; i++) {
+               struct i2c_peripheral *i2c_dev;
+
+               i2c_dev = &cros_laptop->i2c_peripherals[i];
+
+               /* No more peripherals. */
+               if (i2c_dev->add == NULL)
+                       break;
+
+               /* Add the device. Set -EPROBE_DEFER on any failure */
+               if (i2c_dev->add(i2c_dev->type))
+                       ret = -EPROBE_DEFER;
+       }
+
+       return ret;
+}
+
+static struct chromeos_laptop samsung_series_5_550 = {
+       .i2c_peripherals = {
+               /* Touchpad. */
+               { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+               /* Light Sensor. */
+               { .add = setup_isl29018_als, I2C_ADAPTER_SMBUS },
        },
-       {
-               .ident = "Chromebook Pixel - Touchscreen",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
-               },
-               .callback = setup_atmel_1664s_ts,
+};
+
+static struct chromeos_laptop samsung_series_5 = {
+       .i2c_peripherals = {
+               /* Light Sensor. */
+               { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
+       },
+};
+
+static struct chromeos_laptop chromebook_pixel = {
+       .i2c_peripherals = {
+               /* Touch Screen. */
+               { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
+               /* Touchpad. */
+               { .add = setup_atmel_224s_tp, I2C_ADAPTER_VGADDC },
+               /* Light Sensor. */
+               { .add = setup_isl29018_als, I2C_ADAPTER_PANEL },
+       },
+};
+
+static struct chromeos_laptop acer_c7_chromebook = {
+       .i2c_peripherals = {
+               /* Touchpad. */
+               { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+       },
+};
+
+static struct chromeos_laptop acer_ac700 = {
+       .i2c_peripherals = {
+               /* Light Sensor. */
+               { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
        },
+};
+
+static struct chromeos_laptop hp_pavilion_14_chromebook = {
+       .i2c_peripherals = {
+               /* Touchpad. */
+               { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+       },
+};
+
+static struct chromeos_laptop cr48 = {
+       .i2c_peripherals = {
+               /* Light Sensor. */
+               { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+       },
+};
+
+#define _CBDD(board_) \
+       .callback = chromeos_laptop_dmi_matched, \
+       .driver_data = (void *)&board_
+
+static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
        {
-               .ident = "Chromebook Pixel - Touchpad",
+               .ident = "Samsung Series 5 550",
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
                },
-               .callback = setup_atmel_224s_tp,
+               _CBDD(samsung_series_5_550),
        },
        {
-               .ident = "Samsung Series 5 550 - Light Sensor",
+               .ident = "Samsung Series 5",
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
                },
-               .callback = setup_isl29018_als,
+               _CBDD(samsung_series_5),
        },
        {
-               .ident = "Chromebook Pixel - Light Sensor",
+               .ident = "Chromebook Pixel",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
                },
-               .callback = setup_isl29023_als,
+               _CBDD(chromebook_pixel),
        },
        {
-               .ident = "Acer C7 Chromebook - Touchpad",
+               .ident = "Acer C7 Chromebook",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
                },
-               .callback = setup_cyapa_smbus_tp,
+               _CBDD(acer_c7_chromebook),
        },
        {
-               .ident = "HP Pavilion 14 Chromebook - Touchpad",
+               .ident = "Acer AC700",
                .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
                },
-               .callback = setup_cyapa_smbus_tp,
+               _CBDD(acer_ac700),
        },
        {
-               .ident = "Samsung Series 5 - Light Sensor",
+               .ident = "HP Pavilion 14 Chromebook",
                .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
                },
-               .callback = setup_tsl2583_als,
+               _CBDD(hp_pavilion_14_chromebook),
        },
        {
-               .ident = "Cr-48 - Light Sensor",
+               .ident = "Cr-48",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
                },
-               .callback = setup_tsl2563_als,
-       },
-       {
-               .ident = "Acer AC700 - Light Sensor",
-               .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
-               },
-               .callback = setup_tsl2563_als,
+               _CBDD(cr48),
        },
        { }
 };
 MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
 
+static struct platform_device *cros_platform_device;
+
+static struct platform_driver cros_platform_driver = {
+       .driver = {
+               .name = "chromeos_laptop",
+               .owner = THIS_MODULE,
+       },
+       .probe = chromeos_laptop_probe,
+};
+
 static int __init chromeos_laptop_init(void)
 {
+       int ret;
        if (!dmi_check_system(chromeos_laptop_dmi_table)) {
                pr_debug("%s unsupported system.\n", __func__);
                return -ENODEV;
        }
+
+       ret = platform_driver_register(&cros_platform_driver);
+       if (ret)
+               return ret;
+
+       cros_platform_device = platform_device_alloc("chromeos_laptop", -1);
+       if (!cros_platform_device) {
+               ret = -ENOMEM;
+               goto fail_platform_device1;
+       }
+
+       ret = platform_device_add(cros_platform_device);
+       if (ret)
+               goto fail_platform_device2;
+
        return 0;
+
+fail_platform_device2:
+       platform_device_put(cros_platform_device);
+fail_platform_device1:
+       platform_driver_unregister(&cros_platform_driver);
+       return ret;
 }
 
 static void __exit chromeos_laptop_exit(void)
@@ -398,6 +511,9 @@ static void __exit chromeos_laptop_exit(void)
                i2c_unregister_device(tp);
        if (ts)
                i2c_unregister_device(ts);
+
+       platform_device_unregister(cros_platform_device);
+       platform_driver_unregister(&cros_platform_driver);
 }
 
 module_init(chromeos_laptop_init);
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
new file mode 100644 (file)
index 0000000..e0e0e65
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ *  chromeos_pstore.c - Driver to instantiate Chromebook ramoops device
+ *
+ *  Copyright (C) 2013 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pstore_ram.h>
+
+static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
+       {
+               /*
+                * Today all Chromebooks/boxes ship with GOOGLE as vendor and
+                * coreboot as bios vendor. No other systems with this
+                * combination are known to date.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+               },
+       },
+       {
+               /*
+                * The first Samsung Chromebox and Chromebook Series 5 550 use
+                * coreboot but with Samsung as the system vendor.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+                       DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+               },
+       },
+       {
+               /* x86-alex, the first Samsung Chromebook. */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+               },
+       },
+       {
+               /* x86-mario, the Cr-48 pilot device from Google. */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+               },
+       },
+       {
+               /* x86-zgb, the first Acer Chromebook. */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+               },
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
+
+/*
+ * On x86 chromebooks/boxes, the firmware will keep the legacy VGA memory
+ * range untouched across reboots, so we use that to store our pstore
+ * contents for panic logs, etc.
+ */
+static struct ramoops_platform_data chromeos_ramoops_data = {
+       .mem_size       = 0x100000,
+       .mem_address    = 0xf00000,
+       .record_size    = 0x20000,
+       .console_size   = 0x20000,
+       .ftrace_size    = 0x20000,
+       .dump_oops      = 1,
+};
+
+static struct platform_device chromeos_ramoops = {
+       .name = "ramoops",
+       .dev = {
+               .platform_data = &chromeos_ramoops_data,
+       },
+};
+
+static int __init chromeos_pstore_init(void)
+{
+       if (dmi_check_system(chromeos_pstore_dmi_table))
+               return platform_device_register(&chromeos_ramoops);
+
+       return -ENODEV;
+}
+
+static void __exit chromeos_pstore_exit(void)
+{
+       platform_device_unregister(&chromeos_ramoops);
+}
+
+module_init(chromeos_pstore_init);
+module_exit(chromeos_pstore_exit);
+
+MODULE_DESCRIPTION("Chrome OS pstore module");
+MODULE_LICENSE("GPL");
index d9dcd37b5a521e86baf54702ff96f91657037aff..5ae65c11d544d4feb9affb727b6e00bec4b13894 100644 (file)
@@ -197,6 +197,17 @@ config HP_ACCEL
          To compile this driver as a module, choose M here: the module will
          be called hp_accel.
 
+config HP_WIRELESS
+       tristate "HP WIRELESS"
+       depends on ACPI
+       depends on INPUT
+       help
+        This driver provides support for the new HP wireless button for Windows 8.
+        On such systems the driver should load automatically (via ACPI alias).
+
+        To compile this driver as a module, choose M here: the module will
+        be called hp-wireless.
+
 config HP_WMI
        tristate "HP WMI extras"
        depends on ACPI_WMI
@@ -808,4 +819,12 @@ config PVPANIC
          a paravirtualized device provided by QEMU; it lets a virtual machine
          (guest) communicate panic events to the host.
 
+config INTEL_BAYTRAIL_MBI
+       tristate
+       depends on PCI
+       ---help---
+         Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
+         Interface. This is a requirement for systems that need to configure
+         the PUNIT for power management features such as RAPL.
+
 endif # X86_PLATFORM_DEVICES
index f0e6aa407ffb9ee8786e7aa71f5c76ed00ec6d81..9b87cfc42b8419202553a1d49986b7a737932882 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_DELL_WMI_AIO)    += dell-wmi-aio.o
 obj-$(CONFIG_ACER_WMI)         += acer-wmi.o
 obj-$(CONFIG_ACERHDF)          += acerhdf.o
 obj-$(CONFIG_HP_ACCEL)         += hp_accel.o
+obj-$(CONFIG_HP_WIRELESS)      += hp-wireless.o
 obj-$(CONFIG_HP_WMI)           += hp-wmi.o
 obj-$(CONFIG_AMILO_RFKILL)     += amilo-rfkill.o
 obj-$(CONFIG_TC1100_WMI)       += tc1100-wmi.o
@@ -54,3 +55,4 @@ obj-$(CONFIG_INTEL_RST)               += intel-rst.o
 obj-$(CONFIG_INTEL_SMARTCONNECT)       += intel-smartconnect.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
+obj-$(CONFIG_INTEL_BAYTRAIL_MBI)       += intel_baytrail.o
index 109f6383040cf99b99430a44a989c129660de8d3..c5e082fb82fa1b3a56b8673f48778bdac8276cac 100644 (file)
@@ -183,7 +183,6 @@ struct asus_wmi {
 
        struct input_dev *inputdev;
        struct backlight_device *backlight_device;
-       struct device *hwmon_device;
        struct platform_device *platform_device;
 
        struct led_classdev wlan_led;
@@ -1072,20 +1071,12 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
        return sprintf(buf, "%d\n", value);
 }
 
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
-
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "asus\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
 
 static struct attribute *hwmon_attributes[] = {
-       &sensor_dev_attr_pwm1.dev_attr.attr,
-       &sensor_dev_attr_temp1_input.dev_attr.attr,
-       &sensor_dev_attr_name.dev_attr.attr,
+       &dev_attr_pwm1.attr,
+       &dev_attr_temp1_input.attr,
        NULL
 };
 
@@ -1099,9 +1090,9 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
        int dev_id = -1;
        u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
 
-       if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
+       if (attr == &dev_attr_pwm1.attr)
                dev_id = ASUS_WMI_DEVID_FAN_CTRL;
-       else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+       else if (attr == &dev_attr_temp1_input.attr)
                dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
 
        if (dev_id != -1) {
@@ -1136,35 +1127,20 @@ static struct attribute_group hwmon_attribute_group = {
        .is_visible = asus_hwmon_sysfs_is_visible,
        .attrs = hwmon_attributes
 };
-
-static void asus_wmi_hwmon_exit(struct asus_wmi *asus)
-{
-       struct device *hwmon;
-
-       hwmon = asus->hwmon_device;
-       if (!hwmon)
-               return;
-       sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group);
-       hwmon_device_unregister(hwmon);
-       asus->hwmon_device = NULL;
-}
+__ATTRIBUTE_GROUPS(hwmon_attribute);
 
 static int asus_wmi_hwmon_init(struct asus_wmi *asus)
 {
        struct device *hwmon;
-       int result;
 
-       hwmon = hwmon_device_register(&asus->platform_device->dev);
+       hwmon = hwmon_device_register_with_groups(&asus->platform_device->dev,
+                                                 "asus", asus,
+                                                 hwmon_attribute_groups);
        if (IS_ERR(hwmon)) {
                pr_err("Could not register asus hwmon device\n");
                return PTR_ERR(hwmon);
        }
-       dev_set_drvdata(hwmon, asus);
-       asus->hwmon_device = hwmon;
-       result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
-       if (result)
-               asus_wmi_hwmon_exit(asus);
-       return result;
+       return 0;
 }
 
 /*
@@ -1835,7 +1811,6 @@ fail_backlight:
 fail_rfkill:
        asus_wmi_led_exit(asus);
 fail_leds:
-       asus_wmi_hwmon_exit(asus);
 fail_hwmon:
        asus_wmi_input_exit(asus);
 fail_input:
@@ -1853,7 +1828,6 @@ static int asus_wmi_remove(struct platform_device *device)
        wmi_remove_notify_handler(asus->driver->event_guid);
        asus_wmi_backlight_exit(asus);
        asus_wmi_input_exit(asus);
-       asus_wmi_hwmon_exit(asus);
        asus_wmi_led_exit(asus);
        asus_wmi_rfkill_exit(asus);
        asus_wmi_debugfs_exit(asus);
index eaa78edb1f4ef2b9f5af80bd0f715dfaa9304bd8..7297df2ebf503771d080a3467869819d72921b08 100644 (file)
 /* ======= */
 struct compal_data{
        /* Fan control */
-       struct device *hwmon_dev;
-       int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by moterboard */
+       int pwm_enable; /* 0:full on, 1:set by pwm1, 2:control by motherboard */
        unsigned char curr_pwm;
 
        /* Power supply */
@@ -402,15 +401,6 @@ SIMPLE_MASKED_STORE_SHOW(wake_up_wlan,     WAKE_UP_ADDR, WAKE_UP_WLAN)
 SIMPLE_MASKED_STORE_SHOW(wake_up_key,  WAKE_UP_ADDR, WAKE_UP_KEY)
 SIMPLE_MASKED_STORE_SHOW(wake_up_mouse,        WAKE_UP_ADDR, WAKE_UP_MOUSE)
 
-
-/* General hwmon interface */
-static ssize_t hwmon_name_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "%s\n", DRIVER_NAME);
-}
-
-
 /* Fan control interface */
 static ssize_t pwm_enable_show(struct device *dev,
                struct device_attribute *attr, char *buf)
@@ -665,55 +655,55 @@ static DEVICE_ATTR(wake_up_key,
 static DEVICE_ATTR(wake_up_mouse,
                0644, wake_up_mouse_show,       wake_up_mouse_store);
 
-static SENSOR_DEVICE_ATTR(name,        S_IRUGO, hwmon_name_show,   NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_input,  S_IRUGO, fan_show,          NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu,          NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local,    NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS,      NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge,  NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga,          NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN,         NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu,         NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local,   NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS,     NULL, 1);
-static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, label_vga,         NULL, 1);
-static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN,        NULL, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store, 1);
-static SENSOR_DEVICE_ATTR(pwm1_enable,
-               S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store, 0);
-
-static struct attribute *compal_attributes[] = {
+static DEVICE_ATTR(fan1_input,  S_IRUGO, fan_show,          NULL);
+static DEVICE_ATTR(temp1_input, S_IRUGO, temp_cpu,          NULL);
+static DEVICE_ATTR(temp2_input, S_IRUGO, temp_cpu_local,    NULL);
+static DEVICE_ATTR(temp3_input, S_IRUGO, temp_cpu_DTS,      NULL);
+static DEVICE_ATTR(temp4_input, S_IRUGO, temp_northbridge,  NULL);
+static DEVICE_ATTR(temp5_input, S_IRUGO, temp_vga,          NULL);
+static DEVICE_ATTR(temp6_input, S_IRUGO, temp_SKIN,         NULL);
+static DEVICE_ATTR(temp1_label, S_IRUGO, label_cpu,         NULL);
+static DEVICE_ATTR(temp2_label, S_IRUGO, label_cpu_local,   NULL);
+static DEVICE_ATTR(temp3_label, S_IRUGO, label_cpu_DTS,     NULL);
+static DEVICE_ATTR(temp4_label, S_IRUGO, label_northbridge, NULL);
+static DEVICE_ATTR(temp5_label, S_IRUGO, label_vga,         NULL);
+static DEVICE_ATTR(temp6_label, S_IRUGO, label_SKIN,        NULL);
+static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, pwm_show, pwm_store);
+static DEVICE_ATTR(pwm1_enable,
+                  S_IRUGO | S_IWUSR, pwm_enable_show, pwm_enable_store);
+
+static struct attribute *compal_platform_attrs[] = {
        &dev_attr_wake_up_pme.attr,
        &dev_attr_wake_up_modem.attr,
        &dev_attr_wake_up_lan.attr,
        &dev_attr_wake_up_wlan.attr,
        &dev_attr_wake_up_key.attr,
        &dev_attr_wake_up_mouse.attr,
-       /* Maybe put the sensor-stuff in a separate hwmon-driver? That way,
-        * the hwmon sysfs won't be cluttered with the above files. */
-       &sensor_dev_attr_name.dev_attr.attr,
-       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
-       &sensor_dev_attr_pwm1.dev_attr.attr,
-       &sensor_dev_attr_fan1_input.dev_attr.attr,
-       &sensor_dev_attr_temp1_input.dev_attr.attr,
-       &sensor_dev_attr_temp2_input.dev_attr.attr,
-       &sensor_dev_attr_temp3_input.dev_attr.attr,
-       &sensor_dev_attr_temp4_input.dev_attr.attr,
-       &sensor_dev_attr_temp5_input.dev_attr.attr,
-       &sensor_dev_attr_temp6_input.dev_attr.attr,
-       &sensor_dev_attr_temp1_label.dev_attr.attr,
-       &sensor_dev_attr_temp2_label.dev_attr.attr,
-       &sensor_dev_attr_temp3_label.dev_attr.attr,
-       &sensor_dev_attr_temp4_label.dev_attr.attr,
-       &sensor_dev_attr_temp5_label.dev_attr.attr,
-       &sensor_dev_attr_temp6_label.dev_attr.attr,
        NULL
 };
+static struct attribute_group compal_platform_attr_group = {
+       .attrs = compal_platform_attrs
+};
 
-static struct attribute_group compal_attribute_group = {
-       .attrs = compal_attributes
+static struct attribute *compal_hwmon_attrs[] = {
+       &dev_attr_pwm1_enable.attr,
+       &dev_attr_pwm1.attr,
+       &dev_attr_fan1_input.attr,
+       &dev_attr_temp1_input.attr,
+       &dev_attr_temp2_input.attr,
+       &dev_attr_temp3_input.attr,
+       &dev_attr_temp4_input.attr,
+       &dev_attr_temp5_input.attr,
+       &dev_attr_temp6_input.attr,
+       &dev_attr_temp1_label.attr,
+       &dev_attr_temp2_label.attr,
+       &dev_attr_temp3_label.attr,
+       &dev_attr_temp4_label.attr,
+       &dev_attr_temp5_label.attr,
+       &dev_attr_temp6_label.attr,
+       NULL
 };
+ATTRIBUTE_GROUPS(compal_hwmon);
 
 static int compal_probe(struct platform_device *);
 static int compal_remove(struct platform_device *);
@@ -1021,30 +1011,28 @@ static int compal_probe(struct platform_device *pdev)
 {
        int err;
        struct compal_data *data;
+       struct device *hwmon_dev;
 
        if (!extra_features)
                return 0;
 
        /* Fan control */
-       data = kzalloc(sizeof(struct compal_data), GFP_KERNEL);
+       data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
        initialize_fan_control_data(data);
 
-       err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
-       if (err) {
-               kfree(data);
+       err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+       if (err)
                return err;
-       }
 
-       data->hwmon_dev = hwmon_device_register(&pdev->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               sysfs_remove_group(&pdev->dev.kobj,
-                               &compal_attribute_group);
-               kfree(data);
-               return err;
+       hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+                                                     DRIVER_NAME, data,
+                                                     compal_hwmon_groups);
+       if (IS_ERR(hwmon_dev)) {
+               err = PTR_ERR(hwmon_dev);
+               goto remove;
        }
 
        /* Power supply */
@@ -1054,6 +1042,10 @@ static int compal_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, data);
 
        return 0;
+
+remove:
+       sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+       return err;
 }
 
 static void __exit compal_cleanup(void)
@@ -1080,12 +1072,9 @@ static int compal_remove(struct platform_device *pdev)
        pwm_disable_control();
 
        data = platform_get_drvdata(pdev);
-       hwmon_device_unregister(data->hwmon_dev);
        power_supply_unregister(&data->psy);
 
-       kfree(data);
-
-       sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group);
+       sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
 
        return 0;
 }
index c608b1d33f4a60893a3bdc87b52773f872259f6d..fed4111ac31a6d6fbff0c02a6be563485ccec7dc 100644 (file)
@@ -559,19 +559,45 @@ static void dell_update_rfkill(struct work_struct *ignored)
 }
 static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
 
+static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
+                             struct serio *port)
+{
+       static bool extended;
+
+       if (str & 0x20)
+               return false;
+
+       if (unlikely(data == 0xe0)) {
+               extended = true;
+               return false;
+       } else if (unlikely(extended)) {
+               switch (data) {
+               case 0x8:
+                       schedule_delayed_work(&dell_rfkill_work,
+                                             round_jiffies_relative(HZ / 4));
+                       break;
+               }
+               extended = false;
+       }
+
+       return false;
+}
 
 static int __init dell_setup_rfkill(void)
 {
-       int status;
-       int ret;
+       int status, ret, whitelisted;
        const char *product;
 
        /*
-        * rfkill causes trouble on various non Latitudes, according to Dell
-        * actually testing the rfkill functionality is only done on Latitudes.
+        * rfkill support causes trouble on various models, mostly Inspirons.
+        * So we whitelist certain series, and don't support rfkill on others.
         */
+       whitelisted = 0;
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
-       if (!force_rfkill && (!product || strncmp(product, "Latitude", 8)))
+       if (product &&  (strncmp(product, "Latitude", 8) == 0 ||
+                        strncmp(product, "Precision", 9) == 0))
+               whitelisted = 1;
+       if (!force_rfkill && !whitelisted)
                return 0;
 
        get_buffer();
@@ -633,7 +659,16 @@ static int __init dell_setup_rfkill(void)
                        goto err_wwan;
        }
 
+       ret = i8042_install_filter(dell_laptop_i8042_filter);
+       if (ret) {
+               pr_warn("Unable to install key filter\n");
+               goto err_filter;
+       }
+
        return 0;
+err_filter:
+       if (wwan_rfkill)
+               rfkill_unregister(wwan_rfkill);
 err_wwan:
        rfkill_destroy(wwan_rfkill);
        if (bluetooth_rfkill)
@@ -684,7 +719,7 @@ static int dell_send_intensity(struct backlight_device *bd)
 
 out:
        release_buffer();
-       return 0;
+       return ret;
 }
 
 static int dell_get_intensity(struct backlight_device *bd)
@@ -755,30 +790,6 @@ static void touchpad_led_exit(void)
        led_classdev_unregister(&touchpad_led);
 }
 
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
-                             struct serio *port)
-{
-       static bool extended;
-
-       if (str & 0x20)
-               return false;
-
-       if (unlikely(data == 0xe0)) {
-               extended = true;
-               return false;
-       } else if (unlikely(extended)) {
-               switch (data) {
-               case 0x8:
-                       schedule_delayed_work(&dell_rfkill_work,
-                                             round_jiffies_relative(HZ / 4));
-                       break;
-               }
-               extended = false;
-       }
-
-       return false;
-}
-
 static int __init dell_init(void)
 {
        int max_intensity = 0;
@@ -828,12 +839,6 @@ static int __init dell_init(void)
                goto fail_rfkill;
        }
 
-       ret = i8042_install_filter(dell_laptop_i8042_filter);
-       if (ret) {
-               pr_warn("Unable to install key filter\n");
-               goto fail_filter;
-       }
-
        if (quirks && quirks->touchpad_led)
                touchpad_led_init(&platform_device->dev);
 
@@ -885,7 +890,6 @@ static int __init dell_init(void)
 fail_backlight:
        i8042_remove_filter(dell_laptop_i8042_filter);
        cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
        dell_cleanup_rfkill();
 fail_rfkill:
        free_page((unsigned long)bufferpage);
index ed69ec5f36f77b73afb23ff5199c4d2885f29cbe..399e8c5621923890a43ea81f25fde5d0388f152e 100644 (file)
@@ -165,7 +165,6 @@ struct eeepc_laptop {
 
        struct platform_device *platform_device;
        struct acpi_device *device;             /* the device we are in */
-       struct device *hwmon_device;
        struct backlight_device *backlight_device;
 
        struct input_dev *inputdev;
@@ -1068,7 +1067,7 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
        {                                                               \
                return store_sys_hwmon(_get, buf, count);               \
        }                                                               \
-       static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
+       static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name);
 
 EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
 EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
@@ -1076,55 +1075,26 @@ EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
 EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
                         eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
 
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "eeepc\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
-       &sensor_dev_attr_pwm1.dev_attr.attr,
-       &sensor_dev_attr_fan1_input.dev_attr.attr,
-       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
-       &sensor_dev_attr_name.dev_attr.attr,
+static struct attribute *hwmon_attrs[] = {
+       &dev_attr_pwm1.attr,
+       &dev_attr_fan1_input.attr,
+       &dev_attr_pwm1_enable.attr,
        NULL
 };
-
-static struct attribute_group hwmon_attribute_group = {
-       .attrs = hwmon_attributes
-};
-
-static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
-{
-       struct device *hwmon;
-
-       hwmon = eeepc->hwmon_device;
-       if (!hwmon)
-               return;
-       sysfs_remove_group(&hwmon->kobj,
-                          &hwmon_attribute_group);
-       hwmon_device_unregister(hwmon);
-       eeepc->hwmon_device = NULL;
-}
+ATTRIBUTE_GROUPS(hwmon);
 
 static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
 {
+       struct device *dev = &eeepc->platform_device->dev;
        struct device *hwmon;
-       int result;
 
-       hwmon = hwmon_device_register(&eeepc->platform_device->dev);
+       hwmon = devm_hwmon_device_register_with_groups(dev, "eeepc", NULL,
+                                                      hwmon_groups);
        if (IS_ERR(hwmon)) {
                pr_err("Could not register eeepc hwmon device\n");
-               eeepc->hwmon_device = NULL;
                return PTR_ERR(hwmon);
        }
-       eeepc->hwmon_device = hwmon;
-       result = sysfs_create_group(&hwmon->kobj,
-                                   &hwmon_attribute_group);
-       if (result)
-               eeepc_hwmon_exit(eeepc);
-       return result;
+       return 0;
 }
 
 /*
@@ -1480,7 +1450,6 @@ static int eeepc_acpi_add(struct acpi_device *device)
 fail_rfkill:
        eeepc_led_exit(eeepc);
 fail_led:
-       eeepc_hwmon_exit(eeepc);
 fail_hwmon:
        eeepc_input_exit(eeepc);
 fail_input:
@@ -1500,7 +1469,6 @@ static int eeepc_acpi_remove(struct acpi_device *device)
        eeepc_backlight_exit(eeepc);
        eeepc_rfkill_exit(eeepc);
        eeepc_input_exit(eeepc);
-       eeepc_hwmon_exit(eeepc);
        eeepc_led_exit(eeepc);
        eeepc_platform_exit(eeepc);
 
index 9d30d69aa78f24a3bbb1caa71f797f9c520bbc5c..be02bcc346d30cb9dc17fff807aa414d0147216a 100644 (file)
@@ -633,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
 
 static int acpi_fujitsu_add(struct acpi_device *device)
 {
-       int result = 0;
        int state = 0;
        struct input_dev *input;
        int error;
@@ -669,8 +668,8 @@ static int acpi_fujitsu_add(struct acpi_device *device)
        if (error)
                goto err_free_input_dev;
 
-       result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
-       if (result) {
+       error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+       if (error) {
                pr_err("Error reading power state\n");
                goto err_unregister_input_dev;
        }
@@ -700,7 +699,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
                fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
        get_lcd_level();
 
-       return result;
+       return 0;
 
 err_unregister_input_dev:
        input_unregister_device(input);
@@ -708,7 +707,7 @@ err_unregister_input_dev:
 err_free_input_dev:
        input_free_device(input);
 err_stop:
-       return result;
+       return error;
 }
 
 static int acpi_fujitsu_remove(struct acpi_device *device)
@@ -831,8 +830,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
        if (error)
                goto err_free_input_dev;
 
-       result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
-       if (result) {
+       error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+       if (error) {
                pr_err("Error reading power state\n");
                goto err_unregister_input_dev;
        }
@@ -907,7 +906,7 @@ err_free_input_dev:
 err_free_fifo:
        kfifo_free(&fujitsu_hotkey->fifo);
 err_stop:
-       return result;
+       return error;
 }
 
 static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
new file mode 100644 (file)
index 0000000..415348f
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ *  hp-wireless button for Windows 8
+ *
+ *  Copyright (C) 2014 Alex Hung <alex.hung@canonical.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+MODULE_ALIAS("acpi*:HPQ6001:*");
+
+static struct input_dev *hpwl_input_dev;
+
+static const struct acpi_device_id hpwl_ids[] = {
+       {"HPQ6001", 0},
+       {"", 0},
+};
+
+static int hp_wireless_input_setup(void)
+{
+       int err;
+
+       hpwl_input_dev = input_allocate_device();
+       if (!hpwl_input_dev)
+               return -ENOMEM;
+
+       hpwl_input_dev->name = "HP Wireless hotkeys";
+       hpwl_input_dev->phys = "hpq6001/input0";
+       hpwl_input_dev->id.bustype = BUS_HOST;
+       hpwl_input_dev->evbit[0] = BIT(EV_KEY);
+       set_bit(KEY_RFKILL, hpwl_input_dev->keybit);
+
+       err = input_register_device(hpwl_input_dev);
+       if (err)
+               goto err_free_dev;
+
+       return 0;
+
+err_free_dev:
+       input_free_device(hpwl_input_dev);
+       return err;
+}
+
+static void hp_wireless_input_destroy(void)
+{
+       input_unregister_device(hpwl_input_dev);
+}
+
+static void hpwl_notify(struct acpi_device *acpi_dev, u32 event)
+{
+       if (event != 0x80) {
+               pr_info("Received unknown event (0x%x)\n", event);
+               return;
+       }
+
+       input_report_key(hpwl_input_dev, KEY_RFKILL, 1);
+       input_sync(hpwl_input_dev);
+       input_report_key(hpwl_input_dev, KEY_RFKILL, 0);
+       input_sync(hpwl_input_dev);
+}
+
+static int hpwl_add(struct acpi_device *device)
+{
+       int err;
+
+       err = hp_wireless_input_setup();
+       return err;
+}
+
+static int hpwl_remove(struct acpi_device *device)
+{
+       hp_wireless_input_destroy();
+       return 0;
+}
+
+static struct acpi_driver hpwl_driver = {
+       .name   = "hp-wireless",
+       .owner  = THIS_MODULE,
+       .ids    = hpwl_ids,
+       .ops    = {
+               .add    = hpwl_add,
+               .remove = hpwl_remove,
+               .notify = hpwl_notify,
+       },
+};
+
+static int __init hpwl_init(void)
+{
+       int err;
+
+       pr_info("Initializing HPQ6001 module\n");
+       err = acpi_bus_register_driver(&hpwl_driver);
+       if (err) {
+               pr_err("Unable to register HP wireless control driver.\n");
+               goto error_acpi_register;
+       }
+
+       return 0;
+
+error_acpi_register:
+       return err;
+}
+
+static void __exit hpwl_exit(void)
+{
+       pr_info("Exiting HPQ6001 module\n");
+       acpi_bus_unregister_driver(&hpwl_driver);
+}
+
+module_init(hpwl_init);
+module_exit(hpwl_exit);
index aff4d0670edfec733e131c0137fd82f70e0d2206..3dc934438c28f9082144ded5f346f39e3716f4db 100644 (file)
@@ -77,6 +77,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
 static struct acpi_device_id lis3lv02d_device_ids[] = {
        {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
        {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+       {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
  *
  * Returns 0 on success.
  */
-int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
 {
        struct acpi_device *dev = lis3->bus_priv;
        if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
@@ -106,7 +107,7 @@ int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
  *
  * Returns 0 on success.
  */
-int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
+static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
 {
        struct acpi_device *dev = lis3->bus_priv;
        union acpi_object arg0 = { ACPI_TYPE_INTEGER };
@@ -129,7 +130,7 @@ int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
  *
  * Returns 0 on success.
  */
-int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
+static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
 {
        struct acpi_device *dev = lis3->bus_priv;
+       unsigned long long ret; /* Not used when writing */
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
new file mode 100644 (file)
index 0000000..f96626b
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * Baytrail IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements BayTrail-specific access to this interface.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include "intel_baytrail.h"
+
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+       return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
+}
+
+static struct pci_dev *mbi_pdev;       /* one mbi device */
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+       int result;
+
+       if (!mbi_pdev)
+               return -ENODEV;
+
+       if (mcrx) {
+               result = pci_write_config_dword(mbi_pdev,
+                                               BT_MBI_MCRX_OFFSET, mcrx);
+               if (result < 0)
+                       goto iosf_mbi_read_err;
+       }
+
+       result = pci_write_config_dword(mbi_pdev,
+                                       BT_MBI_MCR_OFFSET, mcr);
+       if (result < 0)
+               goto iosf_mbi_read_err;
+
+       result = pci_read_config_dword(mbi_pdev,
+                                      BT_MBI_MDR_OFFSET, mdr);
+       if (result < 0)
+               goto iosf_mbi_read_err;
+
+       return 0;
+
+iosf_mbi_read_err:
+       dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+               result);
+       return result;
+}
+
+/* Hold lock before calling */
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+       int result;
+
+       if (!mbi_pdev)
+               return -ENODEV;
+
+       result = pci_write_config_dword(mbi_pdev,
+                                       BT_MBI_MDR_OFFSET, mdr);
+       if (result < 0)
+               goto iosf_mbi_write_err;
+
+       if (mcrx) {
+               result = pci_write_config_dword(mbi_pdev,
+                        BT_MBI_MCRX_OFFSET, mcrx);
+               if (result < 0)
+                       goto iosf_mbi_write_err;
+       }
+
+       result = pci_write_config_dword(mbi_pdev,
+                                       BT_MBI_MCR_OFFSET, mcr);
+       if (result < 0)
+               goto iosf_mbi_write_err;
+
+       return 0;
+
+iosf_mbi_write_err:
+       dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
+               result);
+       return result;
+}
+
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+       u32 mcr, mcrx;
+       unsigned long flags;
+       int ret;
+
+       /* Access to the GFX unit is handled by GPU code */
+       BUG_ON(port == BT_MBI_UNIT_GFX);
+
+       mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+       mcrx = offset & BT_MBI_MASK_HI;
+
+       spin_lock_irqsave(&iosf_mbi_lock, flags);
+       ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+       spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(bt_mbi_read);
+
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+       u32 mcr, mcrx;
+       unsigned long flags;
+       int ret;
+
+       /* Access to the GFX unit is handled by GPU code */
+       BUG_ON(port == BT_MBI_UNIT_GFX);
+
+       mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+       mcrx = offset & BT_MBI_MASK_HI;
+
+       spin_lock_irqsave(&iosf_mbi_lock, flags);
+       ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+       spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(bt_mbi_write);
+
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+       u32 mcr, mcrx;
+       u32 value;
+       unsigned long flags;
+       int ret;
+
+       /* Access to the GFX unit is handled by GPU code */
+       BUG_ON(port == BT_MBI_UNIT_GFX);
+
+       mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
+       mcrx = offset & BT_MBI_MASK_HI;
+
+       spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+       /* Read current mdr value */
+       ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
+       if (ret < 0) {
+               spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+               return ret;
+       }
+
+       /* Apply mask */
+       value &= ~mask;
+       mdr &= mask;
+       value |= mdr;
+
+       /* Write back */
+       ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
+
+       spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(bt_mbi_modify);
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+                         const struct pci_device_id *unused)
+{
+       int ret;
+
+       ret = pci_enable_device(pdev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "error: could not enable device\n");
+               return ret;
+       }
+
+       mbi_pdev = pci_dev_get(pdev);
+       return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
+       { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+       .name           = "iosf_mbi_pci",
+       .probe          = iosf_mbi_probe,
+       .id_table       = iosf_mbi_pci_ids,
+};
+
+static int __init bt_mbi_init(void)
+{
+       return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit bt_mbi_exit(void)
+{
+       pci_unregister_driver(&iosf_mbi_pci_driver);
+       if (mbi_pdev) {
+               pci_dev_put(mbi_pdev);
+               mbi_pdev = NULL;
+       }
+}
+
+module_init(bt_mbi_init);
+module_exit(bt_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
new file mode 100644 (file)
index 0000000..8bcc311
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
+ */
+
+#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
+#define INTEL_BAYTRAIL_MBI_SYMS_H
+
+#define BT_MBI_MCR_OFFSET      0xD0
+#define BT_MBI_MDR_OFFSET      0xD4
+#define BT_MBI_MCRX_OFFSET     0xD8
+
+#define BT_MBI_RD_MASK         0xFEFFFFFF
+#define BT_MBI_WR_MASK         0X01000000
+
+#define BT_MBI_MASK_HI         0xFFFFFF00
+#define BT_MBI_MASK_LO         0x000000FF
+#define BT_MBI_ENABLE          0xF0
+
+/* BT-SB unit access methods */
+#define BT_MBI_UNIT_AUNIT      0x00
+#define BT_MBI_UNIT_SMC                0x01
+#define BT_MBI_UNIT_CPU                0x02
+#define BT_MBI_UNIT_BUNIT      0x03
+#define BT_MBI_UNIT_PMC                0x04
+#define BT_MBI_UNIT_GFX                0x06
+#define BT_MBI_UNIT_SMI                0x0C
+#define BT_MBI_UNIT_USB                0x43
+#define BT_MBI_UNIT_SATA       0xA3
+#define BT_MBI_UNIT_PCIE       0xA6
+
+/* Read/write opcodes */
+#define BT_MBI_AUNIT_READ      0x10
+#define BT_MBI_AUNIT_WRITE     0x11
+#define BT_MBI_SMC_READ                0x10
+#define BT_MBI_SMC_WRITE       0x11
+#define BT_MBI_CPU_READ                0x10
+#define BT_MBI_CPU_WRITE       0x11
+#define BT_MBI_BUNIT_READ      0x10
+#define BT_MBI_BUNIT_WRITE     0x11
+#define BT_MBI_PMC_READ                0x06
+#define BT_MBI_PMC_WRITE       0x07
+#define BT_MBI_GFX_READ                0x00
+#define BT_MBI_GFX_WRITE       0x01
+#define BT_MBI_SMIO_READ       0x06
+#define BT_MBI_SMIO_WRITE      0x07
+#define BT_MBI_USB_READ                0x06
+#define BT_MBI_USB_WRITE       0x07
+#define BT_MBI_SATA_READ       0x00
+#define BT_MBI_SATA_WRITE      0x01
+#define BT_MBI_PCIE_READ       0x00
+#define BT_MBI_PCIE_WRITE      0x01
+
+/**
+ * bt_mbi_read() - MailBox Interface read command
+ * @port:      port indicating subunit being accessed
+ * @opcode:    port specific read or write opcode
+ * @offset:    register address offset
+ * @mdr:       register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * bt_mbi_write() - MailBox unmasked write command
+ * @port:      port indicating subunit being accessed
+ * @opcode:    port specific read or write opcode
+ * @offset:    register address offset
+ * @mdr:       register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * bt_mbi_modify() - MailBox masked write command
+ * @port:      port indicating subunit being accessed
+ * @opcode:    port specific read or write opcode
+ * @offset:    register address offset
+ * @mdr:       register data being modified
+ * @mask:      mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
index 60ea476a91305c8f357f1950064cee54e92f0f29..76ca094ed01237fff731a46b84c04a5edcedc71b 100644 (file)
 #define IPC_RWBUF_SIZE    20           /* IPC Read buffer Size */
 #define IPC_IOC                  0x100         /* IPC command register IOC bit */
 
-enum {
-       SCU_IPC_LINCROFT,
-       SCU_IPC_PENWELL,
-       SCU_IPC_CLOVERVIEW,
-       SCU_IPC_TANGIER,
-};
+#define PCI_DEVICE_ID_LINCROFT         0x082a
+#define PCI_DEVICE_ID_PENWELL          0x080e
+#define PCI_DEVICE_ID_CLOVERVIEW       0x08ea
+#define PCI_DEVICE_ID_TANGIER          0x11a0
 
 /* intel scu ipc driver data*/
 struct intel_scu_ipc_pdata_t {
@@ -78,35 +76,29 @@ struct intel_scu_ipc_pdata_t {
        u8 irq_mode;
 };
 
-static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
-       [SCU_IPC_LINCROFT] = {
-               .ipc_base = 0xff11c000,
-               .i2c_base = 0xff12b000,
-               .ipc_len = 0x100,
-               .i2c_len = 0x10,
-               .irq_mode = 0,
-       },
-       [SCU_IPC_PENWELL] = {
-               .ipc_base = 0xff11c000,
-               .i2c_base = 0xff12b000,
-               .ipc_len = 0x100,
-               .i2c_len = 0x10,
-               .irq_mode = 1,
-       },
-       [SCU_IPC_CLOVERVIEW] = {
-               .ipc_base = 0xff11c000,
-               .i2c_base = 0xff12b000,
-               .ipc_len = 0x100,
-               .i2c_len = 0x10,
-               .irq_mode = 1,
-       },
-       [SCU_IPC_TANGIER] = {
-               .ipc_base = 0xff009000,
-               .i2c_base  = 0xff00d000,
-               .ipc_len  = 0x100,
-               .i2c_len = 0x10,
-               .irq_mode = 0,
-       },
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
+       .ipc_base = 0xff11c000,
+       .i2c_base = 0xff12b000,
+       .ipc_len = 0x100,
+       .i2c_len = 0x10,
+       .irq_mode = 0,
+};
+
+/* Penwell and Cloverview */
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
+       .ipc_base = 0xff11c000,
+       .i2c_base = 0xff12b000,
+       .ipc_len = 0x100,
+       .i2c_len = 0x10,
+       .irq_mode = 1,
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
+       .ipc_base = 0xff009000,
+       .i2c_base  = 0xff00d000,
+       .ipc_len  = 0x100,
+       .i2c_len = 0x10,
+       .irq_mode = 0,
 };
 
 static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
@@ -583,15 +575,14 @@ static irqreturn_t ioc(int irq, void *dev_id)
  */
 static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-       int err, pid;
+       int err;
        struct intel_scu_ipc_pdata_t *pdata;
        resource_size_t pci_resource;
 
        if (ipcdev.pdev)                /* We support only one SCU */
                return -EBUSY;
 
-       pid = id->driver_data;
-       pdata = &intel_scu_ipc_pdata[pid];
+       pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
 
        ipcdev.pdev = pci_dev_get(dev);
        ipcdev.irq_mode = pdata->irq_mode;
@@ -650,11 +641,21 @@ static void ipc_remove(struct pci_dev *pdev)
 }
 
 static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
-       {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
-       {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
-       {PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
-       {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
-       { 0,}
+       {
+               PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
+               (kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
+       }, {
+               PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
+               (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+       }, {
+               PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
+               (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
+       }, {
+               PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
+               (kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
+       }, {
+               0,
+       }
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
 
index 3c59c0a3ee0f0f2d57a895f853d6915c37d39822..f4bad83053a9d5972e504a1cc4c0f80c8f6ba96b 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mxm-wmi.h>
 #include <linux/acpi.h>
 
 MODULE_AUTHOR("Dave Airlie");
index 563e4f595f836c1d1ad966688d61c33e4a8d33d5..8f8551a63cc0b6de1bbb55a63ad538e760b72b63 100644 (file)
@@ -789,7 +789,7 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
                void *buffer, size_t buflen)
 {
        int ret = 0;
-       size_t len = len;
+       size_t len;
        union acpi_object *object = __call_snc_method(handle, name, value);
 
        if (!object)
index 7ad1ed091f9225fc572a1b70fde9ecf622d06ad1..90dd7645a9e504449fd837ec1ad280775fc84ae9 100644 (file)
@@ -149,6 +149,7 @@ static const struct acpi_device_id toshiba_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);
 
 static const struct key_entry toshiba_acpi_keymap[] = {
+       { KE_KEY, 0x9e, { KEY_RFKILL } },
        { KE_KEY, 0x101, { KEY_MUTE } },
        { KE_KEY, 0x102, { KEY_ZOOMOUT } },
        { KE_KEY, 0x103, { KEY_ZOOMIN } },
index 92bd22ce676012697be18504d96741b4a3910466..9cbc567698cefd0d3933f7c77198d4ccf34ceeeb 100644 (file)
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        struct dasd_diag_req *dreq;
        struct dasd_diag_bio *dbio;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int count, datasize;
        sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Check struct bio and count the number of blocks for the request. */
        count = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
        }
        /* Paranoia. */
        if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        dbio = dreq->bio;
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        memset(dbio, 0, sizeof (struct dasd_diag_bio));
                        dbio->type = rw_cmd;
                        dbio->block_number = recid + 1;
index 95e45782692fa7bb2a89e9ec566a6a035f736381..2e8e0755070b609b13e9e49f5db5f17ac232ef69 100644 (file)
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int off;
        int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        count = 0;
        cidaw = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Eckd can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-               if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-                       cidaw += bv->bv_len >> (block->s2b_shift + 9);
+               if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+                       cidaw += bv.bv_len >> (block->s2b_shift + 9);
 #endif
        }
        /* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                              last_rec - recid + 1, cmd, basedev, blksize);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
-                               memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+                               memcpy(copy + bv.bv_offset, dst, bv.bv_len);
                        if (copy)
-                               dst = copy + bv->bv_offset;
+                               dst = copy + bv.bv_offset;
                }
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        sector_t trkid = recid;
                        unsigned int recoffs = sector_div(trkid, blk_per_trk);
                        rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *idaw_dst;
        unsigned int cidaw, cplength, datasize;
        unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        idaw_dst = NULL;
        idaw_len = 0;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               seg_len = bv->bv_len;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               seg_len = bv.bv_len;
                while (seg_len) {
                        if (new_track) {
                                trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 {
        struct dasd_ccw_req *cqr;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int trkcount, ctidaw;
        unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                new_track = 1;
                recid = first_rec;
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv->bv_page) + bv->bv_offset;
-                       seg_len = bv->bv_len;
+                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       seg_len = bv.bv_len;
                        while (seg_len) {
                                if (new_track) {
                                        trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                }
        } else {
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv->bv_page) + bv->bv_offset;
+                       dst = page_address(bv.bv_page) + bv.bv_offset;
                        last_tidaw = itcw_add_tidaw(itcw, 0x00,
-                                                   dst, bv->bv_len);
+                                                   dst, bv.bv_len);
                        if (IS_ERR(last_tidaw)) {
                                ret = -EINVAL;
                                goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned char cmd;
        unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
                        idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               seg_len = bv->bv_len;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               seg_len = bv.bv_len;
                if (cmd == DASD_ECKD_CCW_READ_TRACK)
                        memset(dst, 0, seg_len);
                if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        struct dasd_eckd_private *private;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *cda;
        unsigned int blksize, blk_per_trk, off;
        sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->uses_cdl && recid <= 2*blk_per_trk)
                                ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                                        cda = (char *)((addr_t) ccw->cda);
                                if (dst != cda) {
                                        if (rq_data_dir(req) == READ)
-                                               memcpy(dst, cda, bv->bv_len);
+                                               memcpy(dst, cda, bv.bv_len);
                                        kmem_cache_free(dasd_page_cache,
                                            (void *)((addr_t)cda & PAGE_MASK));
                                }
index 9cbc8c32ba595739cdff63da752a8f080b51e0de..2c8e68bf9a1cd658be919151387fe3ae24bffad3 100644 (file)
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        int count, cidaw, cplength, datasize;
        sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        count = 0;
        cidaw = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-               if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-                       cidaw += bv->bv_len / blksize;
+               if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+                       cidaw += bv.bv_len / blksize;
 #endif
        }
        /* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        }
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
-                               memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+                               memcpy(copy + bv.bv_offset, dst, bv.bv_len);
                        if (copy)
-                               dst = copy + bv->bv_offset;
+                               dst = copy + bv.bv_offset;
                }
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Locate record for stupid devices. */
                        if (private->rdc_data.mode.bits.data_chain == 0) {
                                ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        struct dasd_fba_private *private;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *cda;
        unsigned int blksize, off;
        int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->rdc_data.mode.bits.data_chain != 0)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->rdc_data.mode.bits.data_chain == 0)
                                ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                                        cda = (char *)((addr_t) ccw->cda);
                                if (dst != cda) {
                                        if (rq_data_dir(req) == READ)
-                                               memcpy(dst, cda, bv->bv_len);
+                                               memcpy(dst, cda, bv.bv_len);
                                        kmem_cache_free(dasd_page_cache,
                                            (void *)((addr_t)cda & PAGE_MASK));
                                }
index 6eca019bcf30a50edfab1a80daf1b351d2320474..ebf41e228e55836e6ec764b105c357e1856c9d1b 100644 (file)
@@ -808,18 +808,19 @@ static void
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
        struct dcssblk_dev_info *dev_info;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long index;
        unsigned long page_addr;
        unsigned long source_addr;
        unsigned long bytes_done;
-       int i;
 
        bytes_done = 0;
        dev_info = bio->bi_bdev->bd_disk->private_data;
        if (dev_info == NULL)
                goto fail;
-       if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+       if ((bio->bi_iter.bi_sector & 7) != 0 ||
+           (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
        if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
                }
        }
 
-       index = (bio->bi_sector >> 3);
-       bio_for_each_segment(bvec, bio, i) {
+       index = (bio->bi_iter.bi_sector >> 3);
+       bio_for_each_segment(bvec, bio, iter) {
                page_addr = (unsigned long)
-                       page_address(bvec->bv_page) + bvec->bv_offset;
+                       page_address(bvec.bv_page) + bvec.bv_offset;
                source_addr = dev_info->start + (index<<12) + bytes_done;
-               if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+               if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
                        // More paranoia.
                        goto fail;
                if (bio_data_dir(bio) == READ) {
                        memcpy((void*)page_addr, (void*)source_addr,
-                               bvec->bv_len);
+                               bvec.bv_len);
                } else {
                        memcpy((void*)source_addr, (void*)page_addr,
-                               bvec->bv_len);
+                               bvec.bv_len);
                }
-               bytes_done += bvec->bv_len;
+               bytes_done += bvec.bv_len;
        }
        bio_endio(bio, 0);
        return;
index d0ab5019d885cea6113f677a4f58990fa4a3ca55..76bed1743db1c7ef23576282b13d9ef8f551aed8 100644 (file)
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
 
        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
        msb->data_addr = (u64) aidaw;
 
        rq_for_each_segment(bv, scmrq->request, iter) {
-               WARN_ON(bv->bv_offset);
-               msb->blk_count += bv->bv_len >> 12;
-               aidaw->data_addr = (u64) page_address(bv->bv_page);
+               WARN_ON(bv.bv_offset);
+               msb->blk_count += bv.bv_len >> 12;
+               aidaw->data_addr = (u64) page_address(bv.bv_page);
                aidaw++;
        }
 }
index 27f930cd657fcdd3a223412cb68c393ebd565556..9aae909d47a53c88d6345101db2f344b8bc1f7a9 100644 (file)
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        int i = 0;
        u64 addr;
 
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
                        i++;
                }
                rq_for_each_segment(bv, req, iter) {
-                       aidaw->data_addr = (u64) page_address(bv->bv_page);
+                       aidaw->data_addr = (u64) page_address(bv.bv_page);
                        aidaw++;
                        i++;
                }
index 58141f0651f280b4cc48b510f09455edf8b6cc0a..6969d39f1e2eba7de41856cabc0d1557b7f3efe4 100644 (file)
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
 static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
        xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned int index;
        unsigned long page_addr;
        unsigned long bytes;
-       int i;
 
-       if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+       if ((bio->bi_iter.bi_sector & 7) != 0 ||
+           (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
-       if ((bio->bi_size >> 12) > xdev->size)
+       if ((bio->bi_iter.bi_size >> 12) > xdev->size)
                /* Request size is no page-aligned. */
                goto fail;
-       if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+       if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
                goto fail;
-       index = (bio->bi_sector >> 3) + xdev->offset;
-       bio_for_each_segment(bvec, bio, i) {
+       index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+       bio_for_each_segment(bvec, bio, iter) {
                page_addr = (unsigned long)
-                       kmap(bvec->bv_page) + bvec->bv_offset;
-               bytes = bvec->bv_len;
+                       kmap(bvec.bv_page) + bvec.bv_offset;
+               bytes = bvec.bv_len;
                if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
                        /* More paranoia. */
                        goto fail;
index ac0bdded060fb259b483ecbe88c433550fff27c7..a0de045eb227d5eeb845bf9f6c925bbfcfe73634 100644 (file)
@@ -738,6 +738,8 @@ struct qeth_discipline {
        int (*freeze)(struct ccwgroup_device *);
        int (*thaw) (struct ccwgroup_device *);
        int (*restore)(struct ccwgroup_device *);
+       int (*control_event_handler)(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd);
 };
 
 struct qeth_vlan_vid {
@@ -948,13 +950,10 @@ int qeth_query_card_info(struct qeth_card *card,
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
        int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
        void *reply_param);
-void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
-void qeth_bridgeport_query_support(struct qeth_card *card);
 int qeth_bridgeport_query_ports(struct qeth_card *card,
        enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
-void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
 int qeth_get_elements_for_frags(struct sk_buff *);
index c05dacbf4e23563eb73eafb6687080b9bad58faf..c3a83df07894e51195d914111c15be2c615f09c1 100644 (file)
@@ -69,6 +69,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
+EXPORT_SYMBOL_GPL(qeth_wq);
 
 static void qeth_close_dev_handler(struct work_struct *work)
 {
@@ -616,15 +617,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
                                qeth_schedule_recovery(card);
                                return NULL;
                        case IPA_CMD_SETBRIDGEPORT:
-                               if (cmd->data.sbp.hdr.command_code ==
-                                       IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
-                                       qeth_bridge_state_change(card, cmd);
-                                       return NULL;
-                               } else
-                                       return cmd;
                        case IPA_CMD_ADDRESS_CHANGE_NOTIF:
-                               qeth_bridge_host_event(card, cmd);
-                               return NULL;
+                               if (card->discipline->control_event_handler
+                                                               (card, cmd))
+                                       return cmd;
+                               else
+                                       return NULL;
                        case IPA_CMD_MODCCID:
                                return cmd;
                        case IPA_CMD_REGISTER_LOCAL_ADDR:
@@ -4973,10 +4971,6 @@ retriable:
                qeth_query_setadapterparms(card);
        if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
                qeth_query_setdiagass(card);
-       qeth_bridgeport_query_support(card);
-       if (card->options.sbp.supported_funcs)
-               dev_info(&card->gdev->dev,
-               "The device represents a HiperSockets Bridge Capable Port\n");
        return 0;
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
index 914d2c121fd87c92fe9ed4acbddee3b5bee2c95d..0710550093ce6ac5fea8ddbf80f8409bbc1186f6 100644 (file)
@@ -33,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
                                            unsigned long));
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
+static void qeth_bridgeport_query_support(struct qeth_card *card);
+static void qeth_bridge_state_change(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd);
+static void qeth_bridge_host_event(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd);
 
 static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -989,6 +994,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                rc = -ENODEV;
                goto out_remove;
        }
+       qeth_bridgeport_query_support(card);
+       if (card->options.sbp.supported_funcs)
+               dev_info(&card->gdev->dev,
+               "The device represents a HiperSockets Bridge Capable Port\n");
        qeth_trace_features(card);
 
        if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1233,6 +1242,26 @@ out:
        return rc;
 }
 
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd)
+{
+       switch (cmd->hdr.command) {
+       case IPA_CMD_SETBRIDGEPORT:
+               if (cmd->data.sbp.hdr.command_code ==
+                               IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+                       qeth_bridge_state_change(card, cmd);
+                       return 0;
+               } else
+                       return 1;
+       case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+               qeth_bridge_host_event(card, cmd);
+               return 0;
+       default:
+               return 1;
+       }
+}
+
 struct qeth_discipline qeth_l2_discipline = {
        .start_poll = qeth_qdio_start_poll,
        .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -1246,6 +1275,7 @@ struct qeth_discipline qeth_l2_discipline = {
        .freeze = qeth_l2_pm_suspend,
        .thaw = qeth_l2_pm_resume,
        .restore = qeth_l2_pm_resume,
+       .control_event_handler = qeth_l2_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
 
@@ -1463,7 +1493,8 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
        kfree(data);
 }
 
-void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
+static void qeth_bridge_state_change(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd)
 {
        struct qeth_sbp_state_change *qports =
                 &cmd->data.sbp.data.state_change;
@@ -1488,7 +1519,6 @@ void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
                        sizeof(struct qeth_sbp_state_change) + extrasize);
        queue_work(qeth_wq, &data->worker);
 }
-EXPORT_SYMBOL(qeth_bridge_state_change);
 
 struct qeth_bridge_host_data {
        struct work_struct worker;
@@ -1528,7 +1558,8 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
        kfree(data);
 }
 
-void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
+static void qeth_bridge_host_event(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd)
 {
        struct qeth_ipacmd_addr_change *hostevs =
                 &cmd->data.addrchange;
@@ -1560,7 +1591,6 @@ void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
                        sizeof(struct qeth_ipacmd_addr_change) + extrasize);
        queue_work(qeth_wq, &data->worker);
 }
-EXPORT_SYMBOL(qeth_bridge_host_event);
 
 /* SETBRIDGEPORT support; sending commands */
 
@@ -1683,7 +1713,7 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
  * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
  * strucutre: card->options.sbp.supported_funcs.
  */
-void qeth_bridgeport_query_support(struct qeth_card *card)
+static void qeth_bridgeport_query_support(struct qeth_card *card)
 {
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
@@ -1709,7 +1739,6 @@ void qeth_bridgeport_query_support(struct qeth_card *card)
        }
        card->options.sbp.supported_funcs = cbctl.data.supported;
 }
-EXPORT_SYMBOL_GPL(qeth_bridgeport_query_support);
 
 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
index c1b0b2761f8dce6e0d3ad709902eeb19a925beba..0f430424c3b8b0aec231c3e2ca69d739c0c29701 100644 (file)
@@ -3593,6 +3593,13 @@ out:
        return rc;
 }
 
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l3_control_event(struct qeth_card *card,
+                                       struct qeth_ipa_cmd *cmd)
+{
+       return 1;
+}
+
 struct qeth_discipline qeth_l3_discipline = {
        .start_poll = qeth_qdio_start_poll,
        .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -3606,6 +3613,7 @@ struct qeth_discipline qeth_l3_discipline = {
        .freeze = qeth_l3_pm_suspend,
        .thaw = qeth_l3_pm_resume,
        .restore = qeth_l3_pm_resume,
+       .control_event_handler = qeth_l3_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
 
index c1441ed282eb911ff67a6363ce5a78f6cbe45199..c7763e482eb235a12d4b48887070ec88154580ff 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
index fc1339cf91ac65d135ef5bf3c14e242462e1453b..7c71e7b4febf96a2cebc62446acee68550116d97 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/fs.h>
 #include <linux/errno.h>
 #include <linux/major.h>
-#include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/ioport.h>              /* request_region */
 #include <linux/slab.h>
index ddbe5a9e713dfba93218ad886e5df1f49b9fd19c..af15a2fdab5e92e9473b1dbf7184a2f024a2e236 100644 (file)
@@ -19,7 +19,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/ioport.h>
index d9f268f237749df06e63f11cec4c19c69326f995..25c738e9ef19cea55a7a48ccbfb4b874789b056b 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
 #include <linux/poll.h>
-#include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
index b0aae0536d588441aee8409e200ba417a6080b39..b7acafc8509956d583a506b54b7fdab30a723eff 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/mm.h>
 #include <linux/of.h>
index 446b85110a1fc0a69b07e42bb3ecc7144d76d1ce..0cac7d8fd0f7cac75b0ecc2d63662d18eeda4a56 100644 (file)
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 
        /* do we need to support multiple segments? */
-       if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-               printk("%s: multiple segments req %u %u, rsp %u %u\n",
-                      __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                      bio_segments(rsp->bio), blk_rq_bytes(rsp));
+       if (bio_multiple_segments(req->bio) ||
+           bio_multiple_segments(rsp->bio)) {
+               printk("%s: multiple segments req %u, rsp %u\n",
+                      __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
index 9d26637308bebe2fc2b2fdec681891cfcbaf9b07..410f4a3e88887a6f0087c06f9da42f22b61f556d 100644 (file)
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
        Mpi2SmpPassthroughRequest_t *mpi_request;
        Mpi2SmpPassthroughReply_t *mpi_reply;
-       int rc, i;
+       int rc;
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        void *pci_addr_out = NULL;
        u16 wait_state_count;
        struct request *rsp = req->next_rq;
-       struct bio_vec *bvec = NULL;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        if (!rsp) {
                printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
        /* Check if the request is split across multiple segments */
-       if (bio_segments(req->bio) > 1) {
+       if (bio_multiple_segments(req->bio)) {
                u32 offset = 0;
 
                /* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                        goto out;
                }
 
-               bio_for_each_segment(bvec, req->bio, i) {
+               bio_for_each_segment(bvec, req->bio, iter) {
                        memcpy(pci_addr_out + offset,
-                           page_address(bvec->bv_page) + bvec->bv_offset,
-                           bvec->bv_len);
-                       offset += bvec->bv_len;
+                           page_address(bvec.bv_page) + bvec.bv_offset,
+                           bvec.bv_len);
+                       offset += bvec.bv_len;
                }
        } else {
                dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* Check if the response needs to be populated across
         * multiple segments */
-       if (bio_segments(rsp->bio) > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                    &pci_dma_in);
                if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       if (bio_segments(req->bio) > 1) {
+       if (bio_multiple_segments(req->bio)) {
                ioc->base_add_sg_single(psge, sgl_flags |
                    (blk_rq_bytes(req) - 4), pci_dma_out);
        } else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       if (bio_segments(rsp->bio) > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                ioc->base_add_sg_single(psge, sgl_flags |
                    (blk_rq_bytes(rsp) + 4), pci_dma_in);
        } else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                    le16_to_cpu(mpi_reply->ResponseDataLength);
                /* check if the resp needs to be copied from the allocated
                 * pci mem */
-               if (bio_segments(rsp->bio) > 1) {
+               if (bio_multiple_segments(rsp->bio)) {
                        u32 offset = 0;
                        u32 bytes_to_copy =
                            le16_to_cpu(mpi_reply->ResponseDataLength);
-                       bio_for_each_segment(bvec, rsp->bio, i) {
-                               if (bytes_to_copy <= bvec->bv_len) {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
+                       bio_for_each_segment(bvec, rsp->bio, iter) {
+                               if (bytes_to_copy <= bvec.bv_len) {
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
                                            offset, bytes_to_copy);
                                        break;
                                } else {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
-                                           offset, bvec->bv_len);
-                                       bytes_to_copy -= bvec->bv_len;
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
+                                           offset, bvec.bv_len);
+                                       bytes_to_copy -= bvec.bv_len;
                                }
-                               offset += bvec->bv_len;
+                               offset += bvec.bv_len;
                        }
                }
        } else {
index e771a88c6a7441c45c6b49e8ffb85d5258b22e99..65170cb1a00fa5fa0ca3c95a94e3ed686edca9f7 100644 (file)
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
        Mpi2SmpPassthroughRequest_t *mpi_request;
        Mpi2SmpPassthroughReply_t *mpi_reply;
-       int rc, i;
+       int rc;
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        void *pci_addr_out = NULL;
        u16 wait_state_count;
        struct request *rsp = req->next_rq;
-       struct bio_vec *bvec = NULL;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        if (!rsp) {
                pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
        /* Check if the request is split across multiple segments */
-       if (req->bio->bi_vcnt > 1) {
+       if (bio_multiple_segments(req->bio)) {
                u32 offset = 0;
 
                /* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                        goto out;
                }
 
-               bio_for_each_segment(bvec, req->bio, i) {
+               bio_for_each_segment(bvec, req->bio, iter) {
                        memcpy(pci_addr_out + offset,
-                           page_address(bvec->bv_page) + bvec->bv_offset,
-                           bvec->bv_len);
-                       offset += bvec->bv_len;
+                           page_address(bvec.bv_page) + bvec.bv_offset,
+                           bvec.bv_len);
+                       offset += bvec.bv_len;
                }
        } else {
                dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* Check if the response needs to be populated across
         * multiple segments */
-       if (rsp->bio->bi_vcnt > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                    &pci_dma_in);
                if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        psge = &mpi_request->SGL;
 
-       if (req->bio->bi_vcnt > 1)
+       if (bio_multiple_segments(req->bio))
                ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
                    pci_dma_in, (blk_rq_bytes(rsp) + 4));
        else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
                /* check if the resp needs to be copied from the allocated
                 * pci mem */
-               if (rsp->bio->bi_vcnt > 1) {
+               if (bio_multiple_segments(rsp->bio)) {
                        u32 offset = 0;
                        u32 bytes_to_copy =
                            le16_to_cpu(mpi_reply->ResponseDataLength);
-                       bio_for_each_segment(bvec, rsp->bio, i) {
-                               if (bytes_to_copy <= bvec->bv_len) {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
+                       bio_for_each_segment(bvec, rsp->bio, iter) {
+                               if (bytes_to_copy <= bvec.bv_len) {
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
                                            offset, bytes_to_copy);
                                        break;
                                } else {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
-                                           offset, bvec->bv_len);
-                                       bytes_to_copy -= bvec->bv_len;
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
+                                           offset, bvec.bv_len);
+                                       bytes_to_copy -= bvec.bv_len;
                                }
-                               offset += bvec->bv_len;
+                               offset += bvec.bv_len;
                        }
                }
        } else {
index aa66361ed44b71772da913c74c69640b43909f23..bac04c2335aaf997c73e7b3b8b8a08129bfca455 100644 (file)
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
 
        bio->bi_rw &= ~REQ_WRITE;
        or->in.bio = bio;
-       or->in.total_bytes = bio->bi_size;
+       or->in.total_bytes = bio->bi_iter.bi_size;
        return 0;
 }
 
index 9846c6ab2aaa92eeab130a92fe4d7b8d539b624c..470954aba7289a758a650cd82b2f1dfe50ae54f1 100644 (file)
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
        if (sdkp->device->no_write_same)
                return BLKPREP_KILL;
 
-       BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+       BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
        sector >>= ilog2(sdp->sector_size) - 9;
        nr_sectors >>= ilog2(sdp->sector_size) - 9;
index 6174ca4ea27594487d7dc0828d9e21841742b8ed..a7a691d0af7d105a431ba3b560a5496acb71d58b 100644 (file)
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
        struct bio *bio;
        struct scsi_disk *sdkp;
        struct sd_dif_tuple *sdt;
-       unsigned int i, j;
        u32 phys, virt;
 
        sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
        phys = hw_sector & 0xffffffff;
 
        __rq_for_each_bio(bio, rq) {
-               struct bio_vec *iv;
+               struct bio_vec iv;
+               struct bvec_iter iter;
+               unsigned int j;
 
                /* Already remapped? */
                if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
                        break;
 
-               virt = bio->bi_integrity->bip_sector & 0xffffffff;
+               virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-               bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page)
-                               + iv->bv_offset;
+               bip_for_each_vec(iv, bio->bi_integrity, iter) {
+                       sdt = kmap_atomic(iv.bv_page)
+                               + iv.bv_offset;
 
-                       for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+                       for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
                                if (be32_to_cpu(sdt->ref_tag) == virt)
                                        sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
        struct scsi_disk *sdkp;
        struct bio *bio;
        struct sd_dif_tuple *sdt;
-       unsigned int i, j, sectors, sector_sz;
+       unsigned int j, sectors, sector_sz;
        u32 phys, virt;
 
        sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
                phys >>= 3;
 
        __rq_for_each_bio(bio, scmd->request) {
-               struct bio_vec *iv;
+               struct bio_vec iv;
+               struct bvec_iter iter;
 
-               virt = bio->bi_integrity->bip_sector & 0xffffffff;
+               virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-               bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page)
-                               + iv->bv_offset;
+               bip_for_each_vec(iv, bio->bi_integrity, iter) {
+                       sdt = kmap_atomic(iv.bv_page)
+                               + iv.bv_offset;
 
-                       for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+                       for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
                                if (sectors == 0) {
                                        kunmap_atomic(sdt);
index 5338e8d4c50fa998582fb86209f66c95a11419a8..0718905adeb256cb2a2dd12336f3dbb7db365d23 100644 (file)
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
        struct cl_object     *obj = ll_i2info(inode)->lli_clob;
        pgoff_t        offset;
        int                ret;
-       int                i;
        int                rw;
        obd_count            page_count = 0;
-       struct bio_vec       *bvec;
+       struct bio_vec       bvec;
+       struct bvec_iter   iter;
        struct bio         *bio;
        ssize_t        bytes;
 
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
        for (bio = head; bio != NULL; bio = bio->bi_next) {
                LASSERT(rw == bio->bi_rw);
 
-               offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
-               bio_for_each_segment(bvec, bio, i) {
-                       BUG_ON(bvec->bv_offset != 0);
-                       BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+               offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       BUG_ON(bvec.bv_offset != 0);
+                       BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
 
-                       pages[page_count] = bvec->bv_page;
+                       pages[page_count] = bvec.bv_page;
                        offsets[page_count] = offset;
                        page_count++;
-                       offset += bvec->bv_len;
+                       offset += bvec.bv_len;
                }
                LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
        }
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
        bio = &lo->lo_bio;
        while (*bio && (*bio)->bi_rw == rw) {
                CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
-                      (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+                      (unsigned long long)(*bio)->bi_iter.bi_sector,
+                      (*bio)->bi_iter.bi_size,
                       page_count, (*bio)->bi_vcnt);
                if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
                        break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
                goto err;
 
        CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-              (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+              (unsigned long long)old_bio->bi_iter.bi_sector,
+              old_bio->bi_iter.bi_size);
 
        spin_lock_irq(&lo->lo_lock);
        inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
        loop_add_bio(lo, old_bio);
        return;
 err:
-       cfs_bio_io_error(old_bio, old_bio->bi_size);
+       cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
 }
 
 
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
        while (bio) {
                struct bio *tmp = bio->bi_next;
                bio->bi_next = NULL;
-               cfs_bio_endio(bio, bio->bi_size, ret);
+               cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
                bio = tmp;
        }
 }
index 3277d9838f4e928ab3555720a186e476e826a720..108f2733106d77c00b73003e106febe6aa8998cc 100644 (file)
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
        u64 start, end, bound;
 
        /* unaligned request */
-       if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+       if (unlikely(bio->bi_iter.bi_sector &
+                    (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
-       if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+       if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;
 
-       start = bio->bi_sector;
-       end = start + (bio->bi_size >> SECTOR_SHIFT);
+       start = bio->bi_iter.bi_sector;
+       end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range range */
        if (unlikely(start >= bound || end > bound || start > end))
@@ -680,9 +681,10 @@ out:
 
 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 {
-       int i, offset;
+       int offset;
        u32 index;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        switch (rw) {
        case READ:
@@ -693,36 +695,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
                break;
        }
 
-       index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-       offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+       index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+       offset = (bio->bi_iter.bi_sector &
+                 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;
 
-               if (bvec->bv_len > max_transfer_size) {
+               if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;
 
-                       bv.bv_page = bvec->bv_page;
+                       bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
-                       bv.bv_offset = bvec->bv_offset;
+                       bv.bv_offset = bvec.bv_offset;
 
                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;
 
-                       bv.bv_len = bvec->bv_len - max_transfer_size;
+                       bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
                                goto out;
                } else
-                       if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+                       if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
                            < 0)
                                goto out;
 
-               update_position(&index, &offset, bvec);
+               update_position(&index, &offset, &bvec);
        }
 
        set_bit(BIO_UPTODATE, &bio->bi_flags);
index c87959f12760462ca76740737e7cc839bdd4fc58..2d29356d0c85a076e90db99bbda9f1a428f9c336 100644 (file)
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
-       bio->bi_sector = lba;
+       bio->bi_iter.bi_sector = lba;
 
        return bio;
 }
index d98e4334897040cd7e6a482fee5a7a913338c4ec..67423805e6d9b0ed761c26958e7cf2df9a31e8f0 100644 (file)
@@ -455,11 +455,11 @@ static void load_code(struct icom_port *icom_port)
        for (index = 0; index < fw->size; index++)
                new_page[index] = fw->data[index];
 
-       release_firmware(fw);
-
        writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length);
        writel(temp_pci, &icom_port->dram->mac_load_addr);
 
+       release_firmware(fw);
+
        /*Setting the syncReg to 0x80 causes adapter to start downloading
           the personality code into adapter instruction RAM.
           Once code is loaded, it will begin executing and, based on
index 9cbd3acaf37fca13a55ea6b9a854e259790d1758..8fa1134e005165dc2ca33ef43b9d575368602590 100644 (file)
@@ -1508,10 +1508,14 @@ static int pch_uart_verify_port(struct uart_port *port,
                        __func__);
                return -EOPNOTSUPP;
 #endif
-               dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
-               if (!priv->use_dma)
+               if (!priv->use_dma) {
                        pch_request_dma(port);
-               priv->use_dma = 1;
+                       if (priv->chan_rx)
+                               priv->use_dma = 1;
+               }
+               dev_info(priv->port.dev, "PCH UART: %s\n",
+                               priv->use_dma ?
+                               "Use DMA Mode" : "No DMA");
        }
 
        return 0;
index c1af04d46682657794b4893f3eac571a13acc685..9cd706df3b3351cbfa10fa14c254b36724601cc1 100644 (file)
@@ -1209,7 +1209,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 
        /* reset the fifos (and setup the uart) */
        s3c24xx_serial_resetport(port, cfg);
-       clk_disable_unprepare(ourport->clk);
        return 0;
 }
 
@@ -1287,6 +1286,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
        uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
        platform_set_drvdata(pdev, &ourport->port);
 
+       /*
+        * Deactivate the clock enabled in s3c24xx_serial_init_port here,
+        * so that a potential re-enablement through the pm-callback overlaps
+        * and keeps the clock enabled in this case.
+        */
+       clk_disable_unprepare(ourport->clk);
+
 #ifdef CONFIG_SAMSUNG_CLOCK
        ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
        if (ret < 0)
index cde4619327606e87b1869fb10fee0ebcefe95c46..7309ac704e2641e410fe75f60278e273348573e3 100644 (file)
@@ -1577,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
 static int do_unregister_framebuffer(struct fb_info *fb_info);
 
 #define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
-                                    const char *name, bool primary)
+static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
+                                             const char *name, bool primary)
 {
-       int i;
+       int i, ret;
 
        /* check all firmware fbs and kick off if the base addr overlaps */
        for (i = 0 ; i < FB_MAX; i++) {
@@ -1599,22 +1599,29 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
                        printk(KERN_INFO "fb: conflicting fb hw usage "
                               "%s vs %s - removing generic driver\n",
                               name, registered_fb[i]->fix.id);
-                       do_unregister_framebuffer(registered_fb[i]);
+                       ret = do_unregister_framebuffer(registered_fb[i]);
+                       if (ret)
+                               return ret;
                }
        }
+
+       return 0;
 }
 
 static int do_register_framebuffer(struct fb_info *fb_info)
 {
-       int i;
+       int i, ret;
        struct fb_event event;
        struct fb_videomode mode;
 
        if (fb_check_foreignness(fb_info))
                return -ENOSYS;
 
-       do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
-                                        fb_is_primary_device(fb_info));
+       ret = do_remove_conflicting_framebuffers(fb_info->apertures,
+                                                fb_info->fix.id,
+                                                fb_is_primary_device(fb_info));
+       if (ret)
+               return ret;
 
        if (num_registered_fb == FB_MAX)
                return -ENXIO;
@@ -1739,12 +1746,16 @@ int unlink_framebuffer(struct fb_info *fb_info)
 }
 EXPORT_SYMBOL(unlink_framebuffer);
 
-void remove_conflicting_framebuffers(struct apertures_struct *a,
-                                    const char *name, bool primary)
+int remove_conflicting_framebuffers(struct apertures_struct *a,
+                                   const char *name, bool primary)
 {
+       int ret;
+
        mutex_lock(&registration_lock);
-       do_remove_conflicting_framebuffers(a, name, primary);
+       ret = do_remove_conflicting_framebuffers(a, name, primary);
        mutex_unlock(&registration_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
index f51646f15cf2d4a6826c35f52d6f0f1074c6d898..bbeb8dd7f108fe9f65111337b7d1a30a039c3e2c 100644 (file)
@@ -3726,7 +3726,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_irq_safe(&pdev->dev);
 
        r = dispc_runtime_get();
        if (r)
index 5be6e919f7850d7d101542b55120163cec935162..4c4c566c52a35c73e582aa27b35fb7821ae69f8a 100644 (file)
@@ -87,6 +87,14 @@ config DA9055_WATCHDOG
          This driver can also be built as a module.  If so, the module
          will be called da9055_wdt.
 
+config GPIO_WATCHDOG
+       tristate "Watchdog device controlled through GPIO-line"
+       depends on OF_GPIO
+       select WATCHDOG_CORE
+       help
+         If you say yes here you get support for watchdog device
+         controlled through GPIO-line.
+
 config WM831X_WATCHDOG
        tristate "WM831x watchdog"
        depends on MFD_WM831X
@@ -109,7 +117,7 @@ config WM8350_WATCHDOG
 
 config ARM_SP805_WATCHDOG
        tristate "ARM SP805 Watchdog"
-       depends on ARM && ARM_AMBA
+       depends on (ARM || ARM64) && ARM_AMBA
        select WATCHDOG_CORE
        help
          ARM Primecell SP805 Watchdog timer. This will reboot your system when
@@ -188,6 +196,7 @@ config S3C2410_WATCHDOG
        tristate "S3C2410 Watchdog"
        depends on HAVE_S3C2410_WATCHDOG
        select WATCHDOG_CORE
+       select MFD_SYSCON if ARCH_EXYNOS5
        help
          Watchdog timer block in the Samsung SoCs. This will reboot
          the system when the timer expires with the watchdog enabled.
@@ -214,10 +223,9 @@ config SA1100_WATCHDOG
 
 config DW_WATCHDOG
        tristate "Synopsys DesignWare watchdog"
-       depends on ARM && HAVE_CLK
        help
          Say Y here if to include support for the Synopsys DesignWare
-         watchdog timer found in many ARM chips.
+         watchdog timer found in many chips.
          To compile this driver as a module, choose M here: the
          module will be called dw_wdt.
 
@@ -270,10 +278,11 @@ config IOP_WATCHDOG
 
 config DAVINCI_WATCHDOG
        tristate "DaVinci watchdog"
-       depends on ARCH_DAVINCI
+       depends on ARCH_DAVINCI || ARCH_KEYSTONE
+       select WATCHDOG_CORE
        help
          Say Y here if to include support for the watchdog timer
-         in the DaVinci DM644x/DM646x processors.
+         in the DaVinci DM644x/DM646x or Keystone processors.
          To compile this driver as a module, choose M here: the
          module will be called davinci_wdt.
 
@@ -883,13 +892,22 @@ config VIA_WDT
        Most people will say N.
 
 config W83627HF_WDT
-       tristate "W83627HF/W83627DHG Watchdog Timer"
+       tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
        depends on X86
        select WATCHDOG_CORE
        ---help---
-         This is the driver for the hardware watchdog on the W83627HF chipset
-         as used in Advantech PC-9578 and Tyan S2721-533 motherboards
-         (and likely others). The driver also supports the W83627DHG chip.
+         This is the driver for the hardware watchdog on the following
+         Super I/O chips.
+               W83627DHG/DHG-P/EHF/EHG/F/G/HF/S/SF/THF/UHG/UG
+               W83637HF
+               W83667HG/HG-B
+               W83687THF
+               W83697HF
+               W83697UG
+               NCT6775
+               NCT6776
+               NCT6779
+
          This watchdog simply watches your kernel to make sure it doesn't
          freeze, and if it does, it reboots your computer after a certain
          amount of time.
@@ -1139,6 +1157,28 @@ config BCM2835_WDT
          To compile this driver as a loadable module, choose M here.
          The module will be called bcm2835_wdt.
 
+config BCM_KONA_WDT
+       tristate "BCM Kona Watchdog"
+       depends on ARCH_BCM
+       select WATCHDOG_CORE
+       help
+         Support for the watchdog timer on the following Broadcom BCM281xx
+         family, which includes BCM11130, BCM11140, BCM11351, BCM28145 and
+         BCM28155 variants.
+
+         Say 'Y' or 'M' here to enable the driver. The module will be called
+         bcm_kona_wdt.
+
+config BCM_KONA_WDT_DEBUG
+       bool "DEBUGFS support for BCM Kona Watchdog"
+       depends on BCM_KONA_WDT
+       help
+         If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
+         access to the driver's internal data structures as well as watchdog
+         timer hardware registres.
+
+         If in doubt, say 'N'.
+
 config LANTIQ_WDT
        tristate "Lantiq SoC watchdog"
        depends on LANTIQ
@@ -1171,6 +1211,7 @@ config MPC5200_WDT
 config 8xxx_WDT
        tristate "MPC8xxx Platform Watchdog Timer"
        depends on PPC_8xx || PPC_83xx || PPC_86xx
+       select WATCHDOG_CORE
        help
          This driver is for a SoC level watchdog that exists on some
          Freescale PowerPC processors. So far this driver supports:
index 91bd95a64baf7bfe083e6f67ff803f2edc0ce2f9..985a66cda76f23ac89932672f1f813c9cab2dade 100644 (file)
@@ -57,6 +57,7 @@ obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
 obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
 obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
 obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
+obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
 # Architecture Independent
 obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
 obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
+obj-$(CONFIG_GPIO_WATCHDOG)    += gpio_wdt.o
 obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
 obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
 obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
index fbb7b94cabfd5816e7532fe0093b579b831b63a8..3a17fbd39f8aed015f00ecafdbb5839a2e8296f4 100644 (file)
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
  *     want to register another driver on the same PCI id.
  */
 
-static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
+static const struct pci_device_id ali_pci_tbl[] __used = {
        { PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
        { PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
        { 0, },
index 12f0b762b528948b15e30fab605860a34c01ba57..996b2f7d330e94ec3143e6bebe0c86f74e162943 100644 (file)
@@ -414,7 +414,7 @@ err_out:
 module_init(alim7101_wdt_init);
 module_exit(alim7101_wdt_unload);
 
-static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
+static const struct pci_device_id alim7101_pci_tbl[] __used = {
        { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
        { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
        { }
index be37dde4f864448c13643ee4535e5b6a830be34c..489729b262987965dd760fd50c60053c2caec57c 100644 (file)
 
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/types.h>
 #include <linux/watchdog.h>
 #include <linux/jiffies.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 
 #include "at91sam9_wdt.h"
 
 #define DRV_NAME "AT91SAM9 Watchdog"
 
-#define wdt_read(field) \
-       __raw_readl(at91wdt_private.base + field)
-#define wdt_write(field, val) \
-       __raw_writel((val), at91wdt_private.base + field)
+#define wdt_read(wdt, field) \
+       __raw_readl((wdt)->base + (field))
+#define wdt_write(wtd, field, val) \
+       __raw_writel((val), (wdt)->base + (field))
 
 /* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
  * use this to convert a watchdog
  * value from/to milliseconds.
  */
-#define ms_to_ticks(t) (((t << 8) / 1000) - 1)
-#define ticks_to_ms(t) (((t + 1) * 1000) >> 8)
+#define ticks_to_hz_rounddown(t)       ((((t) + 1) * HZ) >> 8)
+#define ticks_to_hz_roundup(t)         (((((t) + 1) * HZ) + 255) >> 8)
+#define ticks_to_secs(t)               (((t) + 1) >> 8)
+#define secs_to_ticks(s)               ((s) ? (((s) << 8) - 1) : 0)
+
+#define WDT_MR_RESET   0x3FFF2FFF
+
+/* Watchdog max counter value in ticks */
+#define WDT_COUNTER_MAX_TICKS  0xFFF
+
+/* Watchdog max delta/value in secs */
+#define WDT_COUNTER_MAX_SECS   ticks_to_secs(WDT_COUNTER_MAX_TICKS)
 
 /* Hardware timeout in seconds */
 #define WDT_HW_TIMEOUT 2
@@ -66,23 +79,40 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
        "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static struct watchdog_device at91_wdt_dev;
-static void at91_ping(unsigned long data);
-
-static struct {
+#define to_wdt(wdd) container_of(wdd, struct at91wdt, wdd)
+struct at91wdt {
+       struct watchdog_device wdd;
        void __iomem *base;
        unsigned long next_heartbeat;   /* the next_heartbeat for the timer */
        struct timer_list timer;        /* The timer that pings the watchdog */
-} at91wdt_private;
+       u32 mr;
+       u32 mr_mask;
+       unsigned long heartbeat;        /* WDT heartbeat in jiffies */
+       bool nowayout;
+       unsigned int irq;
+};
 
 /* ......................................................................... */
 
+static irqreturn_t wdt_interrupt(int irq, void *dev_id)
+{
+       struct at91wdt *wdt = (struct at91wdt *)dev_id;
+
+       if (wdt_read(wdt, AT91_WDT_SR)) {
+               pr_crit("at91sam9 WDT software reset\n");
+               emergency_restart();
+               pr_crit("Reboot didn't ?????\n");
+       }
+
+       return IRQ_HANDLED;
+}
+
 /*
  * Reload the watchdog timer.  (ie, pat the watchdog)
  */
-static inline void at91_wdt_reset(void)
+static inline void at91_wdt_reset(struct at91wdt *wdt)
 {
-       wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+       wdt_write(wdt, AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
 }
 
 /*
@@ -90,26 +120,21 @@ static inline void at91_wdt_reset(void)
  */
 static void at91_ping(unsigned long data)
 {
-       if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
-           (!watchdog_active(&at91_wdt_dev))) {
-               at91_wdt_reset();
-               mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
-       } else
+       struct at91wdt *wdt = (struct at91wdt *)data;
+       if (time_before(jiffies, wdt->next_heartbeat) ||
+           !watchdog_active(&wdt->wdd)) {
+               at91_wdt_reset(wdt);
+               mod_timer(&wdt->timer, jiffies + wdt->heartbeat);
+       } else {
                pr_crit("I will reset your machine !\n");
-}
-
-static int at91_wdt_ping(struct watchdog_device *wdd)
-{
-       /* calculate when the next userspace timeout will be */
-       at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
-       return 0;
+       }
 }
 
 static int at91_wdt_start(struct watchdog_device *wdd)
 {
-       /* calculate the next userspace timeout and modify the timer */
-       at91_wdt_ping(wdd);
-       mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+       struct at91wdt *wdt = to_wdt(wdd);
+       /* calculate when the next userspace timeout will be */
+       wdt->next_heartbeat = jiffies + wdd->timeout * HZ;
        return 0;
 }
 
@@ -122,39 +147,104 @@ static int at91_wdt_stop(struct watchdog_device *wdd)
 static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
 {
        wdd->timeout = new_timeout;
-       return 0;
+       return at91_wdt_start(wdd);
 }
 
-/*
- * Set the watchdog time interval in 1/256Hz (write-once)
- * Counter is 12 bit.
- */
-static int at91_wdt_settimeout(unsigned int timeout)
+static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
 {
-       unsigned int reg;
-       unsigned int mr;
-
-       /* Check if disabled */
-       mr = wdt_read(AT91_WDT_MR);
-       if (mr & AT91_WDT_WDDIS) {
-               pr_err("sorry, watchdog is disabled\n");
-               return -EIO;
+       u32 tmp;
+       u32 delta;
+       u32 value;
+       int err;
+       u32 mask = wdt->mr_mask;
+       unsigned long min_heartbeat = 1;
+       unsigned long max_heartbeat;
+       struct device *dev = &pdev->dev;
+
+       tmp = wdt_read(wdt, AT91_WDT_MR);
+       if ((tmp & mask) != (wdt->mr & mask)) {
+               if (tmp == WDT_MR_RESET) {
+                       wdt_write(wdt, AT91_WDT_MR, wdt->mr);
+                       tmp = wdt_read(wdt, AT91_WDT_MR);
+               }
+       }
+
+       if (tmp & AT91_WDT_WDDIS) {
+               if (wdt->mr & AT91_WDT_WDDIS)
+                       return 0;
+               dev_err(dev, "watchdog is disabled\n");
+               return -EINVAL;
+       }
+
+       value = tmp & AT91_WDT_WDV;
+       delta = (tmp & AT91_WDT_WDD) >> 16;
+
+       if (delta < value)
+               min_heartbeat = ticks_to_hz_roundup(value - delta);
+
+       max_heartbeat = ticks_to_hz_rounddown(value);
+       if (!max_heartbeat) {
+               dev_err(dev,
+                       "heartbeat is too small for the system to handle it correctly\n");
+               return -EINVAL;
        }
 
        /*
-        * All counting occurs at SLOW_CLOCK / 128 = 256 Hz
-        *
-        * Since WDV is a 12-bit counter, the maximum period is
-        * 4096 / 256 = 16 seconds.
+        * Try to reset the watchdog counter 4 or 2 times more often than
+        * actually requested, to avoid spurious watchdog reset.
+        * If this is not possible because of the min_heartbeat value, reset
+        * it at the min_heartbeat period.
         */
-       reg = AT91_WDT_WDRSTEN  /* causes watchdog reset */
-               /* | AT91_WDT_WDRPROC   causes processor reset only */
-               | AT91_WDT_WDDBGHLT     /* disabled in debug mode */
-               | AT91_WDT_WDD          /* restart at any time */
-               | (timeout & AT91_WDT_WDV);  /* timer value */
-       wdt_write(AT91_WDT_MR, reg);
+       if ((max_heartbeat / 4) >= min_heartbeat)
+               wdt->heartbeat = max_heartbeat / 4;
+       else if ((max_heartbeat / 2) >= min_heartbeat)
+               wdt->heartbeat = max_heartbeat / 2;
+       else
+               wdt->heartbeat = min_heartbeat;
+
+       if (max_heartbeat < min_heartbeat + 4)
+               dev_warn(dev,
+                        "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+
+       if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+               err = request_irq(wdt->irq, wdt_interrupt,
+                                 IRQF_SHARED | IRQF_IRQPOLL,
+                                 pdev->name, wdt);
+               if (err)
+                       return err;
+       }
+
+       if ((tmp & wdt->mr_mask) != (wdt->mr & wdt->mr_mask))
+               dev_warn(dev,
+                        "watchdog already configured differently (mr = %x expecting %x)\n",
+                        tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
+
+       setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+
+       /*
+        * Use min_heartbeat the first time to avoid spurious watchdog reset:
+        * we don't know for how long the watchdog counter is running, and
+        *  - resetting it right now might trigger a watchdog fault reset
+        *  - waiting for heartbeat time might lead to a watchdog timeout
+        *    reset
+        */
+       mod_timer(&wdt->timer, jiffies + min_heartbeat);
+
+       /* Try to set timeout from device tree first */
+       if (watchdog_init_timeout(&wdt->wdd, 0, dev))
+               watchdog_init_timeout(&wdt->wdd, heartbeat, dev);
+       watchdog_set_nowayout(&wdt->wdd, wdt->nowayout);
+       err = watchdog_register_device(&wdt->wdd);
+       if (err)
+               goto out_stop_timer;
+
+       wdt->next_heartbeat = jiffies + wdt->wdd.timeout * HZ;
 
        return 0;
+
+out_stop_timer:
+       del_timer(&wdt->timer);
+       return err;
 }
 
 /* ......................................................................... */
@@ -169,61 +259,123 @@ static const struct watchdog_ops at91_wdt_ops = {
        .owner =        THIS_MODULE,
        .start =        at91_wdt_start,
        .stop =         at91_wdt_stop,
-       .ping =         at91_wdt_ping,
        .set_timeout =  at91_wdt_set_timeout,
 };
 
-static struct watchdog_device at91_wdt_dev = {
-       .info =         &at91_wdt_info,
-       .ops =          &at91_wdt_ops,
-       .timeout =      WDT_HEARTBEAT,
-       .min_timeout =  1,
-       .max_timeout =  0xFFFF,
-};
+#if defined(CONFIG_OF)
+static int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+       u32 min = 0;
+       u32 max = WDT_COUNTER_MAX_SECS;
+       const char *tmp;
+
+       /* Get the interrupts property */
+       wdt->irq = irq_of_parse_and_map(np, 0);
+       if (!wdt->irq)
+               dev_warn(wdt->wdd.parent, "failed to get IRQ from DT\n");
+
+       if (!of_property_read_u32_index(np, "atmel,max-heartbeat-sec", 0,
+                                       &max)) {
+               if (!max || max > WDT_COUNTER_MAX_SECS)
+                       max = WDT_COUNTER_MAX_SECS;
+
+               if (!of_property_read_u32_index(np, "atmel,min-heartbeat-sec",
+                                               0, &min)) {
+                       if (min >= max)
+                               min = max - 1;
+               }
+       }
+
+       min = secs_to_ticks(min);
+       max = secs_to_ticks(max);
+
+       wdt->mr_mask = 0x3FFFFFFF;
+       wdt->mr = 0;
+       if (!of_property_read_string(np, "atmel,watchdog-type", &tmp) &&
+           !strcmp(tmp, "software")) {
+               wdt->mr |= AT91_WDT_WDFIEN;
+               wdt->mr_mask &= ~AT91_WDT_WDRPROC;
+       } else {
+               wdt->mr |= AT91_WDT_WDRSTEN;
+       }
+
+       if (!of_property_read_string(np, "atmel,reset-type", &tmp) &&
+           !strcmp(tmp, "proc"))
+               wdt->mr |= AT91_WDT_WDRPROC;
+
+       if (of_property_read_bool(np, "atmel,disable")) {
+               wdt->mr |= AT91_WDT_WDDIS;
+               wdt->mr_mask &= AT91_WDT_WDDIS;
+       }
+
+       if (of_property_read_bool(np, "atmel,idle-halt"))
+               wdt->mr |= AT91_WDT_WDIDLEHLT;
+
+       if (of_property_read_bool(np, "atmel,dbg-halt"))
+               wdt->mr |= AT91_WDT_WDDBGHLT;
+
+       wdt->mr |= max | ((max - min) << 16);
+
+       return 0;
+}
+#else
+static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
+{
+       return 0;
+}
+#endif
 
 static int __init at91wdt_probe(struct platform_device *pdev)
 {
        struct resource *r;
-       int res;
+       int err;
+       struct at91wdt *wdt;
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r)
-               return -ENODEV;
-       at91wdt_private.base = ioremap(r->start, resource_size(r));
-       if (!at91wdt_private.base) {
-               dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+       wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
                return -ENOMEM;
-       }
 
-       at91_wdt_dev.parent = &pdev->dev;
-       watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
-       watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+       wdt->mr = (WDT_HW_TIMEOUT * 256) | AT91_WDT_WDRSTEN | AT91_WDT_WDD |
+                 AT91_WDT_WDDBGHLT | AT91_WDT_WDIDLEHLT;
+       wdt->mr_mask = 0x3FFFFFFF;
+       wdt->nowayout = nowayout;
+       wdt->wdd.parent = &pdev->dev;
+       wdt->wdd.info = &at91_wdt_info;
+       wdt->wdd.ops = &at91_wdt_ops;
+       wdt->wdd.timeout = WDT_HEARTBEAT;
+       wdt->wdd.min_timeout = 1;
+       wdt->wdd.max_timeout = 0xFFFF;
 
-       /* Set watchdog */
-       res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
-       if (res)
-               return res;
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       wdt->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(wdt->base))
+               return PTR_ERR(wdt->base);
+
+       if (pdev->dev.of_node) {
+               err = of_at91wdt_init(pdev->dev.of_node, wdt);
+               if (err)
+                       return err;
+       }
 
-       res = watchdog_register_device(&at91_wdt_dev);
-       if (res)
-               return res;
+       err = at91_wdt_init(pdev, wdt);
+       if (err)
+               return err;
 
-       at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
-       setup_timer(&at91wdt_private.timer, at91_ping, 0);
-       mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
+       platform_set_drvdata(pdev, wdt);
 
        pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
-               at91_wdt_dev.timeout, nowayout);
+               wdt->wdd.timeout, wdt->nowayout);
 
        return 0;
 }
 
 static int __exit at91wdt_remove(struct platform_device *pdev)
 {
-       watchdog_unregister_device(&at91_wdt_dev);
+       struct at91wdt *wdt = platform_get_drvdata(pdev);
+       watchdog_unregister_device(&wdt->wdd);
 
        pr_warn("I quit now, hardware will probably reboot!\n");
-       del_timer(&at91wdt_private.timer);
+       del_timer(&wdt->timer);
 
        return 0;
 }
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
new file mode 100644 (file)
index 0000000..9c24809
--- /dev/null
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define SECWDOG_CTRL_REG               0x00000000
+#define SECWDOG_COUNT_REG              0x00000004
+
+#define SECWDOG_RESERVED_MASK          0x1dffffff
+#define SECWDOG_WD_LOAD_FLAG           0x10000000
+#define SECWDOG_EN_MASK                        0x08000000
+#define SECWDOG_SRSTEN_MASK            0x04000000
+#define SECWDOG_RES_MASK               0x00f00000
+#define SECWDOG_COUNT_MASK             0x000fffff
+
+#define SECWDOG_MAX_COUNT              SECWDOG_COUNT_MASK
+#define SECWDOG_CLKS_SHIFT             20
+#define SECWDOG_MAX_RES                        15
+#define SECWDOG_DEFAULT_RESOLUTION     4
+#define SECWDOG_MAX_TRY                        1000
+
+#define SECS_TO_TICKS(x, w)            ((x) << (w)->resolution)
+#define TICKS_TO_SECS(x, w)            ((x) >> (w)->resolution)
+
+#define BCM_KONA_WDT_NAME              "bcm_kona_wdt"
+
+struct bcm_kona_wdt {
+       void __iomem *base;
+       /*
+        * One watchdog tick is 1/(2^resolution) seconds. Resolution can take
+        * the values 0-15, meaning one tick can be 1s to 30.52us. Our default
+        * resolution of 4 means one tick is 62.5ms.
+        *
+        * The watchdog counter is 20 bits. Depending on resolution, the maximum
+        * counter value of 0xfffff expires after about 12 days (resolution 0)
+        * down to only 32s (resolution 15). The default resolution of 4 gives
+        * us a maximum of about 18 hours and 12 minutes before the watchdog
+        * times out.
+        */
+       int resolution;
+       spinlock_t lock;
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+       unsigned long busy_count;
+       struct dentry *debugfs;
+#endif
+};
+
+static int secure_register_read(struct bcm_kona_wdt *wdt, uint32_t offset)
+{
+       uint32_t val;
+       unsigned count = 0;
+
+       /*
+        * If the WD_LOAD_FLAG is set, the watchdog counter field is being
+        * updated in hardware. Once the WD timer is updated in hardware, it
+        * gets cleared.
+        */
+       do {
+               if (unlikely(count > 1))
+                       udelay(5);
+               val = readl_relaxed(wdt->base + offset);
+               count++;
+       } while ((val & SECWDOG_WD_LOAD_FLAG) && count < SECWDOG_MAX_TRY);
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+       /* Remember the maximum number of iterations due to WD_LOAD_FLAG */
+       if (count > wdt->busy_count)
+               wdt->busy_count = count;
+#endif
+
+       /* This is the only place we return a negative value. */
+       if (val & SECWDOG_WD_LOAD_FLAG)
+               return -ETIMEDOUT;
+
+       /* We always mask out reserved bits. */
+       val &= SECWDOG_RESERVED_MASK;
+
+       return val;
+}
+
+#ifdef CONFIG_BCM_KONA_WDT_DEBUG
+
+static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
+{
+       int ctl_val, cur_val, ret;
+       unsigned long flags;
+       struct bcm_kona_wdt *wdt = s->private;
+
+       if (!wdt)
+               return seq_puts(s, "No device pointer\n");
+
+       spin_lock_irqsave(&wdt->lock, flags);
+       ctl_val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+       cur_val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+       spin_unlock_irqrestore(&wdt->lock, flags);
+
+       if (ctl_val < 0 || cur_val < 0) {
+               ret = seq_puts(s, "Error accessing hardware\n");
+       } else {
+               int ctl, cur, ctl_sec, cur_sec, res;
+
+               ctl = ctl_val & SECWDOG_COUNT_MASK;
+               res = (ctl_val & SECWDOG_RES_MASK) >> SECWDOG_CLKS_SHIFT;
+               cur = cur_val & SECWDOG_COUNT_MASK;
+               ctl_sec = TICKS_TO_SECS(ctl, wdt);
+               cur_sec = TICKS_TO_SECS(cur, wdt);
+               ret = seq_printf(s, "Resolution: %d / %d\n"
+                               "Control: %d s / %d (%#x) ticks\n"
+                               "Current: %d s / %d (%#x) ticks\n"
+                               "Busy count: %lu\n", res,
+                               wdt->resolution, ctl_sec, ctl, ctl, cur_sec,
+                               cur, cur, wdt->busy_count);
+       }
+
+       return ret;
+}
+
+static int bcm_kona_dbg_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, bcm_kona_wdt_dbg_show, inode->i_private);
+}
+
+static const struct file_operations bcm_kona_dbg_operations = {
+       .open           = bcm_kona_dbg_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev)
+{
+       struct dentry *dir;
+       struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+       if (!wdt)
+               return;
+
+       wdt->debugfs = NULL;
+
+       dir = debugfs_create_dir(BCM_KONA_WDT_NAME, NULL);
+       if (IS_ERR_OR_NULL(dir))
+               return;
+
+       if (debugfs_create_file("info", S_IFREG | S_IRUGO, dir, wdt,
+                               &bcm_kona_dbg_operations))
+               wdt->debugfs = dir;
+       else
+               debugfs_remove_recursive(dir);
+}
+
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev)
+{
+       struct bcm_kona_wdt *wdt = platform_get_drvdata(pdev);
+
+       if (wdt && wdt->debugfs) {
+               debugfs_remove_recursive(wdt->debugfs);
+               wdt->debugfs = NULL;
+       }
+}
+
+#else
+
+static void bcm_kona_wdt_debug_init(struct platform_device *pdev) {}
+static void bcm_kona_wdt_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_BCM_KONA_WDT_DEBUG */
+
+static int bcm_kona_wdt_ctrl_reg_modify(struct bcm_kona_wdt *wdt,
+                                       unsigned mask, unsigned newval)
+{
+       int val;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&wdt->lock, flags);
+
+       val = secure_register_read(wdt, SECWDOG_CTRL_REG);
+       if (val < 0) {
+               ret = val;
+       } else {
+               val &= ~mask;
+               val |= newval;
+               writel_relaxed(val, wdt->base + SECWDOG_CTRL_REG);
+       }
+
+       spin_unlock_irqrestore(&wdt->lock, flags);
+
+       return ret;
+}
+
+static int bcm_kona_wdt_set_resolution_reg(struct bcm_kona_wdt *wdt)
+{
+       if (wdt->resolution > SECWDOG_MAX_RES)
+               return -EINVAL;
+
+       return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_RES_MASK,
+                                       wdt->resolution << SECWDOG_CLKS_SHIFT);
+}
+
+static int bcm_kona_wdt_set_timeout_reg(struct watchdog_device *wdog,
+                                       unsigned watchdog_flags)
+{
+       struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+       return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_COUNT_MASK,
+                                       SECS_TO_TICKS(wdog->timeout, wdt) |
+                                       watchdog_flags);
+}
+
+static int bcm_kona_wdt_set_timeout(struct watchdog_device *wdog,
+       unsigned int t)
+{
+       wdog->timeout = t;
+       return 0;
+}
+
+static unsigned int bcm_kona_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+       struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+       int val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&wdt->lock, flags);
+       val = secure_register_read(wdt, SECWDOG_COUNT_REG);
+       spin_unlock_irqrestore(&wdt->lock, flags);
+
+       if (val < 0)
+               return val;
+
+       return TICKS_TO_SECS(val & SECWDOG_COUNT_MASK, wdt);
+}
+
+static int bcm_kona_wdt_start(struct watchdog_device *wdog)
+{
+       return bcm_kona_wdt_set_timeout_reg(wdog,
+                                       SECWDOG_EN_MASK | SECWDOG_SRSTEN_MASK);
+}
+
+static int bcm_kona_wdt_stop(struct watchdog_device *wdog)
+{
+       struct bcm_kona_wdt *wdt = watchdog_get_drvdata(wdog);
+
+       return bcm_kona_wdt_ctrl_reg_modify(wdt, SECWDOG_EN_MASK |
+                                           SECWDOG_SRSTEN_MASK, 0);
+}
+
+static struct watchdog_ops bcm_kona_wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        bcm_kona_wdt_start,
+       .stop =         bcm_kona_wdt_stop,
+       .set_timeout =  bcm_kona_wdt_set_timeout,
+       .get_timeleft = bcm_kona_wdt_get_timeleft,
+};
+
+static struct watchdog_info bcm_kona_wdt_info = {
+       .options =      WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+                       WDIOF_KEEPALIVEPING,
+       .identity =     "Broadcom Kona Watchdog Timer",
+};
+
+static struct watchdog_device bcm_kona_wdt_wdd = {
+       .info =         &bcm_kona_wdt_info,
+       .ops =          &bcm_kona_wdt_ops,
+       .min_timeout =  1,
+       .max_timeout =  SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+       .timeout =      SECWDOG_MAX_COUNT >> SECWDOG_DEFAULT_RESOLUTION,
+};
+
+static void bcm_kona_wdt_shutdown(struct platform_device *pdev)
+{
+       bcm_kona_wdt_stop(&bcm_kona_wdt_wdd);
+}
+
+static int bcm_kona_wdt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct bcm_kona_wdt *wdt;
+       struct resource *res;
+       int ret;
+
+       wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       wdt->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(wdt->base))
+               return -ENODEV;
+
+       wdt->resolution = SECWDOG_DEFAULT_RESOLUTION;
+       ret = bcm_kona_wdt_set_resolution_reg(wdt);
+       if (ret) {
+               dev_err(dev, "Failed to set resolution (error: %d)", ret);
+               return ret;
+       }
+
+       spin_lock_init(&wdt->lock);
+       platform_set_drvdata(pdev, wdt);
+       watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+
+       ret = bcm_kona_wdt_set_timeout_reg(&bcm_kona_wdt_wdd, 0);
+       if (ret) {
+               dev_err(dev, "Failed set watchdog timeout");
+               return ret;
+       }
+
+       ret = watchdog_register_device(&bcm_kona_wdt_wdd);
+       if (ret) {
+               dev_err(dev, "Failed to register watchdog device");
+               return ret;
+       }
+
+       bcm_kona_wdt_debug_init(pdev);
+       dev_dbg(dev, "Broadcom Kona Watchdog Timer");
+
+       return 0;
+}
+
+static int bcm_kona_wdt_remove(struct platform_device *pdev)
+{
+       bcm_kona_wdt_debug_exit(pdev);
+       bcm_kona_wdt_shutdown(pdev);
+       watchdog_unregister_device(&bcm_kona_wdt_wdd);
+       dev_dbg(&pdev->dev, "Watchdog driver disabled");
+
+       return 0;
+}
+
+static const struct of_device_id bcm_kona_wdt_of_match[] = {
+       { .compatible = "brcm,kona-wdt", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, bcm_kona_wdt_of_match);
+
+static struct platform_driver bcm_kona_wdt_driver = {
+       .driver = {
+                       .name = BCM_KONA_WDT_NAME,
+                       .owner = THIS_MODULE,
+                       .of_match_table = bcm_kona_wdt_of_match,
+                 },
+       .probe = bcm_kona_wdt_probe,
+       .remove = bcm_kona_wdt_remove,
+       .shutdown = bcm_kona_wdt_shutdown,
+};
+
+module_platform_driver(bcm_kona_wdt_driver);
+
+MODULE_ALIAS("platform:" BCM_KONA_WDT_NAME);
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Kona Watchdog Driver");
+MODULE_LICENSE("GPL v2");
index 12591f6596efe54a2d9f58b3cf26547867440685..b1bae03742a9a6cf5a2c32e5746ecc520c45add9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Watchdog driver for DaVinci DM644x/DM646x processors
  *
- * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2006-2013 Texas Instruments.
  *
  * 2007 (c) MontaVista Software, Inc. This file is licensed under
  * the terms of the GNU General Public License version 2. This program
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
 #include <linux/watchdog.h>
 #include <linux/init.h>
-#include <linux/bitops.h>
 #include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/clk.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 
 #define MODULE_NAME "DAVINCI-WDT: "
 #define WDKEY_SEQ0             (0xa5c6 << 16)
 #define WDKEY_SEQ1             (0xda7e << 16)
 
-static int heartbeat = DEFAULT_HEARTBEAT;
+static int heartbeat;
 
-static DEFINE_SPINLOCK(io_lock);
-static unsigned long wdt_status;
-#define WDT_IN_USE        0
-#define WDT_OK_TO_CLOSE   1
-#define WDT_REGION_INITED 2
-#define WDT_DEVICE_INITED 3
-
-static void __iomem    *wdt_base;
-struct clk             *wdt_clk;
-
-static void wdt_service(void)
-{
-       spin_lock(&io_lock);
-
-       /* put watchdog in service state */
-       iowrite32(WDKEY_SEQ0, wdt_base + WDTCR);
-       /* put watchdog in active state */
-       iowrite32(WDKEY_SEQ1, wdt_base + WDTCR);
-
-       spin_unlock(&io_lock);
-}
+/*
+ * struct to hold data for each WDT device
+ * @base - base io address of WD device
+ * @clk - source clock of WDT
+ * @wdd - hold watchdog device as is in WDT core
+ */
+struct davinci_wdt_device {
+       void __iomem            *base;
+       struct clk              *clk;
+       struct watchdog_device  wdd;
+};
 
-static void wdt_enable(void)
+static int davinci_wdt_start(struct watchdog_device *wdd)
 {
        u32 tgcr;
        u32 timer_margin;
        unsigned long wdt_freq;
+       struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
 
-       wdt_freq = clk_get_rate(wdt_clk);
-
-       spin_lock(&io_lock);
+       wdt_freq = clk_get_rate(davinci_wdt->clk);
 
        /* disable, internal clock source */
-       iowrite32(0, wdt_base + TCR);
+       iowrite32(0, davinci_wdt->base + TCR);
        /* reset timer, set mode to 64-bit watchdog, and unreset */
-       iowrite32(0, wdt_base + TGCR);
+       iowrite32(0, davinci_wdt->base + TGCR);
        tgcr = TIMMODE_64BIT_WDOG | TIM12RS_UNRESET | TIM34RS_UNRESET;
-       iowrite32(tgcr, wdt_base + TGCR);
+       iowrite32(tgcr, davinci_wdt->base + TGCR);
        /* clear counter regs */
-       iowrite32(0, wdt_base + TIM12);
-       iowrite32(0, wdt_base + TIM34);
+       iowrite32(0, davinci_wdt->base + TIM12);
+       iowrite32(0, davinci_wdt->base + TIM34);
        /* set timeout period */
-       timer_margin = (((u64)heartbeat * wdt_freq) & 0xffffffff);
-       iowrite32(timer_margin, wdt_base + PRD12);
-       timer_margin = (((u64)heartbeat * wdt_freq) >> 32);
-       iowrite32(timer_margin, wdt_base + PRD34);
+       timer_margin = (((u64)wdd->timeout * wdt_freq) & 0xffffffff);
+       iowrite32(timer_margin, davinci_wdt->base + PRD12);
+       timer_margin = (((u64)wdd->timeout * wdt_freq) >> 32);
+       iowrite32(timer_margin, davinci_wdt->base + PRD34);
        /* enable run continuously */
-       iowrite32(ENAMODE12_PERIODIC, wdt_base + TCR);
+       iowrite32(ENAMODE12_PERIODIC, davinci_wdt->base + TCR);
        /* Once the WDT is in pre-active state write to
         * TIM12, TIM34, PRD12, PRD34, TCR, TGCR, WDTCR are
         * write protected (except for the WDKEY field)
         */
        /* put watchdog in pre-active state */
-       iowrite32(WDKEY_SEQ0 | WDEN, wdt_base + WDTCR);
+       iowrite32(WDKEY_SEQ0 | WDEN, davinci_wdt->base + WDTCR);
        /* put watchdog in active state */
-       iowrite32(WDKEY_SEQ1 | WDEN, wdt_base + WDTCR);
-
-       spin_unlock(&io_lock);
+       iowrite32(WDKEY_SEQ1 | WDEN, davinci_wdt->base + WDTCR);
+       return 0;
 }
 
-static int davinci_wdt_open(struct inode *inode, struct file *file)
+static int davinci_wdt_ping(struct watchdog_device *wdd)
 {
-       if (test_and_set_bit(WDT_IN_USE, &wdt_status))
-               return -EBUSY;
-
-       wdt_enable();
+       struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
 
-       return nonseekable_open(inode, file);
+       /* put watchdog in service state */
+       iowrite32(WDKEY_SEQ0, davinci_wdt->base + WDTCR);
+       /* put watchdog in active state */
+       iowrite32(WDKEY_SEQ1, davinci_wdt->base + WDTCR);
+       return 0;
 }
 
-static ssize_t
-davinci_wdt_write(struct file *file, const char *data, size_t len,
-                 loff_t *ppos)
+static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd)
 {
-       if (len)
-               wdt_service();
+       u64 timer_counter;
+       unsigned long freq;
+       u32 val;
+       struct davinci_wdt_device *davinci_wdt = watchdog_get_drvdata(wdd);
 
-       return len;
-}
+       /* if timeout has occurred then return 0 */
+       val = ioread32(davinci_wdt->base + WDTCR);
+       if (val & WDFLAG)
+               return 0;
 
-static const struct watchdog_info ident = {
-       .options = WDIOF_KEEPALIVEPING,
-       .identity = "DaVinci Watchdog",
-};
+       freq = clk_get_rate(davinci_wdt->clk);
 
-static long davinci_wdt_ioctl(struct file *file,
-                                       unsigned int cmd, unsigned long arg)
-{
-       int ret = -ENOTTY;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ret = copy_to_user((struct watchdog_info *)arg, &ident,
-                                  sizeof(ident)) ? -EFAULT : 0;
-               break;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               ret = put_user(0, (int *)arg);
-               break;
-
-       case WDIOC_KEEPALIVE:
-               wdt_service();
-               ret = 0;
-               break;
-
-       case WDIOC_GETTIMEOUT:
-               ret = put_user(heartbeat, (int *)arg);
-               break;
-       }
-       return ret;
-}
+       if (!freq)
+               return 0;
 
-static int davinci_wdt_release(struct inode *inode, struct file *file)
-{
-       wdt_service();
-       clear_bit(WDT_IN_USE, &wdt_status);
+       timer_counter = ioread32(davinci_wdt->base + TIM12);
+       timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32);
 
-       return 0;
+       do_div(timer_counter, freq);
+
+       return wdd->timeout - timer_counter;
 }
 
-static const struct file_operations davinci_wdt_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .write = davinci_wdt_write,
-       .unlocked_ioctl = davinci_wdt_ioctl,
-       .open = davinci_wdt_open,
-       .release = davinci_wdt_release,
+static const struct watchdog_info davinci_wdt_info = {
+       .options = WDIOF_KEEPALIVEPING,
+       .identity = "DaVinci/Keystone Watchdog",
 };
 
-static struct miscdevice davinci_wdt_miscdev = {
-       .minor = WATCHDOG_MINOR,
-       .name = "watchdog",
-       .fops = &davinci_wdt_fops,
+static const struct watchdog_ops davinci_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = davinci_wdt_start,
+       .stop           = davinci_wdt_ping,
+       .ping           = davinci_wdt_ping,
+       .get_timeleft   = davinci_wdt_get_timeleft,
 };
 
 static int davinci_wdt_probe(struct platform_device *pdev)
@@ -204,37 +159,53 @@ static int davinci_wdt_probe(struct platform_device *pdev)
        int ret = 0;
        struct device *dev = &pdev->dev;
        struct resource  *wdt_mem;
+       struct watchdog_device *wdd;
+       struct davinci_wdt_device *davinci_wdt;
+
+       davinci_wdt = devm_kzalloc(dev, sizeof(*davinci_wdt), GFP_KERNEL);
+       if (!davinci_wdt)
+               return -ENOMEM;
 
-       wdt_clk = devm_clk_get(dev, NULL);
-       if (WARN_ON(IS_ERR(wdt_clk)))
-               return PTR_ERR(wdt_clk);
+       davinci_wdt->clk = devm_clk_get(dev, NULL);
+       if (WARN_ON(IS_ERR(davinci_wdt->clk)))
+               return PTR_ERR(davinci_wdt->clk);
 
-       clk_prepare_enable(wdt_clk);
+       clk_prepare_enable(davinci_wdt->clk);
 
-       if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
-               heartbeat = DEFAULT_HEARTBEAT;
+       platform_set_drvdata(pdev, davinci_wdt);
 
-       dev_info(dev, "heartbeat %d sec\n", heartbeat);
+       wdd                     = &davinci_wdt->wdd;
+       wdd->info               = &davinci_wdt_info;
+       wdd->ops                = &davinci_wdt_ops;
+       wdd->min_timeout        = 1;
+       wdd->max_timeout        = MAX_HEARTBEAT;
+       wdd->timeout            = DEFAULT_HEARTBEAT;
+
+       watchdog_init_timeout(wdd, heartbeat, dev);
+
+       dev_info(dev, "heartbeat %d sec\n", wdd->timeout);
+
+       watchdog_set_drvdata(wdd, davinci_wdt);
+       watchdog_set_nowayout(wdd, 1);
 
        wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       wdt_base = devm_ioremap_resource(dev, wdt_mem);
-       if (IS_ERR(wdt_base))
-               return PTR_ERR(wdt_base);
+       davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
+       if (IS_ERR(davinci_wdt->base))
+               return PTR_ERR(davinci_wdt->base);
 
-       ret = misc_register(&davinci_wdt_miscdev);
-       if (ret < 0) {
-               dev_err(dev, "cannot register misc device\n");
-       } else {
-               set_bit(WDT_DEVICE_INITED, &wdt_status);
-       }
+       ret = watchdog_register_device(wdd);
+       if (ret < 0)
+               dev_err(dev, "cannot register watchdog device\n");
 
        return ret;
 }
 
 static int davinci_wdt_remove(struct platform_device *pdev)
 {
-       misc_deregister(&davinci_wdt_miscdev);
-       clk_disable_unprepare(wdt_clk);
+       struct davinci_wdt_device *davinci_wdt = platform_get_drvdata(pdev);
+
+       watchdog_unregister_device(&davinci_wdt->wdd);
+       clk_disable_unprepare(davinci_wdt->clk);
 
        return 0;
 }
index a46f5c7ee7ff4e6c11eceb36a59ca7438949e5b5..ee4f86ba83eca1856dc85a0ad777e11732e4e5ea 100644 (file)
@@ -8,7 +8,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * This file implements a driver for the Synopsys DesignWare watchdog device
- * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * in the many subsystems. The watchdog has 16 different timeout periods
  * and these are a function of the input clock frequency.
  *
  * The DesignWare watchdog cannot be stopped once it has been started so we
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
new file mode 100644 (file)
index 0000000..220a9e0
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Driver for watchdog device controlled through GPIO-line
+ *
+ * Author: 2013, Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define SOFT_TIMEOUT_MIN       1
+#define SOFT_TIMEOUT_DEF       60
+#define SOFT_TIMEOUT_MAX       0xffff
+
+enum {
+       HW_ALGO_TOGGLE,
+       HW_ALGO_LEVEL,
+};
+
+struct gpio_wdt_priv {
+       int                     gpio;
+       bool                    active_low;
+       bool                    state;
+       unsigned int            hw_algo;
+       unsigned int            hw_margin;
+       unsigned long           last_jiffies;
+       struct notifier_block   notifier;
+       struct timer_list       timer;
+       struct watchdog_device  wdd;
+};
+
+static void gpio_wdt_disable(struct gpio_wdt_priv *priv)
+{
+       gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+
+       /* Put GPIO back to tristate */
+       if (priv->hw_algo == HW_ALGO_TOGGLE)
+               gpio_direction_input(priv->gpio);
+}
+
+static int gpio_wdt_start(struct watchdog_device *wdd)
+{
+       struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+       priv->state = priv->active_low;
+       gpio_direction_output(priv->gpio, priv->state);
+       priv->last_jiffies = jiffies;
+       mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin);
+
+       return 0;
+}
+
+static int gpio_wdt_stop(struct watchdog_device *wdd)
+{
+       struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+       mod_timer(&priv->timer, 0);
+       gpio_wdt_disable(priv);
+
+       return 0;
+}
+
+static int gpio_wdt_ping(struct watchdog_device *wdd)
+{
+       struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+       priv->last_jiffies = jiffies;
+
+       return 0;
+}
+
+static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+       wdd->timeout = t;
+
+       return gpio_wdt_ping(wdd);
+}
+
+static void gpio_wdt_hwping(unsigned long data)
+{
+       struct watchdog_device *wdd = (struct watchdog_device *)data;
+       struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+       if (time_after(jiffies, priv->last_jiffies +
+                      msecs_to_jiffies(wdd->timeout * 1000))) {
+               dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n");
+               return;
+       }
+
+       /* Restart timer */
+       mod_timer(&priv->timer, jiffies + priv->hw_margin);
+
+       switch (priv->hw_algo) {
+       case HW_ALGO_TOGGLE:
+               /* Toggle output pin */
+               priv->state = !priv->state;
+               gpio_set_value_cansleep(priv->gpio, priv->state);
+               break;
+       case HW_ALGO_LEVEL:
+               /* Pulse */
+               gpio_set_value_cansleep(priv->gpio, !priv->active_low);
+               udelay(1);
+               gpio_set_value_cansleep(priv->gpio, priv->active_low);
+               break;
+       }
+}
+
+static int gpio_wdt_notify_sys(struct notifier_block *nb, unsigned long code,
+                              void *unused)
+{
+       struct gpio_wdt_priv *priv = container_of(nb, struct gpio_wdt_priv,
+                                                 notifier);
+
+       mod_timer(&priv->timer, 0);
+
+       switch (code) {
+       case SYS_HALT:
+       case SYS_POWER_OFF:
+               gpio_wdt_disable(priv);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static const struct watchdog_info gpio_wdt_ident = {
+       .options        = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING |
+                         WDIOF_SETTIMEOUT,
+       .identity       = "GPIO Watchdog",
+};
+
+static const struct watchdog_ops gpio_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = gpio_wdt_start,
+       .stop           = gpio_wdt_stop,
+       .ping           = gpio_wdt_ping,
+       .set_timeout    = gpio_wdt_set_timeout,
+};
+
+static int gpio_wdt_probe(struct platform_device *pdev)
+{
+       struct gpio_wdt_priv *priv;
+       enum of_gpio_flags flags;
+       unsigned int hw_margin;
+       unsigned long f = 0;
+       const char *algo;
+       int ret;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+       if (!gpio_is_valid(priv->gpio))
+               return priv->gpio;
+
+       priv->active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+       ret = of_property_read_string(pdev->dev.of_node, "hw_algo", &algo);
+       if (ret)
+               return ret;
+       if (!strncmp(algo, "toggle", 6)) {
+               priv->hw_algo = HW_ALGO_TOGGLE;
+               f = GPIOF_IN;
+       } else if (!strncmp(algo, "level", 5)) {
+               priv->hw_algo = HW_ALGO_LEVEL;
+               f = priv->active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+       } else {
+               return -EINVAL;
+       }
+
+       ret = devm_gpio_request_one(&pdev->dev, priv->gpio, f,
+                                   dev_name(&pdev->dev));
+       if (ret)
+               return ret;
+
+       ret = of_property_read_u32(pdev->dev.of_node,
+                                  "hw_margin_ms", &hw_margin);
+       if (ret)
+               return ret;
+       /* Disallow values lower than 2 and higher than 65535 ms */
+       if (hw_margin < 2 || hw_margin > 65535)
+               return -EINVAL;
+
+       /* Use safe value (1/2 of real timeout) */
+       priv->hw_margin = msecs_to_jiffies(hw_margin / 2);
+
+       watchdog_set_drvdata(&priv->wdd, priv);
+
+       priv->wdd.info          = &gpio_wdt_ident;
+       priv->wdd.ops           = &gpio_wdt_ops;
+       priv->wdd.min_timeout   = SOFT_TIMEOUT_MIN;
+       priv->wdd.max_timeout   = SOFT_TIMEOUT_MAX;
+
+       if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0)
+               priv->wdd.timeout = SOFT_TIMEOUT_DEF;
+
+       setup_timer(&priv->timer, gpio_wdt_hwping, (unsigned long)&priv->wdd);
+
+       ret = watchdog_register_device(&priv->wdd);
+       if (ret)
+               return ret;
+
+       priv->notifier.notifier_call = gpio_wdt_notify_sys;
+       ret = register_reboot_notifier(&priv->notifier);
+       if (ret)
+               watchdog_unregister_device(&priv->wdd);
+
+       return ret;
+}
+
+static int gpio_wdt_remove(struct platform_device *pdev)
+{
+       struct gpio_wdt_priv *priv = platform_get_drvdata(pdev);
+
+       del_timer_sync(&priv->timer);
+       unregister_reboot_notifier(&priv->notifier);
+       watchdog_unregister_device(&priv->wdd);
+
+       return 0;
+}
+
+static const struct of_device_id gpio_wdt_dt_ids[] = {
+       { .compatible = "linux,wdt-gpio", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gpio_wdt_dt_ids);
+
+static struct platform_driver gpio_wdt_driver = {
+       .driver = {
+               .name           = "gpio-wdt",
+               .owner          = THIS_MODULE,
+               .of_match_table = gpio_wdt_dt_ids,
+       },
+       .probe  = gpio_wdt_probe,
+       .remove = gpio_wdt_remove,
+};
+module_platform_driver(gpio_wdt_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("GPIO Watchdog");
+MODULE_LICENSE("GPL");
index 45b979d9dd13acf223ac4ede12262c51fe6e1e9d..2b75e8b472796e987c3651599b7583442d5b1089 100644 (file)
@@ -39,7 +39,7 @@
 #endif /* CONFIG_HPWDT_NMI_DECODING */
 #include <asm/nmi.h>
 
-#define HPWDT_VERSION                  "1.3.2"
+#define HPWDT_VERSION                  "1.3.3"
 #define SECS_TO_TICKS(secs)            ((secs) * 1000 / 128)
 #define TICKS_TO_SECS(ticks)           ((ticks) * 128 / 1000)
 #define HPWDT_MAX_TIMER                        TICKS_TO_SECS(65535)
@@ -55,7 +55,7 @@ static void __iomem *pci_mem_addr;            /* the PCI-memory address */
 static unsigned long __iomem *hpwdt_timer_reg;
 static unsigned long __iomem *hpwdt_timer_con;
 
-static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
+static const struct pci_device_id hpwdt_devices[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) },   /* iLO2 */
        { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) },       /* iLO3 */
        {0},                    /* terminate list */
@@ -501,8 +501,13 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
                                "but unable to determine source.\n");
                }
        }
-       panic("An NMI occurred, please see the Integrated "
-               "Management Log for details.\n");
+       panic("An NMI occurred. Depending on your system the reason "
+               "for the NMI is logged in any one of the following "
+               "resources:\n"
+               "1. Integrated Management Log (IML)\n"
+               "2. OA Syslog\n"
+               "3. OA Forward Progress Log\n"
+               "4. iLO Event Log");
 
 out:
        return NMI_DONE;
index a72fe9361ddf028ef717959183cc5bdfc3ea0478..25a2bfdb4e9d2e41812b676e4b93b0c4219c47a9 100644 (file)
@@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = {
 /*
  * Data for PCI driver interface
  */
-static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
+static const struct pci_device_id esb_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
        { 0, },                 /* End of list */
 };
index b4786bccc42c3c062153ef06ce2419900d84a76d..dd51d9539b33df6ea6b9aa9960d9f0fa5fe7c169 100644 (file)
@@ -2,6 +2,7 @@
  * Watchdog driver for IMX2 and later processors
  *
  *  Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ *  Copyright (C) 2014 Freescale Semiconductor, Inc.
  *
  * some parts adapted by similar drivers from Darius Augulis and Vladimir
  * Zapolskiy, additional improvements by Wim Van Sebroeck.
@@ -40,6 +41,7 @@
 #define IMX2_WDT_WCR_WT                (0xFF << 8)     /* -> Watchdog Timeout Field */
 #define IMX2_WDT_WCR_WRE       (1 << 3)        /* -> WDOG Reset Enable */
 #define IMX2_WDT_WCR_WDE       (1 << 2)        /* -> Watchdog Enable */
+#define IMX2_WDT_WCR_WDZST     (1 << 0)        /* -> Watchdog timer Suspend */
 
 #define IMX2_WDT_WSR           0x02            /* Service Register */
 #define IMX2_WDT_SEQ1          0x5555          /* -> service sequence 1 */
@@ -87,6 +89,8 @@ static inline void imx2_wdt_setup(void)
 {
        u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
 
+       /* Suspend timer in low power mode, write once-only */
+       val |= IMX2_WDT_WCR_WDZST;
        /* Strip the old watchdog Time-Out value */
        val &= ~IMX2_WDT_WCR_WT;
        /* Generate reset if WDOG times out */
index 4166e4d116a8e1c73b91f305e9216dc47cdf4c4e..4aa3a8a876fe84f109a211b560e453ccf879172b 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/watchdog.h>
 #include <linux/moduleparam.h>
 
+#include <asm/system_misc.h>
+
 #define REG_COUNT                      0x4
 #define REG_MODE                       0x8
 #define REG_ENABLE                     0xC
@@ -29,8 +31,17 @@ struct moxart_wdt_dev {
        unsigned int clock_frequency;
 };
 
+static struct moxart_wdt_dev *moxart_restart_ctx;
+
 static int heartbeat;
 
+static void moxart_wdt_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+       writel(1, moxart_restart_ctx->base + REG_COUNT);
+       writel(0x5ab9, moxart_restart_ctx->base + REG_MODE);
+       writel(0x03, moxart_restart_ctx->base + REG_ENABLE);
+}
+
 static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
 {
        struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
@@ -125,6 +136,9 @@ static int moxart_wdt_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       moxart_restart_ctx = moxart_wdt;
+       arm_pm_restart = moxart_wdt_restart;
+
        dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
                moxart_wdt->dev.timeout, nowayout);
 
@@ -135,6 +149,7 @@ static int moxart_wdt_remove(struct platform_device *pdev)
 {
        struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
 
+       arm_pm_restart = NULL;
        moxart_wdt_stop(&moxart_wdt->dev);
        watchdog_unregister_device(&moxart_wdt->dev);
 
index d82152077fd9fb1c512145c92a8700cb0ccaf385..c1f65b4c0aa4050442bd1742d32a0883e68537dd 100644 (file)
@@ -73,9 +73,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
  * to 0
  */
 static int prescale = 1;
-static unsigned int timeout_sec;
 
-static unsigned long wdt_is_open;
 static DEFINE_SPINLOCK(wdt_spinlock);
 
 static void mpc8xxx_wdt_keepalive(void)
@@ -87,39 +85,23 @@ static void mpc8xxx_wdt_keepalive(void)
        spin_unlock(&wdt_spinlock);
 }
 
+static struct watchdog_device mpc8xxx_wdt_dev;
 static void mpc8xxx_wdt_timer_ping(unsigned long arg);
-static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0, 0);
+static DEFINE_TIMER(wdt_timer, mpc8xxx_wdt_timer_ping, 0,
+               (unsigned long)&mpc8xxx_wdt_dev);
 
 static void mpc8xxx_wdt_timer_ping(unsigned long arg)
 {
+       struct watchdog_device *w = (struct watchdog_device *)arg;
+
        mpc8xxx_wdt_keepalive();
        /* We're pinging it twice faster than needed, just to be sure. */
-       mod_timer(&wdt_timer, jiffies + HZ * timeout_sec / 2);
-}
-
-static void mpc8xxx_wdt_pr_warn(const char *msg)
-{
-       pr_crit("%s, expect the %s soon!\n", msg,
-               reset ? "reset" : "machine check exception");
+       mod_timer(&wdt_timer, jiffies + HZ * w->timeout / 2);
 }
 
-static ssize_t mpc8xxx_wdt_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
-{
-       if (count)
-               mpc8xxx_wdt_keepalive();
-       return count;
-}
-
-static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_start(struct watchdog_device *w)
 {
        u32 tmp = SWCRR_SWEN;
-       if (test_and_set_bit(0, &wdt_is_open))
-               return -EBUSY;
-
-       /* Once we start the watchdog we can't stop it */
-       if (nowayout)
-               __module_get(THIS_MODULE);
 
        /* Good, fire up the show */
        if (prescale)
@@ -133,59 +115,37 @@ static int mpc8xxx_wdt_open(struct inode *inode, struct file *file)
 
        del_timer_sync(&wdt_timer);
 
-       return nonseekable_open(inode, file);
+       return 0;
 }
 
-static int mpc8xxx_wdt_release(struct inode *inode, struct file *file)
+static int mpc8xxx_wdt_ping(struct watchdog_device *w)
 {
-       if (!nowayout)
-               mpc8xxx_wdt_timer_ping(0);
-       else
-               mpc8xxx_wdt_pr_warn("watchdog closed");
-       clear_bit(0, &wdt_is_open);
+       mpc8xxx_wdt_keepalive();
        return 0;
 }
 
-static long mpc8xxx_wdt_ioctl(struct file *file, unsigned int cmd,
-                                                       unsigned long arg)
+static int mpc8xxx_wdt_stop(struct watchdog_device *w)
 {
-       void __user *argp = (void __user *)arg;
-       int __user *p = argp;
-       static const struct watchdog_info ident = {
-               .options = WDIOF_KEEPALIVEPING,
-               .firmware_version = 1,
-               .identity = "MPC8xxx",
-       };
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, p);
-       case WDIOC_KEEPALIVE:
-               mpc8xxx_wdt_keepalive();
-               return 0;
-       case WDIOC_GETTIMEOUT:
-               return put_user(timeout_sec, p);
-       default:
-               return -ENOTTY;
-       }
+       mod_timer(&wdt_timer, jiffies);
+       return 0;
 }
 
-static const struct file_operations mpc8xxx_wdt_fops = {
-       .owner          = THIS_MODULE,
-       .llseek         = no_llseek,
-       .write          = mpc8xxx_wdt_write,
-       .unlocked_ioctl = mpc8xxx_wdt_ioctl,
-       .open           = mpc8xxx_wdt_open,
-       .release        = mpc8xxx_wdt_release,
+static struct watchdog_info mpc8xxx_wdt_info = {
+       .options = WDIOF_KEEPALIVEPING,
+       .firmware_version = 1,
+       .identity = "MPC8xxx",
 };
 
-static struct miscdevice mpc8xxx_wdt_miscdev = {
-       .minor  = WATCHDOG_MINOR,
-       .name   = "watchdog",
-       .fops   = &mpc8xxx_wdt_fops,
+static struct watchdog_ops mpc8xxx_wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = mpc8xxx_wdt_start,
+       .ping = mpc8xxx_wdt_ping,
+       .stop = mpc8xxx_wdt_stop,
+};
+
+static struct watchdog_device mpc8xxx_wdt_dev = {
+       .info = &mpc8xxx_wdt_info,
+       .ops = &mpc8xxx_wdt_ops,
 };
 
 static const struct of_device_id mpc8xxx_wdt_match[];
@@ -197,6 +157,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
        const struct mpc8xxx_wdt_type *wdt_type;
        u32 freq = fsl_get_sys_freq();
        bool enabled;
+       unsigned int timeout_sec;
 
        match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
        if (!match)
@@ -223,6 +184,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
        else
                timeout_sec = timeout / freq;
 
+       mpc8xxx_wdt_dev.timeout = timeout_sec;
 #ifdef MODULE
        ret = mpc8xxx_wdt_init_late();
        if (ret)
@@ -238,7 +200,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
         * userspace handles it.
         */
        if (enabled)
-               mpc8xxx_wdt_timer_ping(0);
+               mod_timer(&wdt_timer, jiffies);
        return 0;
 err_unmap:
        iounmap(wd_base);
@@ -248,9 +210,10 @@ err_unmap:
 
 static int mpc8xxx_wdt_remove(struct platform_device *ofdev)
 {
-       mpc8xxx_wdt_pr_warn("watchdog removed");
+       pr_crit("Watchdog removed, expect the %s soon!\n",
+               reset ? "reset" : "machine check exception");
        del_timer_sync(&wdt_timer);
-       misc_deregister(&mpc8xxx_wdt_miscdev);
+       watchdog_unregister_device(&mpc8xxx_wdt_dev);
        iounmap(wd_base);
 
        return 0;
@@ -302,10 +265,11 @@ static int mpc8xxx_wdt_init_late(void)
        if (!wd_base)
                return -ENODEV;
 
-       ret = misc_register(&mpc8xxx_wdt_miscdev);
+       watchdog_set_nowayout(&mpc8xxx_wdt_dev, nowayout);
+
+       ret = watchdog_register_device(&mpc8xxx_wdt_dev);
        if (ret) {
-               pr_err("cannot register miscdev on minor=%d (err=%d)\n",
-                      WATCHDOG_MINOR, ret);
+               pr_err("cannot register watchdog device (err=%d)\n", ret);
                return ret;
        }
        return 0;
index 231e5b9d5c8e079ec424b9331aca90912f42efae..0b9ec61e131330524e417197304702baaebdf272 100644 (file)
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
  * register a pci_driver, because someone else might one day
  * want to register another driver on the same PCI id.
  */
-static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
+static const struct pci_device_id tco_pci_tbl[] = {
        { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
          PCI_ANY_ID, PCI_ANY_ID, },
        { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
index b4864f254b48940b7fd9b7febb368b6608af4716..c0d07eef2640b9cb0f2469acda6a579e57a16d94 100644 (file)
@@ -801,7 +801,7 @@ static void pcipcwd_card_exit(struct pci_dev *pdev)
        cards_found--;
 }
 
-static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
+static const struct pci_device_id pcipcwd_pci_tbl[] = {
        { PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
                PCI_ANY_ID, PCI_ANY_ID, },
        { 0 },                  /* End of list */
index 7d8fd041ee250d3759fc2b86519012a09ded76e2..aec946df6ed976d2479762dc51d69060a5504382 100644 (file)
@@ -40,6 +40,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define S3C2410_WTCON          0x00
 #define S3C2410_WTDAT          0x04
 #define CONFIG_S3C2410_WATCHDOG_ATBOOT         (0)
 #define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME   (15)
 
+#define EXYNOS5_RST_STAT_REG_OFFSET            0x0404
+#define EXYNOS5_WDT_DISABLE_REG_OFFSET         0x0408
+#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET      0x040c
+#define QUIRK_HAS_PMU_CONFIG                   (1 << 0)
+#define QUIRK_HAS_RST_STAT                     (1 << 1)
+
+/* These quirks require that we have a PMU register map */
+#define QUIRKS_HAVE_PMUREG                     (QUIRK_HAS_PMU_CONFIG | \
+                                                QUIRK_HAS_RST_STAT)
+
 static bool nowayout   = WATCHDOG_NOWAYOUT;
 static int tmr_margin;
 static int tmr_atboot  = CONFIG_S3C2410_WATCHDOG_ATBOOT;
@@ -83,6 +95,30 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
                        "0 to reboot (default 0)");
 MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
 
+/**
+ * struct s3c2410_wdt_variant - Per-variant config data
+ *
+ * @disable_reg: Offset in pmureg for the register that disables the watchdog
+ * timer reset functionality.
+ * @mask_reset_reg: Offset in pmureg for the register that masks the watchdog
+ * timer reset functionality.
+ * @mask_bit: Bit number for the watchdog timer in the disable register and the
+ * mask reset register.
+ * @rst_stat_reg: Offset in pmureg for the register that has the reset status.
+ * @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog
+ * reset.
+ * @quirks: A bitfield of quirks.
+ */
+
+struct s3c2410_wdt_variant {
+       int disable_reg;
+       int mask_reset_reg;
+       int mask_bit;
+       int rst_stat_reg;
+       int rst_stat_bit;
+       u32 quirks;
+};
+
 struct s3c2410_wdt {
        struct device           *dev;
        struct clk              *clock;
@@ -93,8 +129,54 @@ struct s3c2410_wdt {
        unsigned long           wtdat_save;
        struct watchdog_device  wdt_device;
        struct notifier_block   freq_transition;
+       struct s3c2410_wdt_variant *drv_data;
+       struct regmap *pmureg;
 };
 
+static const struct s3c2410_wdt_variant drv_data_s3c2410 = {
+       .quirks = 0
+};
+
+#ifdef CONFIG_OF
+static const struct s3c2410_wdt_variant drv_data_exynos5250  = {
+       .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+       .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+       .mask_bit = 20,
+       .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+       .rst_stat_bit = 20,
+       .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
+       .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+       .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+       .mask_bit = 0,
+       .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+       .rst_stat_bit = 9,
+       .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
+static const struct of_device_id s3c2410_wdt_match[] = {
+       { .compatible = "samsung,s3c2410-wdt",
+         .data = &drv_data_s3c2410 },
+       { .compatible = "samsung,exynos5250-wdt",
+         .data = &drv_data_exynos5250 },
+       { .compatible = "samsung,exynos5420-wdt",
+         .data = &drv_data_exynos5420 },
+       {},
+};
+MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
+#endif
+
+static const struct platform_device_id s3c2410_wdt_ids[] = {
+       {
+               .name = "s3c2410-wdt",
+               .driver_data = (unsigned long)&drv_data_s3c2410,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids);
+
 /* watchdog control routines */
 
 #define DBG(fmt, ...)                                  \
@@ -110,6 +192,35 @@ static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
        return container_of(nb, struct s3c2410_wdt, freq_transition);
 }
 
+static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask)
+{
+       int ret;
+       u32 mask_val = 1 << wdt->drv_data->mask_bit;
+       u32 val = 0;
+
+       /* No need to do anything if no PMU CONFIG needed */
+       if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG))
+               return 0;
+
+       if (mask)
+               val = mask_val;
+
+       ret = regmap_update_bits(wdt->pmureg,
+                       wdt->drv_data->disable_reg,
+                       mask_val, val);
+       if (ret < 0)
+               goto error;
+
+       ret = regmap_update_bits(wdt->pmureg,
+                       wdt->drv_data->mask_reset_reg,
+                       mask_val, val);
+ error:
+       if (ret < 0)
+               dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
+
+       return ret;
+}
+
 static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
 {
        struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -188,7 +299,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
        if (timeout < 1)
                return -EINVAL;
 
-       freq /= 128;
+       freq = DIV_ROUND_UP(freq, 128);
        count = timeout * freq;
 
        DBG("%s: count=%d, timeout=%d, freq=%lu\n",
@@ -200,21 +311,18 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
        */
 
        if (count >= 0x10000) {
-               for (divisor = 1; divisor <= 0x100; divisor++) {
-                       if ((count / divisor) < 0x10000)
-                               break;
-               }
+               divisor = DIV_ROUND_UP(count, 0xffff);
 
-               if ((count / divisor) >= 0x10000) {
+               if (divisor > 0x100) {
                        dev_err(wdt->dev, "timeout %d too big\n", timeout);
                        return -EINVAL;
                }
        }
 
        DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
-           __func__, timeout, divisor, count, count/divisor);
+           __func__, timeout, divisor, count, DIV_ROUND_UP(count, divisor));
 
-       count /= divisor;
+       count = DIV_ROUND_UP(count, divisor);
        wdt->count = count;
 
        /* update the pre-scaler */
@@ -264,7 +372,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
 
 static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
                                          unsigned long val, void *data)
@@ -331,6 +439,37 @@ static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
 }
 #endif
 
+static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
+{
+       unsigned int rst_stat;
+       int ret;
+
+       if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT))
+               return 0;
+
+       ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat);
+       if (ret)
+               dev_warn(wdt->dev, "Couldn't get RST_STAT register\n");
+       else if (rst_stat & BIT(wdt->drv_data->rst_stat_bit))
+               return WDIOF_CARDRESET;
+
+       return 0;
+}
+
+/* s3c2410_get_wdt_driver_data */
+static inline struct s3c2410_wdt_variant *
+get_wdt_drv_data(struct platform_device *pdev)
+{
+       if (pdev->dev.of_node) {
+               const struct of_device_id *match;
+               match = of_match_node(s3c2410_wdt_match, pdev->dev.of_node);
+               return (struct s3c2410_wdt_variant *)match->data;
+       } else {
+               return (struct s3c2410_wdt_variant *)
+                       platform_get_device_id(pdev)->driver_data;
+       }
+}
+
 static int s3c2410wdt_probe(struct platform_device *pdev)
 {
        struct device *dev;
@@ -353,6 +492,16 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
        spin_lock_init(&wdt->lock);
        wdt->wdt_device = s3c2410_wdd;
 
+       wdt->drv_data = get_wdt_drv_data(pdev);
+       if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
+               wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                               "samsung,syscon-phandle");
+               if (IS_ERR(wdt->pmureg)) {
+                       dev_err(dev, "syscon regmap lookup failed.\n");
+                       return PTR_ERR(wdt->pmureg);
+               }
+       }
+
        wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (wdt_irq == NULL) {
                dev_err(dev, "no irq resource specified\n");
@@ -415,12 +564,18 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 
        watchdog_set_nowayout(&wdt->wdt_device, nowayout);
 
+       wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
+
        ret = watchdog_register_device(&wdt->wdt_device);
        if (ret) {
                dev_err(dev, "cannot register watchdog (%d)\n", ret);
                goto err_cpufreq;
        }
 
+       ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+       if (ret < 0)
+               goto err_unregister;
+
        if (tmr_atboot && started == 0) {
                dev_info(dev, "starting watchdog timer\n");
                s3c2410wdt_start(&wdt->wdt_device);
@@ -445,6 +600,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 
        return 0;
 
+ err_unregister:
+       watchdog_unregister_device(&wdt->wdt_device);
+
  err_cpufreq:
        s3c2410wdt_cpufreq_deregister(wdt);
 
@@ -458,8 +616,13 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 
 static int s3c2410wdt_remove(struct platform_device *dev)
 {
+       int ret;
        struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
 
+       ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+       if (ret < 0)
+               return ret;
+
        watchdog_unregister_device(&wdt->wdt_device);
 
        s3c2410wdt_cpufreq_deregister(wdt);
@@ -474,6 +637,8 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
 {
        struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
 
+       s3c2410wdt_mask_and_disable_reset(wdt, true);
+
        s3c2410wdt_stop(&wdt->wdt_device);
 }
 
@@ -481,12 +646,17 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
 
 static int s3c2410wdt_suspend(struct device *dev)
 {
+       int ret;
        struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
 
        /* Save watchdog state, and turn it off. */
        wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
        wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
 
+       ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+       if (ret < 0)
+               return ret;
+
        /* Note that WTCNT doesn't need to be saved. */
        s3c2410wdt_stop(&wdt->wdt_device);
 
@@ -495,6 +665,7 @@ static int s3c2410wdt_suspend(struct device *dev)
 
 static int s3c2410wdt_resume(struct device *dev)
 {
+       int ret;
        struct s3c2410_wdt *wdt = dev_get_drvdata(dev);
 
        /* Restore watchdog state. */
@@ -502,6 +673,10 @@ static int s3c2410wdt_resume(struct device *dev)
        writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
        writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
 
+       ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+       if (ret < 0)
+               return ret;
+
        dev_info(dev, "watchdog %sabled\n",
                (wdt->wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis");
 
@@ -512,18 +687,11 @@ static int s3c2410wdt_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
                        s3c2410wdt_resume);
 
-#ifdef CONFIG_OF
-static const struct of_device_id s3c2410_wdt_match[] = {
-       { .compatible = "samsung,s3c2410-wdt" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
-#endif
-
 static struct platform_driver s3c2410wdt_driver = {
        .probe          = s3c2410wdt_probe,
        .remove         = s3c2410wdt_remove,
        .shutdown       = s3c2410wdt_shutdown,
+       .id_table       = s3c2410_wdt_ids,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "s3c2410-wdt",
@@ -538,4 +706,3 @@ MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
              "Dimitry Andric <dimitry.andric@tomtom.com>");
 MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-wdt");
index ced3edc95957b471419f069e838084428a67f26b..702d07870808311b36d859570cbf03c096026faa 100644 (file)
@@ -212,7 +212,7 @@ static struct platform_driver sirfsoc_wdt_driver = {
                .name = "sirfsoc-wdt",
                .owner = THIS_MODULE,
                .pm = &sirfsoc_wdt_pm_ops,
-               .of_match_table = of_match_ptr(sirfsoc_wdt_of_match),
+               .of_match_table = sirfsoc_wdt_of_match,
        },
        .probe = sirfsoc_wdt_probe,
        .remove = sirfsoc_wdt_remove,
index ce63a1bbf395fa94ea5833aa0eb98d84aaa5dc75..5cca9cddb87d188f19a33e8e3b6a2fc2851f8a36 100644 (file)
@@ -303,7 +303,7 @@ static struct miscdevice sp5100_tco_miscdev = {
  * register a pci_driver, because someone else might
  * want to register another driver on the same PCI id.
  */
-static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
+static const struct pci_device_id sp5100_tco_pci_tbl[] = {
        { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
          PCI_ANY_ID, },
        { 0, },                 /* End of list */
index 1a68f760cf866a1da43ce6d6a9978daf7ec0d736..d2cd9f0bcb9a2df915472446f295139f04fc0ede 100644 (file)
@@ -239,7 +239,7 @@ static void wdt_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+static const struct pci_device_id wdt_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
index e24b2108287483803c82709d0feec1dae8b29907..b1da0c18fd1ac4b9d02cdb9bf9245699a83d3e3c 100644 (file)
 #define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
 #define WATCHDOG_TIMEOUT 60            /* 60 sec default timeout */
 
-/* You must set this - there is no sane way to probe for this board. */
-static int wdt_io = 0x2E;
-module_param(wdt_io, int, 0);
-MODULE_PARM_DESC(wdt_io, "w83627hf/thf WDT io port (default 0x2E)");
+static int wdt_io;
+static int cr_wdt_timeout;     /* WDT timeout register */
+static int cr_wdt_control;     /* WDT control register */
+
+enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
+            w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
+            w83667hg_b, nct6775, nct6776, nct6779 };
 
 static int timeout;                    /* in seconds */
 module_param(timeout, int, 0);
@@ -72,6 +75,29 @@ MODULE_PARM_DESC(nowayout,
 
 #define W83627HF_LD_WDT                0x08
 
+#define W83627HF_ID            0x52
+#define W83627S_ID             0x59
+#define W83697HF_ID            0x60
+#define W83697UG_ID            0x68
+#define W83637HF_ID            0x70
+#define W83627THF_ID           0x82
+#define W83687THF_ID           0x85
+#define W83627EHF_ID           0x88
+#define W83627DHG_ID           0xa0
+#define W83627UHG_ID           0xa2
+#define W83667HG_ID            0xa5
+#define W83627DHG_P_ID         0xb0
+#define W83667HG_B_ID          0xb3
+#define NCT6775_ID             0xb4
+#define NCT6776_ID             0xc3
+#define NCT6779_ID             0xc5
+
+#define W83627HF_WDT_TIMEOUT   0xf6
+#define W83697HF_WDT_TIMEOUT   0xf4
+
+#define W83627HF_WDT_CONTROL   0xf5
+#define W83697HF_WDT_CONTROL   0xf3
+
 static void superio_outb(int reg, int val)
 {
        outb(reg, WDT_EFER);
@@ -106,10 +132,7 @@ static void superio_exit(void)
        release_region(wdt_io, 2);
 }
 
-/* tyan motherboards seem to set F5 to 0x4C ?
- * So explicitly init to appropriate value. */
-
-static int w83627hf_init(struct watchdog_device *wdog)
+static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
 {
        int ret;
        unsigned char t;
@@ -119,35 +142,83 @@ static int w83627hf_init(struct watchdog_device *wdog)
                return ret;
 
        superio_select(W83627HF_LD_WDT);
-       t = superio_inb(0x20);  /* check chip version   */
-       if (t == 0x82) {        /* W83627THF            */
-               t = (superio_inb(0x2b) & 0xf7);
-               superio_outb(0x2b, t | 0x04); /* set GPIO3 to WDT0 */
-       } else if (t == 0x88 || t == 0xa0) {    /* W83627EHF / W83627DHG */
-               t = superio_inb(0x2d);
-               superio_outb(0x2d, t & ~0x01);  /* set GPIO5 to WDT0 */
-       }
 
        /* set CR30 bit 0 to activate GPIO2 */
        t = superio_inb(0x30);
        if (!(t & 0x01))
                superio_outb(0x30, t | 0x01);
 
-       t = superio_inb(0xF6);
+       switch (chip) {
+       case w83627hf:
+       case w83627s:
+               t = superio_inb(0x2B) & ~0x10;
+               superio_outb(0x2B, t); /* set GPIO24 to WDT0 */
+               break;
+       case w83697hf:
+               /* Set pin 119 to WDTO# mode (= CR29, WDT0) */
+               t = superio_inb(0x29) & ~0x60;
+               t |= 0x20;
+               superio_outb(0x29, t);
+               break;
+       case w83697ug:
+               /* Set pin 118 to WDTO# mode */
+               t = superio_inb(0x2b) & ~0x04;
+               superio_outb(0x2b, t);
+               break;
+       case w83627thf:
+               t = (superio_inb(0x2B) & ~0x08) | 0x04;
+               superio_outb(0x2B, t); /* set GPIO3 to WDT0 */
+               break;
+       case w83627dhg:
+       case w83627dhg_p:
+               t = superio_inb(0x2D) & ~0x01; /* PIN77 -> WDT0# */
+               superio_outb(0x2D, t); /* set GPIO5 to WDT0 */
+               t = superio_inb(cr_wdt_control);
+               t |= 0x02;      /* enable the WDTO# output low pulse
+                                * to the KBRST# pin */
+               superio_outb(cr_wdt_control, t);
+               break;
+       case w83637hf:
+               break;
+       case w83687thf:
+               t = superio_inb(0x2C) & ~0x80; /* PIN47 -> WDT0# */
+               superio_outb(0x2C, t);
+               break;
+       case w83627ehf:
+       case w83627uhg:
+       case w83667hg:
+       case w83667hg_b:
+       case nct6775:
+       case nct6776:
+       case nct6779:
+               /*
+                * These chips have a fixed WDTO# output pin (W83627UHG),
+                * or support more than one WDTO# output pin.
+                * Don't touch its configuration, and hope the BIOS
+                * does the right thing.
+                */
+               t = superio_inb(cr_wdt_control);
+               t |= 0x02;      /* enable the WDTO# output low pulse
+                                * to the KBRST# pin */
+               superio_outb(cr_wdt_control, t);
+               break;
+       default:
+               break;
+       }
+
+       t = superio_inb(cr_wdt_timeout);
        if (t != 0) {
                pr_info("Watchdog already running. Resetting timeout to %d sec\n",
                        wdog->timeout);
-               superio_outb(0xF6, wdog->timeout);
+               superio_outb(cr_wdt_timeout, wdog->timeout);
        }
 
        /* set second mode & disable keyboard turning off watchdog */
-       t = superio_inb(0xF5) & ~0x0C;
-       /* enable the WDTO# output low pulse to the KBRST# pin */
-       t |= 0x02;
-       superio_outb(0xF5, t);
+       t = superio_inb(cr_wdt_control) & ~0x0C;
+       superio_outb(cr_wdt_control, t);
 
-       /* disable keyboard & mouse turning off watchdog */
-       t = superio_inb(0xF7) & ~0xC0;
+       /* reset trigger, disable keyboard & mouse turning off watchdog */
+       t = superio_inb(0xF7) & ~0xD0;
        superio_outb(0xF7, t);
 
        superio_exit();
@@ -164,7 +235,7 @@ static int wdt_set_time(unsigned int timeout)
                return ret;
 
        superio_select(W83627HF_LD_WDT);
-       superio_outb(0xF6, timeout);
+       superio_outb(cr_wdt_timeout, timeout);
        superio_exit();
 
        return 0;
@@ -197,7 +268,7 @@ static unsigned int wdt_get_time(struct watchdog_device *wdog)
                return 0;
 
        superio_select(W83627HF_LD_WDT);
-       timeleft = superio_inb(0xF6);
+       timeleft = superio_inb(cr_wdt_timeout);
        superio_exit();
 
        return timeleft;
@@ -249,16 +320,123 @@ static struct notifier_block wdt_notifier = {
        .notifier_call = wdt_notify_sys,
 };
 
+static int wdt_find(int addr)
+{
+       u8 val;
+       int ret;
+
+       cr_wdt_timeout = W83627HF_WDT_TIMEOUT;
+       cr_wdt_control = W83627HF_WDT_CONTROL;
+
+       ret = superio_enter();
+       if (ret)
+               return ret;
+       superio_select(W83627HF_LD_WDT);
+       val = superio_inb(0x20);
+       switch (val) {
+       case W83627HF_ID:
+               ret = w83627hf;
+               break;
+       case W83627S_ID:
+               ret = w83627s;
+               break;
+       case W83697HF_ID:
+               ret = w83697hf;
+               cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+               cr_wdt_control = W83697HF_WDT_CONTROL;
+               break;
+       case W83697UG_ID:
+               ret = w83697ug;
+               cr_wdt_timeout = W83697HF_WDT_TIMEOUT;
+               cr_wdt_control = W83697HF_WDT_CONTROL;
+               break;
+       case W83637HF_ID:
+               ret = w83637hf;
+               break;
+       case W83627THF_ID:
+               ret = w83627thf;
+               break;
+       case W83687THF_ID:
+               ret = w83687thf;
+               break;
+       case W83627EHF_ID:
+               ret = w83627ehf;
+               break;
+       case W83627DHG_ID:
+               ret = w83627dhg;
+               break;
+       case W83627DHG_P_ID:
+               ret = w83627dhg_p;
+               break;
+       case W83627UHG_ID:
+               ret = w83627uhg;
+               break;
+       case W83667HG_ID:
+               ret = w83667hg;
+               break;
+       case W83667HG_B_ID:
+               ret = w83667hg_b;
+               break;
+       case NCT6775_ID:
+               ret = nct6775;
+               break;
+       case NCT6776_ID:
+               ret = nct6776;
+               break;
+       case NCT6779_ID:
+               ret = nct6779;
+               break;
+       case 0xff:
+               ret = -ENODEV;
+               break;
+       default:
+               ret = -ENODEV;
+               pr_err("Unsupported chip ID: 0x%02x\n", val);
+               break;
+       }
+       superio_exit();
+       return ret;
+}
+
 static int __init wdt_init(void)
 {
        int ret;
+       int chip;
+       const char * const chip_name[] = {
+               "W83627HF",
+               "W83627S",
+               "W83697HF",
+               "W83697UG",
+               "W83637HF",
+               "W83627THF",
+               "W83687THF",
+               "W83627EHF",
+               "W83627DHG",
+               "W83627UHG",
+               "W83667HG",
+               "W83667DHG-P",
+               "W83667HG-B",
+               "NCT6775",
+               "NCT6776",
+               "NCT6779",
+       };
+
+       wdt_io = 0x2e;
+       chip = wdt_find(0x2e);
+       if (chip < 0) {
+               wdt_io = 0x4e;
+               chip = wdt_find(0x4e);
+               if (chip < 0)
+                       return chip;
+       }
 
-       pr_info("WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising\n");
+       pr_info("WDT driver for %s Super I/O chip initialising\n",
+               chip_name[chip]);
 
        watchdog_init_timeout(&wdt_dev, timeout, NULL);
        watchdog_set_nowayout(&wdt_dev, nowayout);
 
-       ret = w83627hf_init(&wdt_dev);
+       ret = w83627hf_init(&wdt_dev, chip);
        if (ret) {
                pr_err("failed to initialize watchdog (err=%d)\n", ret);
                return ret;
index 461336c4519faf3acee6dad759030a6f0778876d..cec9b559647dee675439058efb07138fe866edb7 100644 (file)
@@ -78,7 +78,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
        watchdog_check_min_max_timeout(wdd);
 
        /* try to get the timeout module parameter first */
-       if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+       if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
                wdd->timeout = timeout_parm;
                return ret;
        }
@@ -89,7 +89,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
        if (dev == NULL || dev->of_node == NULL)
                return ret;
        of_property_read_u32(dev->of_node, "timeout-sec", &t);
-       if (!watchdog_timeout_invalid(wdd, t))
+       if (!watchdog_timeout_invalid(wdd, t) && t)
                wdd->timeout = t;
        else
                ret = -EINVAL;
index ee89ba4dea63c932ecad137b49712191b9a9b799..3dc578e7121110bea28748b16c7a44b6817a566f 100644 (file)
@@ -720,7 +720,7 @@ static void wdtpci_remove_one(struct pci_dev *pdev)
 }
 
 
-static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
+static const struct pci_device_id wdtpci_pci_tbl[] = {
        {
                .vendor    = PCI_VENDOR_ID_ACCESSIO,
                .device    = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
index fc60b31453eefbbdcc234c7df78c5504da655980..0bad24ddc2e7a39abf29547f24173cc050017d15 100644 (file)
@@ -134,8 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
                return 0;
        }
 
-       iv = bip_vec_idx(bip, bip->bip_vcnt);
-       BUG_ON(iv == NULL);
+       iv = bip->bip_vec + bip->bip_vcnt;
 
        iv->bv_page = page;
        iv->bv_len = len;
@@ -203,6 +202,12 @@ static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
        return sectors;
 }
 
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+                                              unsigned int sectors)
+{
+       return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
+}
+
 /**
  * bio_integrity_tag_size - Retrieve integrity tag space
  * @bio:       bio to inspect
@@ -215,9 +220,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       BUG_ON(bio->bi_size == 0);
+       BUG_ON(bio->bi_iter.bi_size == 0);
 
-       return bi->tag_size * (bio->bi_size / bi->sector_size);
+       return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
 }
 EXPORT_SYMBOL(bio_integrity_tag_size);
 
@@ -235,9 +240,9 @@ int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
        nr_sectors = bio_integrity_hw_sectors(bi,
                                        DIV_ROUND_UP(len, bi->tag_size));
 
-       if (nr_sectors * bi->tuple_size > bip->bip_size) {
-               printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
-                      __func__, nr_sectors * bi->tuple_size, bip->bip_size);
+       if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
+               printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
+                      nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
                return -1;
        }
 
@@ -299,29 +304,30 @@ static void bio_integrity_generate(struct bio *bio)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
-       struct bio_vec *bv;
-       sector_t sector = bio->bi_sector;
-       unsigned int i, sectors, total;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       sector_t sector = bio->bi_iter.bi_sector;
+       unsigned int sectors, total;
        void *prot_buf = bio->bi_integrity->bip_buf;
 
        total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page);
-               bix.data_buf = kaddr + bv->bv_offset;
-               bix.data_size = bv->bv_len;
+       bio_for_each_segment(bv, bio, iter) {
+               void *kaddr = kmap_atomic(bv.bv_page);
+               bix.data_buf = kaddr + bv.bv_offset;
+               bix.data_size = bv.bv_len;
                bix.prot_buf = prot_buf;
                bix.sector = sector;
 
                bi->generate_fn(&bix);
 
-               sectors = bv->bv_len / bi->sector_size;
+               sectors = bv.bv_len / bi->sector_size;
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_size);
+               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
@@ -386,8 +392,8 @@ int bio_integrity_prep(struct bio *bio)
 
        bip->bip_owns_buf = 1;
        bip->bip_buf = buf;
-       bip->bip_size = len;
-       bip->bip_sector = bio->bi_sector;
+       bip->bip_iter.bi_size = len;
+       bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
        /* Map it */
        offset = offset_in_page(buf);
@@ -442,16 +448,18 @@ static int bio_integrity_verify(struct bio *bio)
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
        struct bio_vec *bv;
-       sector_t sector = bio->bi_integrity->bip_sector;
-       unsigned int i, sectors, total, ret;
+       sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
+       unsigned int sectors, total, ret;
        void *prot_buf = bio->bi_integrity->bip_buf;
+       int i;
 
        ret = total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, i) {
+       bio_for_each_segment_all(bv, bio, i) {
                void *kaddr = kmap_atomic(bv->bv_page);
+
                bix.data_buf = kaddr + bv->bv_offset;
                bix.data_size = bv->bv_len;
                bix.prot_buf = prot_buf;
@@ -468,7 +476,7 @@ static int bio_integrity_verify(struct bio *bio)
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_size);
+               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
@@ -495,7 +503,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 /**
@@ -532,56 +540,6 @@ void bio_integrity_endio(struct bio *bio, int error)
 }
 EXPORT_SYMBOL(bio_integrity_endio);
 
-/**
- * bio_integrity_mark_head - Advance bip_vec skip bytes
- * @bip:       Integrity vector to advance
- * @skip:      Number of bytes to advance it
- */
-void bio_integrity_mark_head(struct bio_integrity_payload *bip,
-                            unsigned int skip)
-{
-       struct bio_vec *iv;
-       unsigned int i;
-
-       bip_for_each_vec(iv, bip, i) {
-               if (skip == 0) {
-                       bip->bip_idx = i;
-                       return;
-               } else if (skip >= iv->bv_len) {
-                       skip -= iv->bv_len;
-               } else { /* skip < iv->bv_len) */
-                       iv->bv_offset += skip;
-                       iv->bv_len -= skip;
-                       bip->bip_idx = i;
-                       return;
-               }
-       }
-}
-
-/**
- * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
- * @bip:       Integrity vector to truncate
- * @len:       New length of integrity vector
- */
-void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
-                            unsigned int len)
-{
-       struct bio_vec *iv;
-       unsigned int i;
-
-       bip_for_each_vec(iv, bip, i) {
-               if (len == 0) {
-                       bip->bip_vcnt = i;
-                       return;
-               } else if (len >= iv->bv_len) {
-                       len -= iv->bv_len;
-               } else { /* len < iv->bv_len) */
-                       iv->bv_len = len;
-                       len = 0;
-               }
-       }
-}
-
 /**
  * bio_integrity_advance - Advance integrity vector
  * @bio:       bio whose integrity vector to update
@@ -595,13 +553,9 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-       unsigned int nr_sectors;
+       unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
-       BUG_ON(bip == NULL);
-       BUG_ON(bi == NULL);
-
-       nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
-       bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
+       bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
 EXPORT_SYMBOL(bio_integrity_advance);
 
@@ -621,63 +575,12 @@ void bio_integrity_trim(struct bio *bio, unsigned int offset,
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-       unsigned int nr_sectors;
-
-       BUG_ON(bip == NULL);
-       BUG_ON(bi == NULL);
-       BUG_ON(!bio_flagged(bio, BIO_CLONED));
 
-       nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-       bip->bip_sector = bip->bip_sector + offset;
-       bio_integrity_mark_head(bip, offset * bi->tuple_size);
-       bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
+       bio_integrity_advance(bio, offset << 9);
+       bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
 }
 EXPORT_SYMBOL(bio_integrity_trim);
 
-/**
- * bio_integrity_split - Split integrity metadata
- * @bio:       Protected bio
- * @bp:                Resulting bio_pair
- * @sectors:   Offset
- *
- * Description: Splits an integrity page into a bio_pair.
- */
-void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
-{
-       struct blk_integrity *bi;
-       struct bio_integrity_payload *bip = bio->bi_integrity;
-       unsigned int nr_sectors;
-
-       if (bio_integrity(bio) == 0)
-               return;
-
-       bi = bdev_get_integrity(bio->bi_bdev);
-       BUG_ON(bi == NULL);
-       BUG_ON(bip->bip_vcnt != 1);
-
-       nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-
-       bp->bio1.bi_integrity = &bp->bip1;
-       bp->bio2.bi_integrity = &bp->bip2;
-
-       bp->iv1 = bip->bip_vec[bip->bip_idx];
-       bp->iv2 = bip->bip_vec[bip->bip_idx];
-
-       bp->bip1.bip_vec = &bp->iv1;
-       bp->bip2.bip_vec = &bp->iv2;
-
-       bp->iv1.bv_len = sectors * bi->tuple_size;
-       bp->iv2.bv_offset += sectors * bi->tuple_size;
-       bp->iv2.bv_len -= sectors * bi->tuple_size;
-
-       bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
-       bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
-
-       bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
-       bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
-}
-EXPORT_SYMBOL(bio_integrity_split);
-
 /**
  * bio_integrity_clone - Callback for cloning bios with integrity metadata
  * @bio:       New bio
@@ -702,9 +605,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        memcpy(bip->bip_vec, bip_src->bip_vec,
               bip_src->bip_vcnt * sizeof(struct bio_vec));
 
-       bip->bip_sector = bip_src->bip_sector;
        bip->bip_vcnt = bip_src->bip_vcnt;
-       bip->bip_idx = bip_src->bip_idx;
+       bip->bip_iter = bip_src->bip_iter;
 
        return 0;
 }
index 33d79a4eb92d6e623aa90e2291af39b2b2689d83..75c49a38223969c1f7256868cb3b09fc7d3bd286 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -38,8 +38,6 @@
  */
 #define BIO_INLINE_VECS                4
 
-static mempool_t *bio_split_pool __read_mostly;
-
 /*
  * if you change this list, also change bvec_alloc or things will
  * break badly! cannot be bigger than what you can fit into an
@@ -273,6 +271,7 @@ void bio_init(struct bio *bio)
 {
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
+       atomic_set(&bio->bi_remaining, 1);
        atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
@@ -295,9 +294,35 @@ void bio_reset(struct bio *bio)
 
        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags|(1 << BIO_UPTODATE);
+       atomic_set(&bio->bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
+static void bio_chain_endio(struct bio *bio, int error)
+{
+       bio_endio(bio->bi_private, error);
+       bio_put(bio);
+}
+
+/**
+ * bio_chain - chain bio completions
+ *
+ * The caller won't have a bi_end_io called when @bio completes - instead,
+ * @parent's bi_end_io won't be called until both @parent and @bio have
+ * completed; the chained bio will also be freed when it completes.
+ *
+ * The caller must not set bi_private or bi_end_io in @bio.
+ */
+void bio_chain(struct bio *bio, struct bio *parent)
+{
+       BUG_ON(bio->bi_private || bio->bi_end_io);
+
+       bio->bi_private = parent;
+       bio->bi_end_io  = bio_chain_endio;
+       atomic_inc(&parent->bi_remaining);
+}
+EXPORT_SYMBOL(bio_chain);
+
 static void bio_alloc_rescue(struct work_struct *work)
 {
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
@@ -473,13 +498,13 @@ EXPORT_SYMBOL(bio_alloc_bioset);
 void zero_fill_bio(struct bio *bio)
 {
        unsigned long flags;
-       struct bio_vec *bv;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
-               char *data = bvec_kmap_irq(bv, &flags);
-               memset(data, 0, bv->bv_len);
-               flush_dcache_page(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               char *data = bvec_kmap_irq(&bv, &flags);
+               memset(data, 0, bv.bv_len);
+               flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
 }
@@ -515,51 +540,49 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 EXPORT_SYMBOL(bio_phys_segments);
 
 /**
- *     __bio_clone     -       clone a bio
+ *     __bio_clone_fast - clone a bio that shares the original bio's biovec
  *     @bio: destination bio
  *     @bio_src: bio to clone
  *
  *     Clone a &bio. Caller will own the returned bio, but not
  *     the actual data it points to. Reference count of returned
  *     bio will be one.
+ *
+ *     Caller must ensure that @bio_src is not freed before @bio.
  */
-void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 {
-       memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
-               bio_src->bi_max_vecs * sizeof(struct bio_vec));
+       BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
 
        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
-       bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
-       bio->bi_vcnt = bio_src->bi_vcnt;
-       bio->bi_size = bio_src->bi_size;
-       bio->bi_idx = bio_src->bi_idx;
+       bio->bi_iter = bio_src->bi_iter;
+       bio->bi_io_vec = bio_src->bi_io_vec;
 }
-EXPORT_SYMBOL(__bio_clone);
+EXPORT_SYMBOL(__bio_clone_fast);
 
 /**
- *     bio_clone_bioset -      clone a bio
+ *     bio_clone_fast - clone a bio that shares the original bio's biovec
  *     @bio: bio to clone
  *     @gfp_mask: allocation priority
  *     @bs: bio_set to allocate from
  *
- *     Like __bio_clone, only also allocates the returned bio
+ *     Like __bio_clone_fast, only also allocates the returned bio
  */
-struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
-                            struct bio_set *bs)
+struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 {
        struct bio *b;
 
-       b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
+       b = bio_alloc_bioset(gfp_mask, 0, bs);
        if (!b)
                return NULL;
 
-       __bio_clone(b, bio);
+       __bio_clone_fast(b, bio);
 
        if (bio_integrity(bio)) {
                int ret;
@@ -574,6 +597,74 @@ struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
 
        return b;
 }
+EXPORT_SYMBOL(bio_clone_fast);
+
+/**
+ *     bio_clone_bioset - clone a bio
+ *     @bio_src: bio to clone
+ *     @gfp_mask: allocation priority
+ *     @bs: bio_set to allocate from
+ *
+ *     Clone bio. Caller will own the returned bio, but not the actual data it
+ *     points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+                            struct bio_set *bs)
+{
+       unsigned nr_iovecs = 0;
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       struct bio *bio;
+
+       /*
+        * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+        * bio_src->bi_io_vec to bio->bi_io_vec.
+        *
+        * We can't do that anymore, because:
+        *
+        *  - The point of cloning the biovec is to produce a bio with a biovec
+        *    the caller can modify: bi_idx and bi_bvec_done should be 0.
+        *
+        *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+        *    we tried to clone the whole thing bio_alloc_bioset() would fail.
+        *    But the clone should succeed as long as the number of biovecs we
+        *    actually need to allocate is fewer than BIO_MAX_PAGES.
+        *
+        *  - Lastly, bi_vcnt should not be looked at or relied upon by code
+        *    that does not own the bio - reason being drivers don't use it for
+        *    iterating over the biovec anymore, so expecting it to be kept up
+        *    to date (i.e. for clones that share the parent biovec) is just
+        *    asking for trouble and would force extra work on
+        *    __bio_clone_fast() anyways.
+        */
+
+       bio_for_each_segment(bv, bio_src, iter)
+               nr_iovecs++;
+
+       bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
+       if (!bio)
+               return NULL;
+
+       bio->bi_bdev            = bio_src->bi_bdev;
+       bio->bi_rw              = bio_src->bi_rw;
+       bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
+       bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
+
+       bio_for_each_segment(bv, bio_src, iter)
+               bio->bi_io_vec[bio->bi_vcnt++] = bv;
+
+       if (bio_integrity(bio_src)) {
+               int ret;
+
+               ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+               if (ret < 0) {
+                       bio_put(bio);
+                       return NULL;
+               }
+       }
+
+       return bio;
+}
 EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
@@ -612,7 +703,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;
 
-       if (((bio->bi_size + len) >> 9) > max_sectors)
+       if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
                return 0;
 
        /*
@@ -635,8 +726,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                                           simulate merging updated prev_bvec
                                           as new bvec. */
                                        .bi_bdev = bio->bi_bdev,
-                                       .bi_sector = bio->bi_sector,
-                                       .bi_size = bio->bi_size - prev_bv_len,
+                                       .bi_sector = bio->bi_iter.bi_sector,
+                                       .bi_size = bio->bi_iter.bi_size -
+                                               prev_bv_len,
                                        .bi_rw = bio->bi_rw,
                                };
 
@@ -684,8 +776,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
-                       .bi_sector = bio->bi_sector,
-                       .bi_size = bio->bi_size,
+                       .bi_sector = bio->bi_iter.bi_sector,
+                       .bi_size = bio->bi_iter.bi_size,
                        .bi_rw = bio->bi_rw,
                };
 
@@ -708,7 +800,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
  done:
-       bio->bi_size += len;
+       bio->bi_iter.bi_size += len;
        return len;
 }
 
@@ -807,28 +899,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);
 
-       bio->bi_sector += bytes >> 9;
-       bio->bi_size -= bytes;
-
-       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
-               return;
-
-       while (bytes) {
-               if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-                       WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
-                                 bio->bi_idx, bio->bi_vcnt);
-                       break;
-               }
-
-               if (bytes >= bio_iovec(bio)->bv_len) {
-                       bytes -= bio_iovec(bio)->bv_len;
-                       bio->bi_idx++;
-               } else {
-                       bio_iovec(bio)->bv_len -= bytes;
-                       bio_iovec(bio)->bv_offset += bytes;
-                       bytes = 0;
-               }
-       }
+       bio_advance_iter(bio, &bio->bi_iter, bytes);
 }
 EXPORT_SYMBOL(bio_advance);
 
@@ -874,117 +945,80 @@ EXPORT_SYMBOL(bio_alloc_pages);
  */
 void bio_copy_data(struct bio *dst, struct bio *src)
 {
-       struct bio_vec *src_bv, *dst_bv;
-       unsigned src_offset, dst_offset, bytes;
+       struct bvec_iter src_iter, dst_iter;
+       struct bio_vec src_bv, dst_bv;
        void *src_p, *dst_p;
+       unsigned bytes;
 
-       src_bv = bio_iovec(src);
-       dst_bv = bio_iovec(dst);
-
-       src_offset = src_bv->bv_offset;
-       dst_offset = dst_bv->bv_offset;
+       src_iter = src->bi_iter;
+       dst_iter = dst->bi_iter;
 
        while (1) {
-               if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
-                       src_bv++;
-                       if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
-                               src = src->bi_next;
-                               if (!src)
-                                       break;
-
-                               src_bv = bio_iovec(src);
-                       }
+               if (!src_iter.bi_size) {
+                       src = src->bi_next;
+                       if (!src)
+                               break;
 
-                       src_offset = src_bv->bv_offset;
+                       src_iter = src->bi_iter;
                }
 
-               if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
-                       dst_bv++;
-                       if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
-                               dst = dst->bi_next;
-                               if (!dst)
-                                       break;
-
-                               dst_bv = bio_iovec(dst);
-                       }
+               if (!dst_iter.bi_size) {
+                       dst = dst->bi_next;
+                       if (!dst)
+                               break;
 
-                       dst_offset = dst_bv->bv_offset;
+                       dst_iter = dst->bi_iter;
                }
 
-               bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
-                           src_bv->bv_offset + src_bv->bv_len - src_offset);
+               src_bv = bio_iter_iovec(src, src_iter);
+               dst_bv = bio_iter_iovec(dst, dst_iter);
+
+               bytes = min(src_bv.bv_len, dst_bv.bv_len);
 
-               src_p = kmap_atomic(src_bv->bv_page);
-               dst_p = kmap_atomic(dst_bv->bv_page);
+               src_p = kmap_atomic(src_bv.bv_page);
+               dst_p = kmap_atomic(dst_bv.bv_page);
 
-               memcpy(dst_p + dst_offset,
-                      src_p + src_offset,
+               memcpy(dst_p + dst_bv.bv_offset,
+                      src_p + src_bv.bv_offset,
                       bytes);
 
                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);
 
-               src_offset += bytes;
-               dst_offset += bytes;
+               bio_advance_iter(src, &src_iter, bytes);
+               bio_advance_iter(dst, &dst_iter, bytes);
        }
 }
 EXPORT_SYMBOL(bio_copy_data);
 
 struct bio_map_data {
-       struct bio_vec *iovecs;
-       struct sg_iovec *sgvecs;
        int nr_sgvecs;
        int is_our_pages;
+       struct sg_iovec sgvecs[];
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
 {
-       memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bmd->is_our_pages = is_our_pages;
        bio->bi_private = bmd;
 }
 
-static void bio_free_map_data(struct bio_map_data *bmd)
-{
-       kfree(bmd->iovecs);
-       kfree(bmd->sgvecs);
-       kfree(bmd);
-}
-
 static struct bio_map_data *bio_alloc_map_data(int nr_segs,
                                               unsigned int iov_count,
                                               gfp_t gfp_mask)
 {
-       struct bio_map_data *bmd;
-
        if (iov_count > UIO_MAXIOV)
                return NULL;
 
-       bmd = kmalloc(sizeof(*bmd), gfp_mask);
-       if (!bmd)
-               return NULL;
-
-       bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
-       if (!bmd->iovecs) {
-               kfree(bmd);
-               return NULL;
-       }
-
-       bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
-       if (bmd->sgvecs)
-               return bmd;
-
-       kfree(bmd->iovecs);
-       kfree(bmd);
-       return NULL;
+       return kmalloc(sizeof(struct bio_map_data) +
+                      sizeof(struct sg_iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-                         struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
                          int to_user, int from_user, int do_free_page)
 {
        int ret = 0, i;
@@ -994,7 +1028,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *bv_addr = page_address(bvec->bv_page);
-               unsigned int bv_len = iovecs[i].bv_len;
+               unsigned int bv_len = bvec->bv_len;
 
                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
@@ -1054,14 +1088,14 @@ int bio_uncopy_user(struct bio *bio)
                 * don't copy into a random user address space, just free.
                 */
                if (current->mm)
-                       ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-                                            bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+                       ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+                                            bio_data_dir(bio) == READ,
                                             0, bmd->is_our_pages);
                else if (bmd->is_our_pages)
                        bio_for_each_segment_all(bvec, bio, i)
                                __free_page(bvec->bv_page);
        }
-       bio_free_map_data(bmd);
+       kfree(bmd);
        bio_put(bio);
        return ret;
 }
@@ -1175,7 +1209,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
-               ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
+               ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
                if (ret)
                        goto cleanup;
        }
@@ -1189,7 +1223,7 @@ cleanup:
 
        bio_put(bio);
 out_bmd:
-       bio_free_map_data(bmd);
+       kfree(bmd);
        return ERR_PTR(ret);
 }
 
@@ -1485,7 +1519,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        if (IS_ERR(bio))
                return bio;
 
-       if (bio->bi_size == len)
+       if (bio->bi_iter.bi_size == len)
                return bio;
 
        /*
@@ -1506,16 +1540,15 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *addr = page_address(bvec->bv_page);
-               int len = bmd->iovecs[i].bv_len;
 
                if (read)
-                       memcpy(p, addr, len);
+                       memcpy(p, addr, bvec->bv_len);
 
                __free_page(bvec->bv_page);
-               p += len;
+               p += bvec->bv_len;
        }
 
-       bio_free_map_data(bmd);
+       kfree(bmd);
        bio_put(bio);
 }
 
@@ -1686,11 +1719,11 @@ void bio_check_pages_dirty(struct bio *bio)
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 void bio_flush_dcache_pages(struct bio *bi)
 {
-       int i;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bvec, bi, i)
-               flush_dcache_page(bvec->bv_page);
+       bio_for_each_segment(bvec, bi, iter)
+               flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif
@@ -1711,96 +1744,86 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
  **/
 void bio_endio(struct bio *bio, int error)
 {
-       if (error)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               error = -EIO;
+       while (bio) {
+               BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
 
-       if (bio->bi_end_io)
-               bio->bi_end_io(bio, error);
-}
-EXPORT_SYMBOL(bio_endio);
+               if (error)
+                       clear_bit(BIO_UPTODATE, &bio->bi_flags);
+               else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+                       error = -EIO;
 
-void bio_pair_release(struct bio_pair *bp)
-{
-       if (atomic_dec_and_test(&bp->cnt)) {
-               struct bio *master = bp->bio1.bi_private;
+               if (!atomic_dec_and_test(&bio->bi_remaining))
+                       return;
 
-               bio_endio(master, bp->error);
-               mempool_free(bp, bp->bio2.bi_private);
+               /*
+                * Need to have a real endio function for chained bios,
+                * otherwise various corner cases will break (like stacking
+                * block devices that save/restore bi_end_io) - however, we want
+                * to avoid unbounded recursion and blowing the stack. Tail call
+                * optimization would handle this, but compiling with frame
+                * pointers also disables gcc's sibling call optimization.
+                */
+               if (bio->bi_end_io == bio_chain_endio) {
+                       struct bio *parent = bio->bi_private;
+                       bio_put(bio);
+                       bio = parent;
+               } else {
+                       if (bio->bi_end_io)
+                               bio->bi_end_io(bio, error);
+                       bio = NULL;
+               }
        }
 }
-EXPORT_SYMBOL(bio_pair_release);
+EXPORT_SYMBOL(bio_endio);
 
-static void bio_pair_end_1(struct bio *bi, int err)
+/**
+ * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
+ * @bio:       bio
+ * @error:     error, if any
+ *
+ * For code that has saved and restored bi_end_io; think hard before using this
+ * function, probably you should've cloned the entire bio.
+ **/
+void bio_endio_nodec(struct bio *bio, int error)
 {
-       struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
-
-       if (err)
-               bp->error = err;
-
-       bio_pair_release(bp);
+       atomic_inc(&bio->bi_remaining);
+       bio_endio(bio, error);
 }
+EXPORT_SYMBOL(bio_endio_nodec);
 
-static void bio_pair_end_2(struct bio *bi, int err)
-{
-       struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
-
-       if (err)
-               bp->error = err;
-
-       bio_pair_release(bp);
-}
-
-/*
- * split a bio - only worry about a bio with a single page in its iovec
+/**
+ * bio_split - split a bio
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:                bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+ * responsibility to ensure that @bio is not freed before the split.
  */
-struct bio_pair *bio_split(struct bio *bi, int first_sectors)
+struct bio *bio_split(struct bio *bio, int sectors,
+                     gfp_t gfp, struct bio_set *bs)
 {
-       struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
-
-       if (!bp)
-               return bp;
-
-       trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
-                               bi->bi_sector + first_sectors);
-
-       BUG_ON(bio_segments(bi) > 1);
-       atomic_set(&bp->cnt, 3);
-       bp->error = 0;
-       bp->bio1 = *bi;
-       bp->bio2 = *bi;
-       bp->bio2.bi_sector += first_sectors;
-       bp->bio2.bi_size -= first_sectors << 9;
-       bp->bio1.bi_size = first_sectors << 9;
-
-       if (bi->bi_vcnt != 0) {
-               bp->bv1 = *bio_iovec(bi);
-               bp->bv2 = *bio_iovec(bi);
-
-               if (bio_is_rw(bi)) {
-                       bp->bv2.bv_offset += first_sectors << 9;
-                       bp->bv2.bv_len -= first_sectors << 9;
-                       bp->bv1.bv_len = first_sectors << 9;
-               }
+       struct bio *split = NULL;
 
-               bp->bio1.bi_io_vec = &bp->bv1;
-               bp->bio2.bi_io_vec = &bp->bv2;
+       BUG_ON(sectors <= 0);
+       BUG_ON(sectors >= bio_sectors(bio));
 
-               bp->bio1.bi_max_vecs = 1;
-               bp->bio2.bi_max_vecs = 1;
-       }
+       split = bio_clone_fast(bio, gfp, bs);
+       if (!split)
+               return NULL;
 
-       bp->bio1.bi_end_io = bio_pair_end_1;
-       bp->bio2.bi_end_io = bio_pair_end_2;
+       split->bi_iter.bi_size = sectors << 9;
 
-       bp->bio1.bi_private = bi;
-       bp->bio2.bi_private = bio_split_pool;
+       if (bio_integrity(split))
+               bio_integrity_trim(split, 0, sectors);
 
-       if (bio_integrity(bi))
-               bio_integrity_split(bi, bp, first_sectors);
+       bio_advance(bio, split->bi_iter.bi_size);
 
-       return bp;
+       return split;
 }
 EXPORT_SYMBOL(bio_split);
 
@@ -1814,80 +1837,20 @@ void bio_trim(struct bio *bio, int offset, int size)
 {
        /* 'bio' is a cloned bio which we need to trim to match
         * the given offset and size.
-        * This requires adjusting bi_sector, bi_size, and bi_io_vec
         */
-       int i;
-       struct bio_vec *bvec;
-       int sofar = 0;
 
        size <<= 9;
-       if (offset == 0 && size == bio->bi_size)
+       if (offset == 0 && size == bio->bi_iter.bi_size)
                return;
 
        clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
        bio_advance(bio, offset << 9);
 
-       bio->bi_size = size;
-
-       /* avoid any complications with bi_idx being non-zero*/
-       if (bio->bi_idx) {
-               memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-                       (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-               bio->bi_vcnt -= bio->bi_idx;
-               bio->bi_idx = 0;
-       }
-       /* Make sure vcnt and last bv are not too big */
-       bio_for_each_segment(bvec, bio, i) {
-               if (sofar + bvec->bv_len > size)
-                       bvec->bv_len = size - sofar;
-               if (bvec->bv_len == 0) {
-                       bio->bi_vcnt = i;
-                       break;
-               }
-               sofar += bvec->bv_len;
-       }
+       bio->bi_iter.bi_size = size;
 }
 EXPORT_SYMBOL_GPL(bio_trim);
 
-/**
- *      bio_sector_offset - Find hardware sector offset in bio
- *      @bio:           bio to inspect
- *      @index:         bio_vec index
- *      @offset:        offset in bv_page
- *
- *      Return the number of hardware sectors between beginning of bio
- *      and an end point indicated by a bio_vec index and an offset
- *      within that vector's page.
- */
-sector_t bio_sector_offset(struct bio *bio, unsigned short index,
-                          unsigned int offset)
-{
-       unsigned int sector_sz;
-       struct bio_vec *bv;
-       sector_t sectors;
-       int i;
-
-       sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
-       sectors = 0;
-
-       if (index >= bio->bi_idx)
-               index = bio->bi_vcnt - 1;
-
-       bio_for_each_segment_all(bv, bio, i) {
-               if (i == index) {
-                       if (offset > bv->bv_offset)
-                               sectors += (offset - bv->bv_offset) / sector_sz;
-                       break;
-               }
-
-               sectors += bv->bv_len / sector_sz;
-       }
-
-       return sectors;
-}
-EXPORT_SYMBOL(bio_sector_offset);
-
 /*
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
@@ -2065,11 +2028,6 @@ static int __init init_bio(void)
        if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
                panic("bio: can't create integrity pool\n");
 
-       bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
-                                                    sizeof(struct bio_pair));
-       if (!bio_split_pool)
-               panic("bio: can't create split pool\n");
-
        return 0;
 }
 subsys_initcall(init_bio);
index 131d82800b3af45778cb8651f5c559bd57cec437..cb05e1c842c5b8b84dee98d1a3f452eaa179417e 100644 (file)
@@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                        return -1;
                }
                bio->bi_bdev = block_ctx->dev->bdev;
-               bio->bi_sector = dev_bytenr >> 9;
+               bio->bi_iter.bi_sector = dev_bytenr >> 9;
 
                for (j = i; j < num_pages; j++) {
                        ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                int bio_is_patched;
                char **mapped_datav;
 
-               dev_bytenr = 512 * bio->bi_sector;
+               dev_bytenr = 512 * bio->bi_iter.bi_sector;
                bio_is_patched = 0;
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                               "submit_bio(rw=0x%x, bi_vcnt=%u,"
                               " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
                               rw, bio->bi_vcnt,
-                              (unsigned long long)bio->bi_sector, dev_bytenr,
-                              bio->bi_bdev);
+                              (unsigned long long)bio->bi_iter.bi_sector,
+                              dev_bytenr, bio->bi_bdev);
 
                mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
                                       GFP_NOFS);
index 1499b27b41863e7dfbbe7da134b1a7fb66dece34..f5cdeb4b553824744429cff1f4d8b57c17a27909 100644 (file)
@@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err)
                goto out;
 
        inode = cb->inode;
-       ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+       ret = check_compressed_csum(inode, cb,
+                                   (u64)bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;
 
@@ -201,18 +202,16 @@ csum_failed:
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
-               int bio_index = 0;
-               struct bio_vec *bvec = cb->orig_bio->bi_io_vec;
+               int i;
+               struct bio_vec *bvec;
 
                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
-               while (bio_index < cb->orig_bio->bi_vcnt) {
+               bio_for_each_segment_all(bvec, cb->orig_bio, i)
                        SetPageChecked(bvec->bv_page);
-                       bvec++;
-                       bio_index++;
-               }
+
                bio_endio(cb->orig_bio, 0);
        }
 
@@ -372,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
-               if (bio->bi_size)
+               if (bio->bi_iter.bi_size)
                        ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
                                                           PAGE_CACHE_SIZE,
                                                           bio, 0);
@@ -506,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 
                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
-                   (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+                   (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
@@ -552,7 +551,7 @@ next:
  * in it.  We don't actually do IO on those pages but allocate new ones
  * to hold the compressed pages on disk.
  *
- * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
  * bio->bi_vcnt is a count of pages
  *
@@ -573,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
-       u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+       u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
@@ -659,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_CACHE_SHIFT;
 
-               if (comp_bio->bi_size)
+               if (comp_bio->bi_iter.bi_size)
                        ret = tree->ops->merge_bio_hook(READ, page, 0,
                                                        PAGE_CACHE_SIZE,
                                                        comp_bio, 0);
@@ -687,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                                        comp_bio, sums);
                                BUG_ON(ret); /* -ENOMEM */
                        }
-                       sums += (comp_bio->bi_size + root->sectorsize - 1) /
-                               root->sectorsize;
+                       sums += (comp_bio->bi_iter.bi_size +
+                                root->sectorsize - 1) / root->sectorsize;
 
                        ret = btrfs_map_bio(root, READ, comp_bio,
                                            mirror_num, 0);
index 8072cfa8a3b16c075e5c381f481e7cb874d9c531..e71039ea66cf9d4bf8a56bdd984bc7e26597aa74 100644 (file)
@@ -842,20 +842,17 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 static int btree_csum_one_bio(struct bio *bio)
 {
-       struct bio_vec *bvec = bio->bi_io_vec;
-       int bio_index = 0;
+       struct bio_vec *bvec;
        struct btrfs_root *root;
-       int ret = 0;
+       int i, ret = 0;
 
-       WARN_ON(bio->bi_vcnt <= 0);
-       while (bio_index < bio->bi_vcnt) {
+       bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
-               bio_index++;
-               bvec++;
        }
+
        return ret;
 }
 
@@ -1695,7 +1692,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 static int cleaner_kthread(void *arg)
index ff43802a7c886088e37c5c1c16427f2b522cad30..bcb6f1b780d64512868303c04a7939060612e3e3 100644 (file)
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
        map_length = length;
 
        ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        }
        BUG_ON(mirror_num != bbio->mirror_num);
        sector = bbio->stripes[mirror_num-1].physical >> 9;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        dev = bbio->stripes[mirror_num-1].dev;
        kfree(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
@@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
                return -EIO;
        }
        bio->bi_end_io = failed_bio->bi_end_io;
-       bio->bi_sector = failrec->logical >> 9;
+       bio->bi_iter.bi_sector = failrec->logical >> 9;
        bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
 
        btrfs_failed_bio = btrfs_io_bio(failed_bio);
        if (btrfs_failed_bio->csum) {
@@ -2332,12 +2332,13 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
@@ -2355,14 +2356,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                start = page_offset(page);
                end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
                if (end_extent_writepage(page, err, start, end))
                        continue;
 
                end_page_writeback(page);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        bio_put(bio);
 }
@@ -2392,9 +2390,8 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
  */
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
+       struct bio_vec *bvec;
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_vec *bvec = bio->bi_io_vec;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct extent_io_tree *tree;
        u64 offset = 0;
@@ -2405,16 +2402,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
        u64 extent_len = 0;
        int mirror;
        int ret;
+       int i;
 
        if (err)
                uptodate = 0;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct inode *inode = page->mapping->host;
 
                pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-                        "mirror=%lu\n", (u64)bio->bi_sector, err,
+                        "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
                         io_bio->mirror_num);
                tree = &BTRFS_I(inode)->io_tree;
 
@@ -2433,9 +2431,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                end = start + bvec->bv_offset + bvec->bv_len - 1;
                len = bvec->bv_len;
 
-               if (++bvec <= bvec_end)
-                       prefetchw(&bvec->bv_page->flags);
-
                mirror = io_bio->mirror_num;
                if (likely(uptodate && tree->ops &&
                           tree->ops->readpage_end_io_hook)) {
@@ -2516,7 +2511,7 @@ readpage_ok:
                        extent_start = start;
                        extent_len = end + 1 - start;
                }
-       } while (bvec <= bvec_end);
+       }
 
        if (extent_len)
                endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,9 +2542,8 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
        }
 
        if (bio) {
-               bio->bi_size = 0;
                bio->bi_bdev = bdev;
-               bio->bi_sector = first_sector;
+               bio->bi_iter.bi_sector = first_sector;
                btrfs_bio = btrfs_io_bio(bio);
                btrfs_bio->csum = NULL;
                btrfs_bio->csum_allocated = NULL;
@@ -2643,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
                if (old_compressed)
-                       contig = bio->bi_sector == sector;
+                       contig = bio->bi_iter.bi_sector == sector;
                else
                        contig = bio_end_sector(bio) == sector;
 
@@ -3410,20 +3404,18 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
 
 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 {
-       int uptodate = err == 0;
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
        struct extent_buffer *eb;
-       int done;
+       int i, done;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               bvec--;
                eb = (struct extent_buffer *)page->private;
                BUG_ON(!eb);
                done = atomic_dec_and_test(&eb->io_pages);
 
-               if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+               if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
                        ClearPageUptodate(page);
                        SetPageError(page);
@@ -3435,10 +3427,9 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
                        continue;
 
                end_extent_buffer_writeback(eb);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        bio_put(bio);
-
 }
 
 static int write_one_eb(struct extent_buffer *eb,
index 6f384886028386f2f069756ef18e757b10ba9dbf..84a46a42d26269b94fbb0a823e1fec43439d5e69 100644 (file)
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        if (!path)
                return -ENOMEM;
 
-       nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits;
+       nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                csum = (u8 *)dst;
        }
 
-       if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+       if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
                path->reada = 2;
 
        WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                path->skip_locking = 1;
        }
 
-       disk_bytenr = (u64)bio->bi_sector << 9;
+       disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;
        while (bio_index < bio->bi_vcnt) {
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
                              struct btrfs_dio_private *dip, struct bio *bio,
                              u64 offset)
 {
-       int len = (bio->bi_sector << 9) - dip->disk_bytenr;
+       int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        int ret;
 
@@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
        u64 offset;
 
        WARN_ON(bio->bi_vcnt <= 0);
-       sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+       sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+                      GFP_NOFS);
        if (!sums)
                return -ENOMEM;
 
-       sums->len = bio->bi_size;
+       sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);
 
        if (contig)
@@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 
        ordered = btrfs_lookup_ordered_extent(inode, offset);
        BUG_ON(!ordered); /* Logic error */
-       sums->bytenr = (u64)bio->bi_sector << 9;
+       sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;
 
        while (bio_index < bio->bi_vcnt) {
@@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        btrfs_add_ordered_sum(inode, ordered, sums);
                        btrfs_put_ordered_extent(ordered);
 
-                       bytes_left = bio->bi_size - total_bytes;
+                       bytes_left = bio->bi_iter.bi_size - total_bytes;
 
                        sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
                                       GFP_NOFS);
@@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        sums->len = bytes_left;
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
-                       sums->bytenr = ((u64)bio->bi_sector << 9) +
+                       sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
                                       total_bytes;
                        index = 0;
                }
index 514b291b135405dd1fbd21f9a8e4edc1b161f5af..d546d8c3038baa4451aa2f338a0c24592a3ea48f 100644 (file)
@@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
                         unsigned long bio_flags)
 {
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-       u64 logical = (u64)bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        int ret;
@@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
        if (bio_flags & EXTENT_BIO_COMPRESSED)
                return 0;
 
-       length = bio->bi_size;
+       length = bio->bi_iter.bi_size;
        map_length = length;
        ret = btrfs_map_block(root->fs_info, rw, logical,
                              &map_length, NULL, 0);
@@ -6783,17 +6783,16 @@ unlock_err:
 static void btrfs_endio_direct_read(struct bio *bio, int err)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
-       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_vec *bvec = bio->bi_io_vec;
+       struct bio_vec *bvec;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct bio *dio_bio;
        u32 *csums = (u32 *)dip->csum;
-       int index = 0;
        u64 start;
+       int i;
 
        start = dip->logical_offset;
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                        struct page *page = bvec->bv_page;
                        char *kaddr;
@@ -6809,18 +6808,16 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                        local_irq_restore(flags);
 
                        flush_dcache_page(bvec->bv_page);
-                       if (csum != csums[index]) {
+                       if (csum != csums[i]) {
                                btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
                                          btrfs_ino(inode), start, csum,
-                                         csums[index]);
+                                         csums[i]);
                                err = -EIO;
                        }
                }
 
                start += bvec->bv_len;
-               bvec++;
-               index++;
-       } while (bvec <= bvec_end);
+       }
 
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
                      dip->logical_offset + dip->bytes - 1);
@@ -6901,7 +6898,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
                printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
                      "sector %#Lx len %u err no %d\n",
                      btrfs_ino(dip->inode), bio->bi_rw,
-                     (unsigned long long)bio->bi_sector, bio->bi_size, err);
+                     (unsigned long long)bio->bi_iter.bi_sector,
+                     bio->bi_iter.bi_size, err);
                dip->errors = 1;
 
                /*
@@ -6992,7 +6990,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        struct bio *bio;
        struct bio *orig_bio = dip->orig_bio;
        struct bio_vec *bvec = orig_bio->bi_io_vec;
-       u64 start_sector = orig_bio->bi_sector;
+       u64 start_sector = orig_bio->bi_iter.bi_sector;
        u64 file_offset = dip->logical_offset;
        u64 submit_len = 0;
        u64 map_length;
@@ -7000,7 +6998,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int ret = 0;
        int async_submit = 0;
 
-       map_length = orig_bio->bi_size;
+       map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
                              &map_length, NULL, 0);
        if (ret) {
@@ -7008,7 +7006,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                return -EIO;
        }
 
-       if (map_length >= orig_bio->bi_size) {
+       if (map_length >= orig_bio->bi_iter.bi_size) {
                bio = orig_bio;
                goto submit;
        }
@@ -7060,7 +7058,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        bio->bi_private = dip;
                        bio->bi_end_io = btrfs_end_dio_bio;
 
-                       map_length = orig_bio->bi_size;
+                       map_length = orig_bio->bi_iter.bi_size;
                        ret = btrfs_map_block(root->fs_info, rw,
                                              start_sector << 9,
                                              &map_length, NULL, 0);
@@ -7118,7 +7116,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
 
        if (!skip_sum && !write) {
                csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-               sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
+               sum_len = dio_bio->bi_iter.bi_size >>
+                       inode->i_sb->s_blocksize_bits;
                sum_len *= csum_size;
        } else {
                sum_len = 0;
@@ -7133,8 +7132,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
        dip->private = dio_bio->bi_private;
        dip->inode = inode;
        dip->logical_offset = file_offset;
-       dip->bytes = dio_bio->bi_size;
-       dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+       dip->bytes = dio_bio->bi_iter.bi_size;
+       dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
        io_bio->bi_private = dip;
        dip->errors = 0;
        dip->orig_bio = io_bio;
index 24ac21840a9a0797cdb086a58e90e64a3c1481ad..9af0b25d991a8c64653b4fa20f4dc31c2794b943 100644 (file)
@@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
 
        /* see if we can add this page onto our existing bio */
        if (last) {
-               last_end = (u64)last->bi_sector << 9;
-               last_end += last->bi_size;
+               last_end = (u64)last->bi_iter.bi_sector << 9;
+               last_end += last->bi_iter.bi_size;
 
                /*
                 * we can't merge these if they are from different
@@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        if (!bio)
                return -ENOMEM;
 
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
        bio->bi_bdev = stripe->dev->bdev;
-       bio->bi_sector = disk_start >> 9;
+       bio->bi_iter.bi_sector = disk_start >> 9;
        set_bit(BIO_UPTODATE, &bio->bi_flags);
 
        bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 
        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
-               start = (u64)bio->bi_sector << 9;
+               start = (u64)bio->bi_iter.bi_sector << 9;
                stripe_offset = start - rbio->raid_map[0];
                page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
@@ -1272,7 +1272,7 @@ cleanup:
 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
 {
-       u64 physical = bio->bi_sector;
+       u64 physical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
        struct btrfs_bio_stripe *stripe;
@@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
                                   struct bio *bio)
 {
-       u64 logical = bio->bi_sector;
+       u64 logical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
 
@@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
                                                 plug_list);
        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
                                                 plug_list);
-       u64 a_sector = ra->bio_list.head->bi_sector;
-       u64 b_sector = rb->bio_list.head->bi_sector;
+       u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
+       u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
 
        if (a_sector < b_sector)
                return -1;
@@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
        if (IS_ERR(rbio))
                return PTR_ERR(rbio);
        bio_list_add(&rbio->bio_list, bio);
-       rbio->bio_list_bytes = bio->bi_size;
+       rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
        /*
         * don't plug on full rbios, just get them out the door
@@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 
        rbio->read_rebuild = 1;
        bio_list_add(&rbio->bio_list, bio);
-       rbio->bio_list_bytes = bio->bi_size;
+       rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
index 1fd3f33c330abe930fbd03de1deb5968e32fd7b5..bb9a928fa3a848c597d842a94fe2e49a48766cf0 100644 (file)
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                        continue;
                }
                bio->bi_bdev = page->dev->bdev;
-               bio->bi_sector = page->physical >> 9;
+               bio->bi_iter.bi_sector = page->physical >> 9;
 
                bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (btrfsic_submit_bio_wait(READ, bio))
@@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
-               bio->bi_sector = page_bad->physical >> 9;
+               bio->bi_iter.bi_sector = page_bad->physical >> 9;
 
                ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
@@ -1520,7 +1520,7 @@ again:
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_wr_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
-               bio->bi_sector = sbio->physical >> 9;
+               bio->bi_iter.bi_sector = sbio->physical >> 9;
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical_for_dev_replace ||
@@ -1926,7 +1926,7 @@ again:
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
-               bio->bi_sector = sbio->physical >> 9;
+               bio->bi_iter.bi_sector = sbio->physical >> 9;
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical ||
@@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }
-       bio->bi_size = 0;
-       bio->bi_sector = physical_for_dev_replace >> 9;
+       bio->bi_iter.bi_size = 0;
+       bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
        ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
        if (ret != PAGE_CACHE_SIZE) {
index 92303f42baaa92d5d845edddff1f8600fc46518e..54d2685a3071f512bc8d8a5c67a60a03f179b29b 100644 (file)
@@ -5298,6 +5298,13 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        bio_put(bio);
                        bio = bbio->orig_bio;
                }
+
+               /*
+                * We have original bio now. So increment bi_remaining to
+                * account for it in endio
+                */
+               atomic_inc(&bio->bi_remaining);
+
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5411,7 +5418,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
        if (!q->merge_bvec_fn)
                return 1;
 
-       bvm.bi_size = bio->bi_size - prev->bv_len;
+       bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
        if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
                return 0;
        return 1;
@@ -5426,7 +5433,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
        bio->bi_private = bbio;
        btrfs_io_bio(bio)->stripe_index = dev_nr;
        bio->bi_end_io = btrfs_end_bio;
-       bio->bi_sector = physical >> 9;
+       bio->bi_iter.bi_sector = physical >> 9;
 #ifdef DEBUG
        {
                struct rcu_string *name;
@@ -5464,7 +5471,7 @@ again:
        while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
                if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
                                 bvec->bv_offset) < bvec->bv_len) {
-                       u64 len = bio->bi_size;
+                       u64 len = bio->bi_iter.bi_size;
 
                        atomic_inc(&bbio->stripes_pending);
                        submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5486,7 +5493,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
-               bio->bi_sector = logical >> 9;
+               bio->bi_iter.bi_sector = logical >> 9;
                kfree(bbio);
                bio_endio(bio, -EIO);
        }
@@ -5497,7 +5504,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 {
        struct btrfs_device *dev;
        struct bio *first_bio = bio;
-       u64 logical = (u64)bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        u64 *raid_map = NULL;
@@ -5506,7 +5513,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        int total_devs = 1;
        struct btrfs_bio *bbio = NULL;
 
-       length = bio->bi_size;
+       length = bio->bi_iter.bi_size;
        map_length = length;
 
        ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
index 6024877335caf2a9dfa6af1018c5da19b0e8a2ae..651dba10b9c2b5468af528ad3114166b90d8c067 100644 (file)
@@ -1312,7 +1312,7 @@ static void bh_lru_install(struct buffer_head *bh)
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
-               memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
+               memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();
 
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
         * let it through, and the IO layer will turn it into
         * an EIO.
         */
-       if (unlikely(bio->bi_sector >= maxsector))
+       if (unlikely(bio->bi_iter.bi_sector >= maxsector))
                return;
 
-       maxsector -= bio->bi_sector;
-       bytes = bio->bi_size;
+       maxsector -= bio->bi_iter.bi_sector;
+       bytes = bio->bi_iter.bi_size;
        if (likely((bytes >> 9) <= maxsector))
                return;
 
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
        bytes = maxsector << 9;
 
        /* Truncate the bio.. */
-       bio->bi_size = bytes;
+       bio->bi_iter.bi_size = bytes;
        bio->bi_io_vec[0].bv_len = bytes;
 
        /* ..and clear the end of the buffer for reads */
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
         */
        bio = bio_alloc(GFP_NOIO, 1);
 
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_io_vec[0].bv_page = bh->b_page;
        bio->bi_io_vec[0].bv_len = bh->b_size;
        bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
        bio->bi_vcnt = 1;
-       bio->bi_size = bh->b_size;
+       bio->bi_iter.bi_size = bh->b_size;
 
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
index 64fddbc1d17ba17cfa61778659c72b9dea0fed8e..66d377a12f7c197b5e4e20b94606aae80d71e696 100644 (file)
@@ -107,14 +107,14 @@ struct posix_acl *ceph_get_acl(struct inode *inode, int type)
        return acl;
 }
 
-static int ceph_set_acl(struct dentry *dentry, struct inode *inode,
-                               struct posix_acl *acl, int type)
+int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
        int ret = 0, size = 0;
        const char *name = NULL;
        char *value = NULL;
        struct iattr newattrs;
        umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
+       struct dentry *dentry = d_find_alias(inode);
 
        if (acl) {
                ret = posix_acl_valid(acl);
@@ -208,16 +208,15 @@ int ceph_init_acl(struct dentry *dentry, struct inode *inode, struct inode *dir)
 
        if (IS_POSIXACL(dir) && acl) {
                if (S_ISDIR(inode->i_mode)) {
-                       ret = ceph_set_acl(dentry, inode, acl,
-                                               ACL_TYPE_DEFAULT);
+                       ret = ceph_set_acl(inode, acl, ACL_TYPE_DEFAULT);
                        if (ret)
                                goto out_release;
                }
-               ret = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
+               ret = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
                if (ret < 0)
                        goto out;
                else if (ret > 0)
-                       ret = ceph_set_acl(dentry, inode, acl, ACL_TYPE_ACCESS);
+                       ret = ceph_set_acl(inode, acl, ACL_TYPE_ACCESS);
                else
                        cache_no_acl(inode);
        } else {
@@ -229,104 +228,3 @@ out_release:
 out:
        return ret;
 }
-
-int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
-{
-       struct posix_acl *acl;
-       int ret = 0;
-
-       if (S_ISLNK(inode->i_mode)) {
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
-       if (!IS_POSIXACL(inode))
-               goto out;
-
-       acl = ceph_get_acl(inode, ACL_TYPE_ACCESS);
-       if (IS_ERR_OR_NULL(acl)) {
-               ret = PTR_ERR(acl);
-               goto out;
-       }
-
-       ret = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
-       if (ret)
-               goto out;
-       ret = ceph_set_acl(dentry, inode, acl, ACL_TYPE_ACCESS);
-       posix_acl_release(acl);
-out:
-       return ret;
-}
-
-static int ceph_xattr_acl_get(struct dentry *dentry, const char *name,
-                               void *value, size_t size, int type)
-{
-       struct posix_acl *acl;
-       int ret = 0;
-
-       if (!IS_POSIXACL(dentry->d_inode))
-               return -EOPNOTSUPP;
-
-       acl = ceph_get_acl(dentry->d_inode, type);
-       if (IS_ERR(acl))
-               return PTR_ERR(acl);
-       if (acl == NULL)
-               return -ENODATA;
-
-       ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
-       posix_acl_release(acl);
-
-       return ret;
-}
-
-static int ceph_xattr_acl_set(struct dentry *dentry, const char *name,
-                       const void *value, size_t size, int flags, int type)
-{
-       int ret = 0;
-       struct posix_acl *acl = NULL;
-
-       if (!inode_owner_or_capable(dentry->d_inode)) {
-               ret = -EPERM;
-               goto out;
-       }
-
-       if (!IS_POSIXACL(dentry->d_inode)) {
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
-       if (value) {
-               acl = posix_acl_from_xattr(&init_user_ns, value, size);
-               if (IS_ERR(acl)) {
-                       ret = PTR_ERR(acl);
-                       goto out;
-               }
-
-               if (acl) {
-                       ret = posix_acl_valid(acl);
-                       if (ret)
-                               goto out_release;
-               }
-       }
-
-       ret = ceph_set_acl(dentry, dentry->d_inode, acl, type);
-
-out_release:
-       posix_acl_release(acl);
-out:
-       return ret;
-}
-
-const struct xattr_handler ceph_xattr_acl_default_handler = {
-       .prefix = POSIX_ACL_XATTR_DEFAULT,
-       .flags  = ACL_TYPE_DEFAULT,
-       .get    = ceph_xattr_acl_get,
-       .set    = ceph_xattr_acl_set,
-};
-
-const struct xattr_handler ceph_xattr_acl_access_handler = {
-       .prefix = POSIX_ACL_XATTR_ACCESS,
-       .flags  = ACL_TYPE_ACCESS,
-       .get    = ceph_xattr_acl_get,
-       .set    = ceph_xattr_acl_set,
-};
index 619616d585b04128d9084a0051d9c43ea533a3b6..6da4df84ba300824a8a6afdea9f2a20121600765 100644 (file)
@@ -1303,6 +1303,7 @@ const struct inode_operations ceph_dir_iops = {
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .get_acl = ceph_get_acl,
+       .set_acl = ceph_set_acl,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
index 6fc10a7d7c5926d9a5d22f4921c14ad8c2e7a1a3..32d519d8a2e210316fbf2af3b2c0a842d47a13b2 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/namei.h>
 #include <linux/writeback.h>
 #include <linux/vmalloc.h>
+#include <linux/posix_acl.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -96,6 +97,7 @@ const struct inode_operations ceph_file_iops = {
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .get_acl = ceph_get_acl,
+       .set_acl = ceph_set_acl,
 };
 
 
@@ -1615,6 +1617,7 @@ static const struct inode_operations ceph_symlink_iops = {
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .get_acl = ceph_get_acl,
+       .set_acl = ceph_set_acl,
 };
 
 /*
@@ -1805,7 +1808,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
                __mark_inode_dirty(inode, inode_dirty_flags);
 
        if (ia_valid & ATTR_MODE) {
-               err = ceph_acl_chmod(dentry, inode);
+               err = posix_acl_chmod(inode, attr->ia_mode);
                if (err)
                        goto out_put;
        }
index c299f7d19bf35b6f0765fb2f267d22d541310941..aa260590f6154cce25517c24789e62f58ed1b7a3 100644 (file)
@@ -718,6 +718,7 @@ extern void ceph_queue_writeback(struct inode *inode);
 extern int ceph_do_getattr(struct inode *inode, int mask);
 extern int ceph_permission(struct inode *inode, int mask);
 extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
 extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
                        struct kstat *stat);
 
@@ -736,20 +737,19 @@ extern void __init ceph_xattr_init(void);
 extern void ceph_xattr_exit(void);
 
 /* acl.c */
-extern const struct xattr_handler ceph_xattr_acl_access_handler;
-extern const struct xattr_handler ceph_xattr_acl_default_handler;
 extern const struct xattr_handler *ceph_xattr_handlers[];
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 
 struct posix_acl *ceph_get_acl(struct inode *, int);
+int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 int ceph_init_acl(struct dentry *, struct inode *, struct inode *);
-int ceph_acl_chmod(struct dentry *, struct inode *);
 void ceph_forget_all_cached_acls(struct inode *inode);
 
 #else
 
 #define ceph_get_acl NULL
+#define ceph_set_acl NULL
 
 static inline int ceph_init_acl(struct dentry *dentry, struct inode *inode,
                                struct inode *dir)
index c7581f3733c1e08a78c8358fd718dd1dc3ac613a..898b6565ad3e2c114baca0282fafea6a5643071a 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/ceph/decode.h>
 
 #include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
 #include <linux/slab.h>
 
 #define XATTR_CEPH_PREFIX "ceph."
@@ -17,8 +18,8 @@
  */
 const struct xattr_handler *ceph_xattr_handlers[] = {
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       &ceph_xattr_acl_access_handler,
-       &ceph_xattr_acl_default_handler,
+       &posix_acl_access_xattr_handler,
+       &posix_acl_default_xattr_handler,
 #endif
        NULL,
 };
index ab5954b50267d29afa219bbaec128af2304f59e2..ac44a69fbea9a533dbcc27cdc27771f4f7795f1e 100644 (file)
@@ -204,7 +204,7 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, size_t, len)
+COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
 {
 #ifdef __BIG_ENDIAN
        return sys_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
index 0e04142d5962312fcb055738479247b2364a252e..160a5489a93936372c85683ee8cfd6da5185007b 100644 (file)
@@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
        bio->bi_bdev = bdev;
-       bio->bi_sector = first_sector;
+       bio->bi_iter.bi_sector = first_sector;
        if (dio->is_async)
                bio->bi_end_io = dio_bio_end_aio;
        else
@@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
        if (sdio->bio) {
                loff_t cur_offset = sdio->cur_page_fs_offset;
                loff_t bio_next_offset = sdio->logical_offset_in_bio +
-                       sdio->bio->bi_size;
+                       sdio->bio->bi_iter.bi_size;
 
                /*
                 * See whether this new request is contiguous with the old.
index d488f80ee32df1137e91df0aed72bef2f61b49ac..ab95508e3d4018eab92647c6d2308e98524080d1 100644 (file)
@@ -65,9 +65,9 @@ static void ext4_finish_bio(struct bio *bio)
 {
        int i;
        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct bio_vec *bvec;
 
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               struct bio_vec *bvec = &bio->bi_io_vec[i];
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
@@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
 static void ext4_end_bio(struct bio *bio, int error)
 {
        ext4_io_end_t *io_end = bio->bi_private;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        BUG_ON(!io_end);
        bio->bi_end_io = NULL;
@@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        if (!bio)
                return -ENOMEM;
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
index 0ae558723506e1a8a96f5653444dc11f5a8feb27..2261ccdd0b5f04a37be390f1b28c8703fafa86b4 100644 (file)
 
 static void f2fs_read_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (unlikely(!uptodate)) {
+               if (!err) {
+                       SetPageUptodate(page);
+               } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
-               } else {
-                       SetPageUptodate(page);
                }
                unlock_page(page);
-       } while (bvec >= bio->bi_io_vec);
-
+       }
        bio_put(bio);
 }
 
 static void f2fs_write_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_SB(bio->bi_io_vec->bv_page->mapping->host->i_sb);
+       struct bio_vec *bvec;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (unlikely(!uptodate)) {
+               if (unlikely(err)) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
@@ -67,7 +60,7 @@ static void f2fs_write_end_io(struct bio *bio, int err)
                }
                end_page_writeback(page);
                dec_page_count(sbi, F2FS_WRITEBACK);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        if (bio->bi_private)
                complete(bio->bi_private);
@@ -91,7 +84,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
        bio = bio_alloc(GFP_NOIO, npages);
 
        bio->bi_bdev = sbi->sb->s_bdev;
-       bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+       bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
 
        return bio;
index 58f06400b7b8dcece9597b51b05ebf9ebc092396..76693793ceddfe7f936c360a6c3494d1882a849a 100644 (file)
@@ -273,7 +273,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
                nrvecs = max(nrvecs/2, 1U);
        }
 
-       bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+       bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;
index 1e712b566d76a74435b4d2faa5417956815cec78..c6872d09561a2d53c8e57374eb700f4fb578ae78 100644 (file)
@@ -238,7 +238,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
        lock_page(page);
 
        bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_sector = sector * (sb->s_blocksize >> 9);
+       bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio_add_page(bio, page, PAGE_SIZE, 0);
 
index e9a97a0d431480616043410a51567730bebafda3..3f999649587ff8185ebd326c3672acee83542de4 100644 (file)
@@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
        bio = bio_alloc(GFP_NOIO, 1);
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = sb->s_bdev;
 
        if (!(rw & WRITE) && data)
index 4f47aa24b5562001cf8983d6c7634c373d50206b..b8fd651307a42e2fce3d15dfcc1976e2526932ab 100644 (file)
@@ -288,6 +288,8 @@ struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
        struct jffs2_xattr_datum *xd;
        xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
        dbg_memalloc("%p\n", xd);
+       if (!xd)
+               return NULL;
 
        xd->class = RAWNODE_CLASS_XATTR_DATUM;
        xd->node = (void *)xd;
@@ -306,6 +308,8 @@ struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
        struct jffs2_xattr_ref *ref;
        ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
        dbg_memalloc("%p\n", ref);
+       if (!ref)
+               return NULL;
 
        ref->class = RAWNODE_CLASS_XATTR_REF;
        ref->node = (void *)ref;
index 360d27c488873825fed5c04f8bb2320a51a39d62..8d811e02b4b92bb26d28367c727fcbe909fa95d5 100644 (file)
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio = bio_alloc(GFP_NOFS, 1);
 
-       bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+       bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
        bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
        bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
-       bio->bi_size = LOGPSIZE;
+       bio->bi_iter.bi_size = LOGPSIZE;
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
        /*check if journaling to disk has been disabled*/
        if (log->no_integrity) {
-               bio->bi_size = 0;
+               bio->bi_iter.bi_size = 0;
                lbmIODone(bio, 0);
        } else {
                submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp)
        jfs_info("lbmStartIO\n");
 
        bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+       bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
        bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
        bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
-       bio->bi_size = LOGPSIZE;
+       bio->bi_iter.bi_size = LOGPSIZE;
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
 
        /* check if journaling to disk has been disabled */
        if (log->no_integrity) {
-               bio->bi_size = 0;
+               bio->bi_iter.bi_size = 0;
                lbmIODone(bio, 0);
        } else {
                submit_bio(WRITE_SYNC, bio);
index d165cde0c68dda885c2f5bb512f48465f521c4a1..49ba7ff1bbb9a15d8939128df2021354f2db6c52 100644 (file)
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                         * count from hitting zero before we're through
                         */
                        inc_io(page);
-                       if (!bio->bi_size)
+                       if (!bio->bi_iter.bi_size)
                                goto dump_bio;
                        submit_bio(WRITE, bio);
                        nr_underway++;
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 
                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = inode->i_sb->s_bdev;
-               bio->bi_sector = pblock << (inode->i_blkbits - 9);
+               bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;
 
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
        if (bio) {
                if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
                                goto add_failed;
-               if (!bio->bi_size)
+               if (!bio->bi_iter.bi_size)
                        goto dump_bio;
 
                submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
                        bio = bio_alloc(GFP_NOFS, 1);
                        bio->bi_bdev = inode->i_sb->s_bdev;
-                       bio->bi_sector = pblock << (inode->i_blkbits - 9);
+                       bio->bi_iter.bi_sector =
+                               pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
                        len = xlen << inode->i_blkbits;
index 0f95f0d0b3133e9b3129e3807a842438e162a245..76279e11982d854c9b22312cf51b3bd3a97e256d 100644 (file)
@@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
-       bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+       bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
+       bio.bi_iter.bi_size = PAGE_SIZE;
 
        return submit_bio_wait(rw, &bio);
 }
@@ -56,22 +56,18 @@ static DECLARE_WAIT_QUEUE_HEAD(wq);
 static void writeseg_end_io(struct bio *bio, int err)
 {
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
-       struct page *page;
 
        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
-       BUG_ON(bio->bi_vcnt == 0);
-       do {
-               page = bvec->bv_page;
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               end_page_writeback(page);
-               page_cache_release(page);
-       } while (bvec >= bio->bi_io_vec);
+
+       bio_for_each_segment_all(bvec, bio, i) {
+               end_page_writeback(bvec->bv_page);
+               page_cache_release(bvec->bv_page);
+       }
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
@@ -96,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
-                       bio->bi_size = i * PAGE_SIZE;
+                       bio->bi_iter.bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
-                       bio->bi_sector = ofs >> 9;
+                       bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
@@ -123,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
-       bio->bi_size = nr_pages * PAGE_SIZE;
+       bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
-       bio->bi_sector = ofs >> 9;
+       bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
@@ -188,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
-                       bio->bi_size = i * PAGE_SIZE;
+                       bio->bi_iter.bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
-                       bio->bi_sector = ofs >> 9;
+                       bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
@@ -209,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
-       bio->bi_size = nr_pages * PAGE_SIZE;
+       bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
-       bio->bi_sector = ofs >> 9;
+       bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
index 0face1c4d4c6bd4ea33cb60e45b8c7fad8235acf..4979ffa60aaabfd36839adec6feafcb17a876d98 100644 (file)
  */
 static void mpage_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bv;
+       int i;
 
-       do {
-               struct page *page = bvec->bv_page;
+       bio_for_each_segment_all(bv, bio, i) {
+               struct page *page = bv->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
                if (bio_data_dir(bio) == READ) {
-                       if (uptodate) {
+                       if (!err) {
                                SetPageUptodate(page);
                        } else {
                                ClearPageUptodate(page);
@@ -60,14 +58,15 @@ static void mpage_end_io(struct bio *bio, int err)
                        }
                        unlock_page(page);
                } else { /* bio_data_dir(bio) == WRITE */
-                       if (!uptodate) {
+                       if (err) {
                                SetPageError(page);
                                if (page->mapping)
                                        set_bit(AS_EIO, &page->mapping->flags);
                        }
                        end_page_writeback(page);
                }
-       } while (bvec >= bio->bi_io_vec);
+       }
+
        bio_put(bio);
 }
 
@@ -94,7 +93,7 @@ mpage_alloc(struct block_device *bdev,
 
        if (bio) {
                bio->bi_bdev = bdev;
-               bio->bi_sector = first_sector;
+               bio->bi_iter.bi_sector = first_sector;
        }
        return bio;
 }
index e242bbf729723d1d45ae0cac7370952167b026cd..56ff823ca82e0979f08355f80aaf75ae4a019393 100644 (file)
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio)
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
-                       rw == READ ? "read" : "write",
-                       bio->bi_size, (unsigned long long)bio->bi_sector);
+                       rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+                       (unsigned long long)bio->bi_iter.bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
        }
 
        if (bio) {
-               bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+               bio->bi_iter.bi_sector = isect - be->be_f_offset +
+                       be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
@@ -201,18 +202,14 @@ static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
 static void bl_end_io_read(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
 
-       do {
-               struct page *page = bvec->bv_page;
+       if (!err)
+               bio_for_each_segment_all(bvec, bio, i)
+                       SetPageUptodate(bvec->bv_page);
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-               if (uptodate)
-                       SetPageUptodate(page);
-       } while (bvec >= bio->bi_io_vec);
-       if (!uptodate) {
+       if (err) {
                struct nfs_read_data *rdata = par->data;
                struct nfs_pgio_header *header = rdata->header;
 
@@ -383,20 +380,16 @@ static void mark_extents_written(struct pnfs_block_layout *bl,
 static void bl_end_io_write_zero(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-       do {
-               struct page *page = bvec->bv_page;
+       struct bio_vec *bvec;
+       int i;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
+       bio_for_each_segment_all(bvec, bio, i) {
                /* This is the zeroing page we added */
-               end_page_writeback(page);
-               page_cache_release(page);
-       } while (bvec >= bio->bi_io_vec);
+               end_page_writeback(bvec->bv_page);
+               page_cache_release(bvec->bv_page);
+       }
 
-       if (unlikely(!uptodate)) {
+       if (unlikely(err)) {
                struct nfs_write_data *data = par->data;
                struct nfs_pgio_header *header = data->header;
 
@@ -519,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
        isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
                (offset / SECTOR_SIZE);
 
-       bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+       bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
        bio->bi_bdev = be->be_mdev;
        bio->bi_end_io = bl_read_single_end_io;
 
index d2255d7054210ef1daf54396d804bf6a45109ace..aa9bc973f36a31eacbca297c71f2cd9bb5fb81e9 100644 (file)
@@ -924,11 +924,11 @@ static const struct inode_operations nfs3_dir_inode_operations = {
        .permission     = nfs_permission,
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
+#ifdef CONFIG_NFS_V3_ACL
        .listxattr      = generic_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
-#ifdef CONFIG_NFS_V3_ACL
        .get_acl        = nfs3_get_acl,
        .set_acl        = nfs3_set_acl,
 #endif
@@ -938,11 +938,11 @@ static const struct inode_operations nfs3_file_inode_operations = {
        .permission     = nfs_permission,
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
+#ifdef CONFIG_NFS_V3_ACL
        .listxattr      = generic_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
-#ifdef CONFIG_NFS_V3_ACL
        .get_acl        = nfs3_get_acl,
        .set_acl        = nfs3_set_acl,
 #endif
index 8b68218e2c1c25c188f7585966dd9f694807e096..a812fd1b92a4593fc744606e5ae15ff128c8e2c7 100644 (file)
@@ -45,7 +45,7 @@ struct svc_rqst;
 
 struct nfs4_acl *nfs4_acl_new(int);
 int nfs4_acl_get_whotype(char *, u32);
-int nfs4_acl_write_who(int who, char *p);
+__be32 nfs4_acl_write_who(int who, __be32 **p, int *len);
 
 int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
                struct nfs4_acl **acl);
index d5c5b3e00266219320720944a17a7e7be6feb811..b582f9ab6b2a9a492a31abc60625ec56ab5d3912 100644 (file)
@@ -84,12 +84,4 @@ int  nfsd_cache_lookup(struct svc_rqst *);
 void   nfsd_cache_update(struct svc_rqst *, int, __be32 *);
 int    nfsd_reply_cache_stats_open(struct inode *, struct file *);
 
-#ifdef CONFIG_NFSD_V4
-void   nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
-#else  /* CONFIG_NFSD_V4 */
-static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
-{
-}
-#endif /* CONFIG_NFSD_V4 */
-
 #endif /* NFSCACHE_H */
index bf95f6b817a49c36ceb3b1be4af4ab7ea9eaf19b..66e58db019369cc95f9881702b4791e274396787 100644 (file)
@@ -56,7 +56,7 @@ static inline void nfsd_idmap_shutdown(struct net *net)
 
 __be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, kuid_t *);
 __be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, kgid_t *);
-int nfsd_map_uid_to_name(struct svc_rqst *, kuid_t, char *);
-int nfsd_map_gid_to_name(struct svc_rqst *, kgid_t, char *);
+__be32 nfsd4_encode_user(struct svc_rqst *, kuid_t, __be32 **, int *);
+__be32 nfsd4_encode_group(struct svc_rqst *, kgid_t, __be32 **, int *);
 
 #endif /* LINUX_NFSD_IDMAP_H */
index 849a7c3ced22cbacf33c3fa3e33802b43e3fb798..d32b3aa6600da986ab964ec2c30a4493c24eb5cb 100644 (file)
@@ -95,6 +95,7 @@ struct nfsd_net {
        time_t nfsd4_grace;
 
        bool nfsd_net_up;
+       bool lockd_up;
 
        /*
         * Time of server startup
index 14d9ecb96cff0ba476549467fbc321f94ea55ff3..de6e39e12cb3e4655ab2017cda8bea11383335ed 100644 (file)
@@ -168,7 +168,7 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
              struct kstat *stat)
 {
        *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
-       *p++ = htonl((u32) stat->mode);
+       *p++ = htonl((u32) (stat->mode & S_IALLUGO));
        *p++ = htonl((u32) stat->nlink);
        *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
        *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
@@ -842,21 +842,21 @@ out:
 
 static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
 {
-       struct svc_fh   fh;
+       struct svc_fh   *fh = &cd->scratch;
        __be32 err;
 
-       fh_init(&fh, NFS3_FHSIZE);
-       err = compose_entry_fh(cd, &fh, name, namlen);
+       fh_init(fh, NFS3_FHSIZE);
+       err = compose_entry_fh(cd, fh, name, namlen);
        if (err) {
                *p++ = 0;
                *p++ = 0;
                goto out;
        }
-       p = encode_post_op_attr(cd->rqstp, p, &fh);
+       p = encode_post_op_attr(cd->rqstp, p, fh);
        *p++ = xdr_one;                 /* yes, a file handle follows */
-       p = encode_fh(p, &fh);
+       p = encode_fh(p, fh);
 out:
-       fh_put(&fh);
+       fh_put(fh);
        return p;
 }
 
index 649ad7cf22044bb10bd6cb64527cba7295226a97..d3a587144222b56becd0ce82597fb95bc8767592 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/export.h>
 #include "nfsfh.h"
+#include "nfsd.h"
 #include "acl.h"
 #include "vfs.h"
 
@@ -916,17 +917,22 @@ nfs4_acl_get_whotype(char *p, u32 len)
        return NFS4_ACL_WHO_NAMED;
 }
 
-int
-nfs4_acl_write_who(int who, char *p)
+__be32 nfs4_acl_write_who(int who, __be32 **p, int *len)
 {
        int i;
+       int bytes;
 
        for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
-               if (s2t_map[i].type == who) {
-                       memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
-                       return s2t_map[i].stringlen;
-               }
+               if (s2t_map[i].type != who)
+                       continue;
+               bytes = 4 + (XDR_QUADLEN(s2t_map[i].stringlen) << 2);
+               if (bytes > *len)
+                       return nfserr_resource;
+               *p = xdr_encode_opaque(*p, s2t_map[i].string,
+                                       s2t_map[i].stringlen);
+               *len -= bytes;
+               return 0;
        }
-       BUG();
+       WARN_ON_ONCE(1);
        return -1;
 }
index 4832fd819f884f4a6b436a70369fea4ea9bfcf93..c0dfde68742e463256cd76e7661b439adca849eb 100644 (file)
@@ -551,27 +551,46 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
        return 0;
 }
 
-static int
-idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
+static __be32 encode_ascii_id(u32 id, __be32 **p, int *buflen)
+{
+       char buf[11];
+       int len;
+       int bytes;
+
+       len = sprintf(buf, "%u", id);
+       bytes = 4 + (XDR_QUADLEN(len) << 2);
+       if (bytes > *buflen)
+               return nfserr_resource;
+       *p = xdr_encode_opaque(*p, buf, len);
+       *buflen -= bytes;
+       return 0;
+}
+
+static __be32 idmap_id_to_name(struct svc_rqst *rqstp, int type, u32 id, __be32 **p, int *buflen)
 {
        struct ent *item, key = {
                .id = id,
                .type = type,
        };
        int ret;
+       int bytes;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
        strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
        ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
        if (ret == -ENOENT)
-               return sprintf(name, "%u", id);
+               return encode_ascii_id(id, p, buflen);
        if (ret)
-               return ret;
+               return nfserrno(ret);
        ret = strlen(item->name);
-       BUG_ON(ret > IDMAP_NAMESZ);
-       memcpy(name, item->name, ret);
+       WARN_ON_ONCE(ret > IDMAP_NAMESZ);
+       bytes = 4 + (XDR_QUADLEN(ret) << 2);
+       if (bytes > *buflen)
+               return nfserr_resource;
+       *p = xdr_encode_opaque(*p, item->name, ret);
+       *buflen -= bytes;
        cache_put(&item->h, nn->idtoname_cache);
-       return ret;
+       return 0;
 }
 
 static bool
@@ -603,12 +622,11 @@ do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u
        return idmap_name_to_id(rqstp, type, name, namelen, id);
 }
 
-static int
-do_id_to_name(struct svc_rqst *rqstp, int type, u32 id, char *name)
+static __be32 encode_name_from_id(struct svc_rqst *rqstp, int type, u32 id, __be32 **p, int *buflen)
 {
        if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
-               return sprintf(name, "%u", id);
-       return idmap_id_to_name(rqstp, type, id, name);
+               return encode_ascii_id(id, p, buflen);
+       return idmap_id_to_name(rqstp, type, id, p, buflen);
 }
 
 __be32
@@ -637,16 +655,14 @@ nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
        return status;
 }
 
-int
-nfsd_map_uid_to_name(struct svc_rqst *rqstp, kuid_t uid, char *name)
+__be32 nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t uid,  __be32 **p, int *buflen)
 {
        u32 id = from_kuid(&init_user_ns, uid);
-       return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
+       return encode_name_from_id(rqstp, IDMAP_TYPE_USER, id, p, buflen);
 }
 
-int
-nfsd_map_gid_to_name(struct svc_rqst *rqstp, kgid_t gid, char *name)
+__be32 nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t gid, __be32 **p, int *buflen)
 {
        u32 id = from_kgid(&init_user_ns, gid);
-       return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
+       return encode_name_from_id(rqstp, IDMAP_TYPE_GROUP, id, p, buflen);
 }
index 825b8a99b99b42aaf1675518ea979a77b67c1955..82189b208af31700418217e62d8339f13dce6565 100644 (file)
@@ -231,17 +231,16 @@ static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate
 }
 
 static __be32
-do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
 {
        struct svc_fh *current_fh = &cstate->current_fh;
-       struct svc_fh *resfh;
        int accmode;
        __be32 status;
 
-       resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
-       if (!resfh)
+       *resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+       if (!*resfh)
                return nfserr_jukebox;
-       fh_init(resfh, NFS4_FHSIZE);
+       fh_init(*resfh, NFS4_FHSIZE);
        open->op_truncate = 0;
 
        if (open->op_create) {
@@ -266,12 +265,12 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
                 */
                status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
                                        open->op_fname.len, &open->op_iattr,
-                                       resfh, open->op_createmode,
+                                       *resfh, open->op_createmode,
                                        (u32 *)open->op_verf.data,
                                        &open->op_truncate, &open->op_created);
 
                if (!status && open->op_label.len)
-                       nfsd4_security_inode_setsecctx(resfh, &open->op_label, open->op_bmval);
+                       nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
 
                /*
                 * Following rfc 3530 14.2.16, use the returned bitmask
@@ -281,31 +280,32 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
                if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
                        open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
                                                        FATTR4_WORD1_TIME_MODIFY);
-       } else {
+       } else
+               /*
+                * Note this may exit with the parent still locked.
+                * We will hold the lock until nfsd4_open's final
+                * lookup, to prevent renames or unlinks until we've had
+                * a chance to an acquire a delegation if appropriate.
+                */
                status = nfsd_lookup(rqstp, current_fh,
-                                    open->op_fname.data, open->op_fname.len, resfh);
-               fh_unlock(current_fh);
-       }
+                                    open->op_fname.data, open->op_fname.len, *resfh);
        if (status)
                goto out;
-       status = nfsd_check_obj_isreg(resfh);
+       status = nfsd_check_obj_isreg(*resfh);
        if (status)
                goto out;
 
        if (is_create_with_attrs(open) && open->op_acl != NULL)
-               do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
+               do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
 
-       nfsd4_set_open_owner_reply_cache(cstate, open, resfh);
+       nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
        accmode = NFSD_MAY_NOP;
        if (open->op_created ||
                        open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
                accmode |= NFSD_MAY_OWNER_OVERRIDE;
-       status = do_open_permission(rqstp, resfh, open, accmode);
+       status = do_open_permission(rqstp, *resfh, open, accmode);
        set_change_info(&open->op_cinfo, current_fh);
-       fh_dup2(current_fh, resfh);
 out:
-       fh_put(resfh);
-       kfree(resfh);
        return status;
 }
 
@@ -358,6 +358,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
           struct nfsd4_open *open)
 {
        __be32 status;
+       struct svc_fh *resfh = NULL;
        struct nfsd4_compoundres *resp;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -424,7 +425,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        switch (open->op_claim_type) {
                case NFS4_OPEN_CLAIM_DELEGATE_CUR:
                case NFS4_OPEN_CLAIM_NULL:
-                       status = do_open_lookup(rqstp, cstate, open);
+                       status = do_open_lookup(rqstp, cstate, open, &resfh);
                        if (status)
                                goto out;
                        break;
@@ -440,6 +441,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        status = do_open_fhandle(rqstp, cstate, open);
                        if (status)
                                goto out;
+                       resfh = &cstate->current_fh;
                        break;
                case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
                case NFS4_OPEN_CLAIM_DELEGATE_PREV:
@@ -459,9 +461,14 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         * successful, it (1) truncates the file if open->op_truncate was
         * set, (2) sets open->op_stateid, (3) sets open->op_delegation.
         */
-       status = nfsd4_process_open2(rqstp, &cstate->current_fh, open);
+       status = nfsd4_process_open2(rqstp, resfh, open);
        WARN_ON(status && open->op_created);
 out:
+       if (resfh && resfh != &cstate->current_fh) {
+               fh_dup2(&cstate->current_fh, resfh);
+               fh_put(resfh);
+               kfree(resfh);
+       }
        nfsd4_cleanup_open_state(open, status);
        if (open->op_openowner && !nfsd4_has_session(cstate))
                cstate->replay_owner = &open->op_openowner->oo_owner;
@@ -1070,8 +1077,10 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                    cstate->current_fh.fh_dentry, &p,
                                    count, verify->ve_bmval,
                                    rqstp, 0);
-
-       /* this means that nfsd4_encode_fattr() ran out of space */
+       /*
+        * If nfsd4_encode_fattr() ran out of space, assume that's because
+        * the attributes are longer (hence different) than those given:
+        */
        if (status == nfserr_resource)
                status = nfserr_not_same;
        if (status)
@@ -1525,7 +1534,8 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
        return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
-               1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
+               1 + 1 + /* eir_flags, spr_how */\
+               4 + /* spo_must_enforce & _allow with bitmap */\
                2 + /*eir_server_owner.so_minor_id */\
                /* eir_server_owner.so_major_id<> */\
                XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
@@ -1882,6 +1892,7 @@ struct svc_version        nfsd_version4 = {
                .vs_proc        = nfsd_procedures4,
                .vs_dispatch    = nfsd_dispatch,
                .vs_xdrsize     = NFS4_SVC_XDRSIZE,
+               .vs_rpcb_optnl  = 1,
 };
 
 /*
index 105d6fa7c5149ab496cf614763a4c3145f52c74e..d5d070fbeb35a98f6053ae4299c225f57a8bdd74 100644 (file)
@@ -832,10 +832,11 @@ static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
        spin_unlock(&nfsd_drc_lock);
 }
 
-static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
+static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
+                                          struct nfsd4_channel_attrs *battrs)
 {
-       int numslots = attrs->maxreqs;
-       int slotsize = slot_bytes(attrs);
+       int numslots = fattrs->maxreqs;
+       int slotsize = slot_bytes(fattrs);
        struct nfsd4_session *new;
        int mem, i;
 
@@ -852,6 +853,10 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs)
                if (!new->se_slots[i])
                        goto out_free;
        }
+
+       memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
+       memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
+
        return new;
 out_free:
        while (i--)
@@ -997,8 +1002,7 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);
        spin_unlock(&nn->client_lock);
-       memcpy(&new->se_fchannel, &cses->fore_channel,
-                       sizeof(struct nfsd4_channel_attrs));
+
        if (cses->flags & SESSION4_BACK_CHAN) {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
@@ -1851,6 +1855,11 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
        return nfs_ok;
 }
 
+#define NFSD_CB_MAX_REQ_SZ     ((NFS4_enc_cb_recall_sz + \
+                                RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
+#define NFSD_CB_MAX_RESP_SZ    ((NFS4_dec_cb_recall_sz + \
+                                RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
+
 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
 {
        ca->headerpadsz = 0;
@@ -1861,9 +1870,9 @@ static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
         * less than 1k.  Tighten up this estimate in the unlikely event
         * it turns out to be a problem for some client:
         */
-       if (ca->maxreq_sz < NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH)
+       if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
                return nfserr_toosmall;
-       if (ca->maxresp_sz < NFS4_dec_cb_recall_sz + RPC_MAX_REPHEADER_WITH_AUTH)
+       if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
                return nfserr_toosmall;
        ca->maxresp_cached = 0;
        if (ca->maxops < 2)
@@ -1913,9 +1922,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
                return status;
        status = check_backchannel_attrs(&cr_ses->back_channel);
        if (status)
-               return status;
+               goto out_release_drc_mem;
        status = nfserr_jukebox;
-       new = alloc_session(&cr_ses->fore_channel);
+       new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
        if (!new)
                goto out_release_drc_mem;
        conn = alloc_conn_from_crses(rqstp, cr_ses);
@@ -3034,18 +3043,18 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
        if (!fl)
                return -ENOMEM;
        fl->fl_file = find_readable_file(fp);
-       list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
        status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
-       if (status) {
-               list_del_init(&dp->dl_perclnt);
-               locks_free_lock(fl);
-               return status;
-       }
+       if (status)
+               goto out_free;
+       list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
        fp->fi_lease = fl;
        fp->fi_deleg_file = get_file(fl->fl_file);
        atomic_set(&fp->fi_delegees, 1);
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        return 0;
+out_free:
+       locks_free_lock(fl);
+       return status;
 }
 
 static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
@@ -3125,6 +3134,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
                                goto out_no_deleg;
                        break;
                case NFS4_OPEN_CLAIM_NULL:
+               case NFS4_OPEN_CLAIM_FH:
                        /*
                         * Let's not give out any delegations till everyone's
                         * had the chance to reclaim theirs....
index ee7237f99f54cd413dba6375dbc344084d0ece56..63f2395c57ed72bcb55cab62e1234d3c27bea0be 100644 (file)
@@ -103,11 +103,6 @@ xdr_error:                                 \
        (x) = (u64)ntohl(*p++) << 32;           \
        (x) |= ntohl(*p++);                     \
 } while (0)
-#define READTIME(x)       do {                 \
-       p++;                                    \
-       (x) = ntohl(*p++);                      \
-       p++;                                    \
-} while (0)
 #define READMEM(x,nbytes) do {                 \
        x = (char *)p;                          \
        p += XDR_QUADLEN(nbytes);               \
@@ -190,6 +185,15 @@ static int zero_clientid(clientid_t *clid)
        return (clid->cl_boot == 0) && (clid->cl_id == 0);
 }
 
+/**
+ * defer_free - mark an allocation as deferred freed
+ * @argp: NFSv4 compound argument structure to be freed with
+ * @release: release callback to free @p, typically kfree()
+ * @p: pointer to be freed
+ *
+ * Marks @p to be freed when processing the compound operation
+ * described in @argp finishes.
+ */
 static int
 defer_free(struct nfsd4_compoundargs *argp,
                void (*release)(const void *), void *p)
@@ -206,6 +210,16 @@ defer_free(struct nfsd4_compoundargs *argp,
        return 0;
 }
 
+/**
+ * savemem - duplicate a chunk of memory for later processing
+ * @argp: NFSv4 compound argument structure to be freed with
+ * @p: pointer to be duplicated
+ * @nbytes: length to be duplicated
+ *
+ * Returns a pointer to a copy of @nbytes bytes of memory at @p
+ * that are preserved until processing of the NFSv4 compound
+ * operation described by @argp finishes.
+ */
 static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
 {
        if (p == argp->tmp) {
@@ -257,7 +271,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
        int expected_len, len = 0;
        u32 dummy32;
        char *buf;
-       int host_err;
 
        DECODE_HEAD;
        iattr->ia_valid = 0;
@@ -284,10 +297,9 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                        return nfserr_resource;
 
                *acl = nfs4_acl_new(nace);
-               if (*acl == NULL) {
-                       host_err = -ENOMEM;
-                       goto out_nfserr;
-               }
+               if (*acl == NULL)
+                       return nfserr_jukebox;
+
                defer_free(argp, kfree, *acl);
 
                (*acl)->naces = nace;
@@ -425,10 +437,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                goto xdr_error;
 
        DECODE_TAIL;
-
-out_nfserr:
-       status = nfserrno(host_err);
-       goto out;
 }
 
 static __be32
@@ -1957,56 +1965,16 @@ static u32 nfs4_file_type(umode_t mode)
        };
 }
 
-static __be32
-nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, kuid_t uid, kgid_t gid,
-                       __be32 **p, int *buflen)
-{
-       int status;
-
-       if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4)
-               return nfserr_resource;
-       if (whotype != NFS4_ACL_WHO_NAMED)
-               status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
-       else if (gid_valid(gid))
-               status = nfsd_map_gid_to_name(rqstp, gid, (u8 *)(*p + 1));
-       else
-               status = nfsd_map_uid_to_name(rqstp, uid, (u8 *)(*p + 1));
-       if (status < 0)
-               return nfserrno(status);
-       *p = xdr_encode_opaque(*p, NULL, status);
-       *buflen -= (XDR_QUADLEN(status) << 2) + 4;
-       BUG_ON(*buflen < 0);
-       return 0;
-}
-
-static inline __be32
-nfsd4_encode_user(struct svc_rqst *rqstp, kuid_t user, __be32 **p, int *buflen)
-{
-       return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, user, INVALID_GID,
-                                p, buflen);
-}
-
-static inline __be32
-nfsd4_encode_group(struct svc_rqst *rqstp, kgid_t group, __be32 **p, int *buflen)
-{
-       return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, INVALID_UID, group,
-                                p, buflen);
-}
-
 static inline __be32
 nfsd4_encode_aclname(struct svc_rqst *rqstp, struct nfs4_ace *ace,
                __be32 **p, int *buflen)
 {
-       kuid_t uid = INVALID_UID;
-       kgid_t gid = INVALID_GID;
-
-       if (ace->whotype == NFS4_ACL_WHO_NAMED) {
-               if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
-                       gid = ace->who_gid;
-               else
-                       uid = ace->who_uid;
-       }
-       return nfsd4_encode_name(rqstp, ace->whotype, uid, gid, p, buflen);
+       if (ace->whotype != NFS4_ACL_WHO_NAMED)
+               return nfs4_acl_write_who(ace->whotype, p, buflen);
+       else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+               return nfsd4_encode_group(rqstp, ace->who_gid, p, buflen);
+       else
+               return nfsd4_encode_user(rqstp, ace->who_uid, p, buflen);
 }
 
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -2090,7 +2058,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
        u32 bmval1 = bmval[1];
        u32 bmval2 = bmval[2];
        struct kstat stat;
-       struct svc_fh tempfh;
+       struct svc_fh *tempfh = NULL;
        struct kstatfs statfs;
        int buflen = count << 2;
        __be32 *attrlenp;
@@ -2137,11 +2105,15 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                        goto out_nfserr;
        }
        if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
-               fh_init(&tempfh, NFS4_FHSIZE);
-               status = fh_compose(&tempfh, exp, dentry, NULL);
+               tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+               status = nfserr_jukebox;
+               if (!tempfh)
+                       goto out;
+               fh_init(tempfh, NFS4_FHSIZE);
+               status = fh_compose(tempfh, exp, dentry, NULL);
                if (status)
                        goto out;
-               fhp = &tempfh;
+               fhp = tempfh;
        }
        if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
                        | FATTR4_WORD0_SUPPORTED_ATTRS)) {
@@ -2222,8 +2194,10 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                if ((buflen -= 4) < 0)
                        goto out_resource;
                dummy = nfs4_file_type(stat.mode);
-               if (dummy == NF4BAD)
-                       goto out_serverfault;
+               if (dummy == NF4BAD) {
+                       status = nfserr_serverfault;
+                       goto out;
+               }
                WRITE32(dummy);
        }
        if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
@@ -2317,8 +2291,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                        WRITE32(ace->flag);
                        WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
                        status = nfsd4_encode_aclname(rqstp, ace, &p, &buflen);
-                       if (status == nfserr_resource)
-                               goto out_resource;
                        if (status)
                                goto out;
                }
@@ -2379,8 +2351,6 @@ out_acl:
        }
        if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
                status = nfsd4_encode_fs_locations(rqstp, exp, &p, &buflen);
-               if (status == nfserr_resource)
-                       goto out_resource;
                if (status)
                        goto out;
        }
@@ -2431,15 +2401,11 @@ out_acl:
        }
        if (bmval1 & FATTR4_WORD1_OWNER) {
                status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
-               if (status == nfserr_resource)
-                       goto out_resource;
                if (status)
                        goto out;
        }
        if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
                status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
-               if (status == nfserr_resource)
-                       goto out_resource;
                if (status)
                        goto out;
        }
@@ -2533,8 +2499,8 @@ out:
                security_release_secctx(context, contextlen);
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
        kfree(acl);
-       if (fhp == &tempfh)
-               fh_put(&tempfh);
+       if (tempfh)
+               fh_put(tempfh);
        return status;
 out_nfserr:
        status = nfserrno(err);
@@ -2542,9 +2508,6 @@ out_nfserr:
 out_resource:
        status = nfserr_resource;
        goto out;
-out_serverfault:
-       status = nfserr_serverfault;
-       goto out;
 }
 
 static inline int attributes_need_mount(u32 *bmval)
@@ -2621,17 +2584,14 @@ out_put:
 static __be32 *
 nfsd4_encode_rdattr_error(__be32 *p, int buflen, __be32 nfserr)
 {
-       __be32 *attrlenp;
-
        if (buflen < 6)
                return NULL;
        *p++ = htonl(2);
        *p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
        *p++ = htonl(0);                         /* bmval1 */
 
-       attrlenp = p++;
+       *p++ = htonl(4);     /* attribute length */
        *p++ = nfserr;       /* no htonl */
-       *attrlenp = htonl((char *)p - (char *)attrlenp - 4);
        return p;
 }
 
@@ -3244,7 +3204,7 @@ nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
 
                if (rpcauth_get_gssinfo(pf, &info) == 0) {
                        supported++;
-                       RESERVE_SPACE(4 + 4 + info.oid.len + 4 + 4);
+                       RESERVE_SPACE(4 + 4 + XDR_LEN(info.oid.len) + 4 + 4);
                        WRITE32(RPC_AUTH_GSS);
                        WRITE32(info.oid.len);
                        WRITEMEM(info.oid.data, info.oid.len);
@@ -3379,35 +3339,43 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
                8 /* eir_clientid */ +
                4 /* eir_sequenceid */ +
                4 /* eir_flags */ +
-               4 /* spr_how */ +
-               8 /* spo_must_enforce, spo_must_allow */ +
-               8 /* so_minor_id */ +
-               4 /* so_major_id.len */ +
-               (XDR_QUADLEN(major_id_sz) * 4) +
-               4 /* eir_server_scope.len */ +
-               (XDR_QUADLEN(server_scope_sz) * 4) +
-               4 /* eir_server_impl_id.count (0) */);
+               4 /* spr_how */);
 
        WRITEMEM(&exid->clientid, 8);
        WRITE32(exid->seqid);
        WRITE32(exid->flags);
 
        WRITE32(exid->spa_how);
+       ADJUST_ARGS();
+
        switch (exid->spa_how) {
        case SP4_NONE:
                break;
        case SP4_MACH_CRED:
+               /* spo_must_enforce, spo_must_allow */
+               RESERVE_SPACE(16);
+
                /* spo_must_enforce bitmap: */
                WRITE32(2);
                WRITE32(nfs4_minimal_spo_must_enforce[0]);
                WRITE32(nfs4_minimal_spo_must_enforce[1]);
                /* empty spo_must_allow bitmap: */
                WRITE32(0);
+
+               ADJUST_ARGS();
                break;
        default:
                WARN_ON_ONCE(1);
        }
 
+       RESERVE_SPACE(
+               8 /* so_minor_id */ +
+               4 /* so_major_id.len */ +
+               (XDR_QUADLEN(major_id_sz) * 4) +
+               4 /* eir_server_scope.len */ +
+               (XDR_QUADLEN(server_scope_sz) * 4) +
+               4 /* eir_server_impl_id.count (0) */);
+
        /* The server_owner struct */
        WRITE64(minor_id);      /* Minor id */
        /* major id */
@@ -3473,28 +3441,6 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
        return 0;
 }
 
-static __be32
-nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
-                            struct nfsd4_destroy_session *destroy_session)
-{
-       return nfserr;
-}
-
-static __be32
-nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
-                         struct nfsd4_free_stateid *free_stateid)
-{
-       __be32 *p;
-
-       if (nfserr)
-               return nfserr;
-
-       RESERVE_SPACE(4);
-       *p++ = nfserr;
-       ADJUST_ARGS();
-       return nfserr;
-}
-
 static __be32
 nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
                      struct nfsd4_sequence *seq)
@@ -3593,8 +3539,8 @@ static nfsd4_enc nfsd4_enc_ops[] = {
        [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
        [OP_EXCHANGE_ID]        = (nfsd4_enc)nfsd4_encode_exchange_id,
        [OP_CREATE_SESSION]     = (nfsd4_enc)nfsd4_encode_create_session,
-       [OP_DESTROY_SESSION]    = (nfsd4_enc)nfsd4_encode_destroy_session,
-       [OP_FREE_STATEID]       = (nfsd4_enc)nfsd4_encode_free_stateid,
+       [OP_DESTROY_SESSION]    = (nfsd4_enc)nfsd4_encode_noop,
+       [OP_FREE_STATEID]       = (nfsd4_enc)nfsd4_encode_noop,
        [OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
        [OP_GETDEVICEINFO]      = (nfsd4_enc)nfsd4_encode_noop,
        [OP_GETDEVICELIST]      = (nfsd4_enc)nfsd4_encode_noop,
index b6af150c96b8cdf616950c1bd0f4f8403eeae5c8..f8f060ffbf4f173888db46dc0c000ce8c1ea34c6 100644 (file)
@@ -131,13 +131,6 @@ nfsd_reply_cache_alloc(void)
        return rp;
 }
 
-static void
-nfsd_reply_cache_unhash(struct svc_cacherep *rp)
-{
-       hlist_del_init(&rp->c_hash);
-       list_del_init(&rp->c_lru);
-}
-
 static void
 nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
@@ -416,22 +409,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 
        /*
         * Since the common case is a cache miss followed by an insert,
-        * preallocate an entry. First, try to reuse the first entry on the LRU
-        * if it works, then go ahead and prune the LRU list.
+        * preallocate an entry.
         */
-       spin_lock(&cache_lock);
-       if (!list_empty(&lru_head)) {
-               rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
-               if (nfsd_cache_entry_expired(rp) ||
-                   num_drc_entries >= max_drc_entries) {
-                       nfsd_reply_cache_unhash(rp);
-                       prune_cache_entries();
-                       goto search_cache;
-               }
-       }
-
-       /* No expired ones available, allocate a new one. */
-       spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
        spin_lock(&cache_lock);
        if (likely(rp)) {
@@ -439,7 +418,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
                drc_mem_usage += sizeof(*rp);
        }
 
-search_cache:
+       /* go ahead and prune the cache */
+       prune_cache_entries();
+
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
                if (likely(rp))
@@ -453,15 +434,6 @@ search_cache:
                goto out;
        }
 
-       /*
-        * We're keeping the one we just allocated. Are we now over the
-        * limit? Prune one off the tip of the LRU in trade for the one we
-        * just allocated if so.
-        */
-       if (num_drc_entries >= max_drc_entries)
-               nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
-                                               struct svc_cacherep, c_lru));
-
        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
index 760c85a6f534a45b0eb396a62acc305484163109..9a4a5f9e7468748f7195ac92f86d7317c83e0822 100644 (file)
@@ -241,6 +241,15 @@ static void nfsd_shutdown_generic(void)
        nfsd_racache_shutdown();
 }
 
+static bool nfsd_needs_lockd(void)
+{
+#if defined(CONFIG_NFSD_V3)
+       return (nfsd_versions[2] != NULL) || (nfsd_versions[3] != NULL);
+#else
+       return (nfsd_versions[2] != NULL);
+#endif
+}
+
 static int nfsd_startup_net(int nrservs, struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -255,9 +264,14 @@ static int nfsd_startup_net(int nrservs, struct net *net)
        ret = nfsd_init_socks(net);
        if (ret)
                goto out_socks;
-       ret = lockd_up(net);
-       if (ret)
-               goto out_socks;
+
+       if (nfsd_needs_lockd() && !nn->lockd_up) {
+               ret = lockd_up(net);
+               if (ret)
+                       goto out_socks;
+               nn->lockd_up = 1;
+       }
+
        ret = nfs4_state_start_net(net);
        if (ret)
                goto out_lockd;
@@ -266,7 +280,10 @@ static int nfsd_startup_net(int nrservs, struct net *net)
        return 0;
 
 out_lockd:
-       lockd_down(net);
+       if (nn->lockd_up) {
+               lockd_down(net);
+               nn->lockd_up = 0;
+       }
 out_socks:
        nfsd_shutdown_generic();
        return ret;
@@ -277,7 +294,10 @@ static void nfsd_shutdown_net(struct net *net)
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        nfs4_state_shutdown_net(net);
-       lockd_down(net);
+       if (nn->lockd_up) {
+               lockd_down(net);
+               nn->lockd_up = 0;
+       }
        nn->nfsd_net_up = false;
        nfsd_shutdown_generic();
 }
index 9c769a47ac5ab7efc9a2b939305ffbad45ed988f..b17d93214d0153b426282b1f23e3414768fb6ca0 100644 (file)
@@ -152,7 +152,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
        type = (stat->mode & S_IFMT);
 
        *p++ = htonl(nfs_ftypes[type >> 12]);
-       *p++ = htonl((u32) stat->mode);
+       *p++ = htonl((u32) (stat->mode & S_IALLUGO));
        *p++ = htonl((u32) stat->nlink);
        *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
        *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
index 1426eb66c8c699cf4104bde226986dda77ccc4c8..017d3cb5e99b4391027fe37b7c56c23966754e2d 100644 (file)
@@ -207,7 +207,12 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
                                goto out_nfserr;
                }
        } else {
-               fh_lock(fhp);
+               /*
+                * In the nfsd4_open() case, this may be held across
+                * subsequent open and delegation acquisition which may
+                * need to take the child's i_mutex:
+                */
+               fh_lock_nested(fhp, I_MUTEX_PARENT);
                dentry = lookup_one_len(name, dparent, len);
                host_err = PTR_ERR(dentry);
                if (IS_ERR(dentry))
@@ -273,13 +278,6 @@ out:
        return err;
 }
 
-static int nfsd_break_lease(struct inode *inode)
-{
-       if (!S_ISREG(inode->i_mode))
-               return 0;
-       return break_lease(inode, O_WRONLY | O_NONBLOCK);
-}
-
 /*
  * Commit metadata changes to stable storage.
  */
@@ -348,8 +346,7 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 
        /* Revoke setuid/setgid on chown */
        if (!S_ISDIR(inode->i_mode) &&
-           (((iap->ia_valid & ATTR_UID) && !uid_eq(iap->ia_uid, inode->i_uid)) ||
-            ((iap->ia_valid & ATTR_GID) && !gid_eq(iap->ia_gid, inode->i_gid)))) {
+           ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
                iap->ia_valid |= ATTR_KILL_PRIV;
                if (iap->ia_valid & ATTR_MODE) {
                        /* we're setting mode too, just clear the s*id bits */
@@ -449,16 +446,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                goto out_put_write_access;
        }
 
-       host_err = nfsd_break_lease(inode);
-       if (host_err)
-               goto out_put_write_access_nfserror;
-
        fh_lock(fhp);
        host_err = notify_change(dentry, iap, NULL);
        fh_unlock(fhp);
 
-out_put_write_access_nfserror:
-       err = nfserrno(host_err);
 out_put_write_access:
        if (size_change)
                put_write_access(inode);
@@ -1609,11 +1600,6 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        err = nfserr_noent;
        if (!dold->d_inode)
                goto out_dput;
-       host_err = nfsd_break_lease(dold->d_inode);
-       if (host_err) {
-               err = nfserrno(host_err);
-               goto out_dput;
-       }
        host_err = vfs_link(dold, dirp, dnew, NULL);
        if (!host_err) {
                err = nfserrno(commit_metadata(ffhp));
@@ -1707,14 +1693,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
        if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
                goto out_dput_new;
 
-       host_err = nfsd_break_lease(odentry->d_inode);
-       if (host_err)
-               goto out_dput_new;
-       if (ndentry->d_inode) {
-               host_err = nfsd_break_lease(ndentry->d_inode);
-               if (host_err)
-                       goto out_dput_new;
-       }
        host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL);
        if (!host_err) {
                host_err = commit_metadata(tfhp);
@@ -1784,16 +1762,12 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
        if (!type)
                type = rdentry->d_inode->i_mode & S_IFMT;
 
-       host_err = nfsd_break_lease(rdentry->d_inode);
-       if (host_err)
-               goto out_put;
        if (type != S_IFDIR)
                host_err = vfs_unlink(dirp, rdentry, NULL);
        else
                host_err = vfs_rmdir(dirp, rdentry);
        if (!host_err)
                host_err = commit_metadata(fhp);
-out_put:
        dput(rdentry);
 
 out_nfserr:
index 1bc1d440a1a5677899a7d85d6b5926bb2f3f7900..fbe90bdb2214e976fa49b50ee8903d25089c2177 100644 (file)
@@ -86,8 +86,6 @@ __be32                nfsd_link(struct svc_rqst *, struct svc_fh *,
 __be32         nfsd_rename(struct svc_rqst *,
                                struct svc_fh *, char *, int,
                                struct svc_fh *, char *, int);
-__be32         nfsd_remove(struct svc_rqst *,
-                               struct svc_fh *, char *, int);
 __be32         nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
                                char *name, int len);
 __be32         nfsd_readdir(struct svc_rqst *, struct svc_fh *,
index b6d5542a4ac8e185e48d9d7c64fab36f5068dd88..335e04aaf7db18842fb63923feebfb31e26e4353 100644 (file)
@@ -174,6 +174,9 @@ struct nfsd3_linkres {
 struct nfsd3_readdirres {
        __be32                  status;
        struct svc_fh           fh;
+       /* Just to save kmalloc on every readdirplus entry (svc_fh is a
+        * little large for the stack): */
+       struct svc_fh           scratch;
        int                     count;
        __be32                  verf[2];
 
index b3ed6446ed8e9a420fedfc2ccbe861c5b46664ed..d278a0d034968d3bdb57b7f942048af7e2ed6712 100644 (file)
@@ -228,7 +228,7 @@ struct nfsd4_open {
        u32             op_create;          /* request */
        u32             op_createmode;      /* request */
        u32             op_bmval[3];        /* request */
-       struct iattr    iattr;              /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
+       struct iattr    op_iattr;           /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
        nfs4_verifier   op_verf __attribute__((aligned(32)));
                                            /* EXCLUSIVE4 */
        clientid_t      op_clientid;        /* request */
@@ -250,7 +250,6 @@ struct nfsd4_open {
        struct nfs4_acl *op_acl;
        struct xdr_netobj op_label;
 };
-#define op_iattr       iattr
 
 struct nfsd4_open_confirm {
        stateid_t       oc_req_stateid          /* request */;
@@ -374,7 +373,6 @@ struct nfsd4_test_stateid {
 
 struct nfsd4_free_stateid {
        stateid_t       fr_stateid;         /* request */
-       __be32          fr_status;          /* response */
 };
 
 /* also used for NVERIFY */
index 2d8be51f90dc9257bf74cad77b719d17f781c739..dc3a9efdaab87751e47edcf9ef3a807fed4573db 100644 (file)
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
        }
        if (likely(bio)) {
                bio->bi_bdev = nilfs->ns_bdev;
-               bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+               bio->bi_iter.bi_sector =
+                       start << (nilfs->ns_blocksize_bits - 9);
        }
        return bio;
 }
index 58772623f02a90e7e7c4355921ba0c8457024175..0e792f5e3147c3cfcf38a980716075ea509b0e46 100644 (file)
@@ -16,12 +16,6 @@ static bool should_merge(struct fsnotify_event *old_fsn,
 {
        struct fanotify_event_info *old, *new;
 
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-       /* dont merge two permission events */
-       if ((old_fsn->mask & FAN_ALL_PERM_EVENTS) &&
-           (new_fsn->mask & FAN_ALL_PERM_EVENTS))
-               return false;
-#endif
        pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
        old = FANOTIFY_E(old_fsn);
        new = FANOTIFY_E(new_fsn);
@@ -34,14 +28,23 @@ static bool should_merge(struct fsnotify_event *old_fsn,
 }
 
 /* and the list better be locked by something too! */
-static struct fsnotify_event *fanotify_merge(struct list_head *list,
-                                            struct fsnotify_event *event)
+static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
 {
        struct fsnotify_event *test_event;
        bool do_merge = false;
 
        pr_debug("%s: list=%p event=%p\n", __func__, list, event);
 
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+       /*
+        * Don't merge a permission event with any other event so that we know
+        * the event structure we have created in fanotify_handle_event() is the
+        * one we should check for permission response.
+        */
+       if (event->mask & FAN_ALL_PERM_EVENTS)
+               return 0;
+#endif
+
        list_for_each_entry_reverse(test_event, list, list) {
                if (should_merge(test_event, event)) {
                        do_merge = true;
@@ -50,10 +53,10 @@ static struct fsnotify_event *fanotify_merge(struct list_head *list,
        }
 
        if (!do_merge)
-               return NULL;
+               return 0;
 
        test_event->mask |= event->mask;
-       return test_event;
+       return 1;
 }
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
@@ -149,7 +152,6 @@ static int fanotify_handle_event(struct fsnotify_group *group,
        int ret = 0;
        struct fanotify_event_info *event;
        struct fsnotify_event *fsn_event;
-       struct fsnotify_event *notify_fsn_event;
 
        BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
        BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
@@ -188,21 +190,19 @@ static int fanotify_handle_event(struct fsnotify_group *group,
        event->response = 0;
 #endif
 
-       notify_fsn_event = fsnotify_add_notify_event(group, fsn_event,
-                                                    fanotify_merge);
-       if (notify_fsn_event) {
+       ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
+       if (ret) {
+               BUG_ON(mask & FAN_ALL_PERM_EVENTS);
                /* Our event wasn't used in the end. Free it. */
                fsnotify_destroy_event(group, fsn_event);
-               if (IS_ERR(notify_fsn_event))
-                       return PTR_ERR(notify_fsn_event);
-               /* We need to ask about a different events after a merge... */
-               event = FANOTIFY_E(notify_fsn_event);
-               fsn_event = notify_fsn_event;
+               ret = 0;
        }
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-       if (fsn_event->mask & FAN_ALL_PERM_EVENTS)
+       if (mask & FAN_ALL_PERM_EVENTS) {
                ret = fanotify_get_response_from_access(group, event);
+               fsnotify_destroy_event(group, fsn_event);
+       }
 #endif
        return ret;
 }
index 0e90174a116a2c337f77af9e11760f1c21e00e43..32a2f034fb94b5915d1e8362ecbac21f362653fb 100644 (file)
@@ -4,6 +4,13 @@
 
 extern struct kmem_cache *fanotify_event_cachep;
 
+/*
+ * Lifetime of the structure differs for normal and permission events. In both
+ * cases the structure is allocated in fanotify_handle_event(). For normal
+ * events the structure is freed immediately after reporting it to userspace.
+ * For permission events we free it only after we receive a response from
+ * userspace.
+ */
 struct fanotify_event_info {
        struct fsnotify_event fse;
        /*
index 1fd66abe574003cce8766e4533c1c42b27de4aff..b6175fa11bf856809d1ee6a43ee9b861980e9160 100644 (file)
@@ -319,7 +319,12 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
                        if (IS_ERR(kevent))
                                break;
                        ret = copy_event_to_user(group, kevent, buf);
-                       fsnotify_destroy_event(group, kevent);
+                       /*
+                        * Permission events get destroyed after we
+                        * receive a response from userspace
+                        */
+                       if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
+                               fsnotify_destroy_event(group, kevent);
                        if (ret < 0)
                                break;
                        buf += ret;
index aad1a35e9af117fdc397cca897ba6f192f2de7a0..d5ee56348bb803fd0ddff46d4f3da7d0fa7016d1 100644 (file)
@@ -53,15 +53,13 @@ static bool event_compare(struct fsnotify_event *old_fsn,
        return false;
 }
 
-static struct fsnotify_event *inotify_merge(struct list_head *list,
-                                           struct fsnotify_event *event)
+static int inotify_merge(struct list_head *list,
+                         struct fsnotify_event *event)
 {
        struct fsnotify_event *last_event;
 
        last_event = list_entry(list->prev, struct fsnotify_event, list);
-       if (!event_compare(last_event, event))
-               return NULL;
-       return last_event;
+       return event_compare(last_event, event);
 }
 
 int inotify_handle_event(struct fsnotify_group *group,
@@ -73,9 +71,8 @@ int inotify_handle_event(struct fsnotify_group *group,
 {
        struct inotify_inode_mark *i_mark;
        struct inotify_event_info *event;
-       struct fsnotify_event *added_event;
        struct fsnotify_event *fsn_event;
-       int ret = 0;
+       int ret;
        int len = 0;
        int alloc_len = sizeof(struct inotify_event_info);
 
@@ -110,18 +107,16 @@ int inotify_handle_event(struct fsnotify_group *group,
        if (len)
                strcpy(event->name, file_name);
 
-       added_event = fsnotify_add_notify_event(group, fsn_event, inotify_merge);
-       if (added_event) {
+       ret = fsnotify_add_notify_event(group, fsn_event, inotify_merge);
+       if (ret) {
                /* Our event wasn't used in the end. Free it. */
                fsnotify_destroy_event(group, fsn_event);
-               if (IS_ERR(added_event))
-                       ret = PTR_ERR(added_event);
        }
 
        if (inode_mark->mask & IN_ONESHOT)
                fsnotify_destroy_mark(inode_mark, group);
 
-       return ret;
+       return 0;
 }
 
 static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
index 952237b8e2d27bbea9466bf41db33ba854b937c6..18b3c4427dcac0f2c9125581cc171be3f1eb3a9f 100644 (file)
@@ -79,15 +79,15 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
 
 /*
  * Add an event to the group notification queue.  The group can later pull this
- * event off the queue to deal with.  If the event is successfully added to the
- * group's notification queue, a reference is taken on event.
+ * event off the queue to deal with.  The function returns 0 if the event was
+ * added to the queue, 1 if the event was merged with some other queued event.
  */
-struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
-                                                struct fsnotify_event *event,
-                                                struct fsnotify_event *(*merge)(struct list_head *,
-                                                                                struct fsnotify_event *))
+int fsnotify_add_notify_event(struct fsnotify_group *group,
+                             struct fsnotify_event *event,
+                             int (*merge)(struct list_head *,
+                                          struct fsnotify_event *))
 {
-       struct fsnotify_event *return_event = NULL;
+       int ret = 0;
        struct list_head *list = &group->notification_list;
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -98,14 +98,14 @@ struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
                /* Queue overflow event only if it isn't already queued */
                if (list_empty(&group->overflow_event.list))
                        event = &group->overflow_event;
-               return_event = event;
+               ret = 1;
        }
 
        if (!list_empty(list) && merge) {
-               return_event = merge(list, event);
-               if (return_event) {
+               ret = merge(list, event);
+               if (ret) {
                        mutex_unlock(&group->notification_mutex);
-                       return return_event;
+                       return ret;
                }
        }
 
@@ -115,7 +115,7 @@ struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
 
        wake_up(&group->notification_waitq);
        kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
-       return return_event;
+       return ret;
 }
 
 /*
index 73920ffda05b331c85ef1760d97083d1590a4a16..bf482dfed14fecf17406a6aa2d517929d6834800 100644 (file)
@@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
        }
 
        /* Must put everything in 512 byte sectors for the bio... */
-       bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+       bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
        bio->bi_bdev = reg->hr_bdev;
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
index 1193ffd0356547b63cdf0a503a2eb68ae84d8ba6..edc5746a902a090ce4dbda6b8b1205e729dff5b3 100644 (file)
@@ -964,9 +964,9 @@ out:
        return ret;
 }
 
-COMPAT_SYSCALL_DEFINE3(readv, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
-               unsigned long, vlen)
+               compat_ulong_t, vlen)
 {
        struct fd f = fdget(fd);
        ssize_t ret;
@@ -1001,9 +1001,9 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
        return ret;
 }
 
-COMPAT_SYSCALL_DEFINE5(preadv, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
-               unsigned long, vlen, u32, pos_low, u32, pos_high)
+               compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
        loff_t pos = ((loff_t)pos_high << 32) | pos_low;
        return compat_sys_preadv64(fd, vec, vlen, pos);
@@ -1031,9 +1031,9 @@ out:
        return ret;
 }
 
-COMPAT_SYSCALL_DEFINE3(writev, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
                const struct compat_iovec __user *, vec,
-               unsigned long, vlen)
+               compat_ulong_t, vlen)
 {
        struct fd f = fdget(fd);
        ssize_t ret;
@@ -1068,9 +1068,9 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
        return ret;
 }
 
-COMPAT_SYSCALL_DEFINE5(pwritev, unsigned long, fd,
+COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
-               unsigned long, vlen, u32, pos_low, u32, pos_high)
+               compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
        loff_t pos = ((loff_t)pos_high << 32) | pos_low;
        return compat_sys_pwritev64(fd, vec, vlen, pos);
index a26739451b535cf02a8016c423583f76a26bac72..db2cfb067d0b1ea88f8b64875ceb174d3ae582d2 100644 (file)
@@ -407,7 +407,7 @@ xfs_alloc_ioend_bio(
        struct bio              *bio = bio_alloc(GFP_NOIO, nvecs);
 
        ASSERT(bio->bi_private == NULL);
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        return bio;
 }
index 9fccfb59429119be16786e1fd2d9b6104546ef93..9c061ef2b0d973c913a1baaee4a43bc27523b244 100644 (file)
@@ -445,8 +445,8 @@ _xfs_buf_find(
        numbytes = BBTOB(numblks);
 
        /* Check for IOs smaller than the sector size / not sector aligned */
-       ASSERT(!(numbytes < (1 << btp->bt_sshift)));
-       ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
+       ASSERT(!(numbytes < btp->bt_meta_sectorsize));
+       ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
 
        /*
         * Corrupted block numbers can get through to here, unfortunately, so we
@@ -1240,7 +1240,7 @@ next_chunk:
 
        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = bp->b_target->bt_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;
 
@@ -1262,7 +1262,7 @@ next_chunk:
                total_nr_pages--;
        }
 
-       if (likely(bio->bi_size)) {
+       if (likely(bio->bi_iter.bi_size)) {
                if (xfs_buf_is_vmapped(bp)) {
                        flush_kernel_vmap_range(bp->b_addr,
                                                xfs_buf_vmap_len(bp));
@@ -1599,9 +1599,9 @@ xfs_setsize_buftarg(
        unsigned int            blocksize,
        unsigned int            sectorsize)
 {
-       btp->bt_bsize = blocksize;
-       btp->bt_sshift = ffs(sectorsize) - 1;
-       btp->bt_smask = sectorsize - 1;
+       /* Set up metadata sector size info */
+       btp->bt_meta_sectorsize = sectorsize;
+       btp->bt_meta_sectormask = sectorsize - 1;
 
        if (set_blocksize(btp->bt_bdev, sectorsize)) {
                char name[BDEVNAME_SIZE];
@@ -1614,6 +1614,10 @@ xfs_setsize_buftarg(
                return EINVAL;
        }
 
+       /* Set up device logical sector size mask */
+       btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
+       btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
+
        return 0;
 }
 
index 1cf21a4a9f221de465299bf820fa710c3b3aa40e..995339534db6a4b65c6ec332055734f7415ca621 100644 (file)
@@ -88,14 +88,28 @@ typedef unsigned int xfs_buf_flags_t;
  */
 #define XFS_BSTATE_DISPOSE      (1 << 0)       /* buffer being discarded */
 
+/*
+ * The xfs_buftarg contains 2 notions of "sector size" -
+ *
+ * 1) The metadata sector size, which is the minimum unit and
+ *    alignment of IO which will be performed by metadata operations.
+ * 2) The device logical sector size
+ *
+ * The first is specified at mkfs time, and is stored on-disk in the
+ * superblock's sb_sectsize.
+ *
+ * The latter is derived from the underlying device, and controls direct IO
+ * alignment constraints.
+ */
 typedef struct xfs_buftarg {
        dev_t                   bt_dev;
        struct block_device     *bt_bdev;
        struct backing_dev_info *bt_bdi;
        struct xfs_mount        *bt_mount;
-       unsigned int            bt_bsize;
-       unsigned int            bt_sshift;
-       size_t                  bt_smask;
+       unsigned int            bt_meta_sectorsize;
+       size_t                  bt_meta_sectormask;
+       size_t                  bt_logical_sectorsize;
+       size_t                  bt_logical_sectormask;
 
        /* LRU control structures */
        struct shrinker         bt_shrinker;
index e001215926326c6a87ce1d5d9887a11aafeb84ca..2e7989e3a2d67374d17e5086ec3b15bfbcb32e2d 100644 (file)
@@ -261,7 +261,8 @@ xfs_file_aio_read(
                xfs_buftarg_t   *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
-               if ((pos & target->bt_smask) || (size & target->bt_smask)) {
+               /* DIO must be aligned to device logical sector size */
+               if ((pos | size) & target->bt_logical_sectormask) {
                        if (pos == i_size_read(inode))
                                return 0;
                        return -XFS_ERROR(EINVAL);
@@ -641,9 +642,11 @@ xfs_file_dio_aio_write(
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;
 
-       if ((pos & target->bt_smask) || (count & target->bt_smask))
+       /* DIO must be aligned to device logical sector size */
+       if ((pos | count) & target->bt_logical_sectormask)
                return -XFS_ERROR(EINVAL);
 
+       /* "unaligned" here means not aligned to a filesystem block */
        if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
                unaligned_io = 1;
 
index 518aa56b8f2e46d1602bb800c03f5c3d06b9fb38..bcfe61202115510b22509ad49aadc16bcbbb4368 100644 (file)
@@ -1583,7 +1583,7 @@ xfs_file_ioctl(
                        XFS_IS_REALTIME_INODE(ip) ?
                        mp->m_rtdev_targp : mp->m_ddev_targp;
 
-               da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+               da.d_mem =  da.d_miniosz = target->bt_logical_sectorsize;
                da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
 
                if (copy_to_user(arg, &da, sizeof(da)))
index 1d4a920ef7ff4ffea56470ef24c2e6cf8a7b0696..04086c5be930e2941f8be91cb0a47107da73ad08 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/mutex.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>       /* For pte_wrprotect */
 #endif
@@ -136,7 +137,6 @@ int drm_err(const char *func, const char *format, ...);
 
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP     0x1
-#define DRIVER_REQUIRE_AGP 0x2
 #define DRIVER_PCI_DMA     0x8
 #define DRIVER_SG          0x10
 #define DRIVER_HAVE_DMA    0x20
@@ -180,6 +180,22 @@ int drm_err(const char *func, const char *format, ...);
 #define DRM_ERROR(fmt, ...)                            \
        drm_err(__func__, fmt, ##__VA_ARGS__)
 
+/**
+ * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_ERROR_RATELIMITED(fmt, ...)                                \
+({                                                                     \
+       static DEFINE_RATELIMIT_STATE(_rs,                              \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST);         \
+                                                                       \
+       if (__ratelimit(&_rs))                                          \
+               drm_err(__func__, fmt, ##__VA_ARGS__);                  \
+})
+
 #define DRM_INFO(fmt, ...)                             \
        printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
 
@@ -422,7 +438,6 @@ struct drm_file {
        struct pid *pid;
        kuid_t uid;
        drm_magic_t magic;
-       unsigned long ioctl_count;
        struct list_head lhead;
        struct drm_minor *minor;
        unsigned long lock_count;
@@ -511,7 +526,7 @@ struct drm_device_dma {
  */
 struct drm_agp_mem {
        unsigned long handle;           /**< handle */
-       DRM_AGP_MEM *memory;
+       struct agp_memory *memory;
        unsigned long bound;            /**< address */
        int pages;
        struct list_head head;
@@ -523,7 +538,7 @@ struct drm_agp_mem {
  * \sa drm_agp_init() and drm_device::agp.
  */
 struct drm_agp_head {
-       DRM_AGP_KERN agp_info;          /**< AGP device information */
+       struct agp_kern_info agp_info;          /**< AGP device information */
        struct list_head memory;
        unsigned long mode;             /**< AGP mode */
        struct agp_bridge_data *bridge;
@@ -606,13 +621,6 @@ struct drm_ati_pcigart_info {
        int table_size;
 };
 
-/**
- * GEM specific mm private for tracking GEM objects
- */
-struct drm_gem_mm {
-       struct drm_vma_offset_manager vma_manager;
-};
-
 /**
  * This structure defines the drm_mm memory object, which will be used by the
  * DRM for its buffer objects.
@@ -750,10 +758,6 @@ struct drm_bus {
        int (*set_unique)(struct drm_device *dev, struct drm_master *master,
                          struct drm_unique *unique);
        int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
-       /* hooks that are for PCI */
-       int (*agp_init)(struct drm_device *dev);
-       void (*agp_destroy)(struct drm_device *dev);
-
 };
 
 /**
@@ -841,6 +845,7 @@ struct drm_driver {
         *
         * \param dev  DRM device.
         * \param crtc Id of the crtc to query.
+        * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
         * \param *vpos Target location for current vertical scanout position.
         * \param *hpos Target location for current horizontal scanout position.
         * \param *stime Target location for timestamp taken immediately before
@@ -863,6 +868,7 @@ struct drm_driver {
         *
         */
        int (*get_scanout_position) (struct drm_device *dev, int crtc,
+                                    unsigned int flags,
                                     int *vpos, int *hpos, ktime_t *stime,
                                     ktime_t *etime);
 
@@ -903,7 +909,7 @@ struct drm_driver {
 
        /* these have to be filled in */
 
-       irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+       irqreturn_t(*irq_handler) (int irq, void *arg);
        void (*irq_preinstall) (struct drm_device *dev);
        int (*irq_postinstall) (struct drm_device *dev);
        void (*irq_uninstall) (struct drm_device *dev);
@@ -995,8 +1001,8 @@ struct drm_driver {
        } kdriver;
        struct drm_bus *bus;
 
-       /* List of devices hanging off this driver */
-       struct list_head device_list;
+       /* List of devices hanging off this driver with stealth attach. */
+       struct list_head legacy_dev_list;
 };
 
 #define DRM_MINOR_UNASSIGNED 0
@@ -1085,7 +1091,7 @@ struct drm_vblank_crtc {
  * may contain multiple heads.
  */
 struct drm_device {
-       struct list_head driver_item;   /**< list of devices per driver */
+       struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
        char *devname;                  /**< For /proc/interrupts */
        int if_version;                 /**< Highest interface version set */
 
@@ -1098,8 +1104,6 @@ struct drm_device {
        /** \name Usage Counters */
        /*@{ */
        int open_count;                 /**< Outstanding files open */
-       atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
-       atomic_t vma_count;             /**< Outstanding vma areas open */
        int buf_use;                    /**< Buffers in use -- cannot alloc */
        atomic_t buf_alloc;             /**< Buffer allocation in progress */
        /*@} */
@@ -1176,7 +1180,6 @@ struct drm_device {
        struct drm_sg_mem *sg;  /**< Scatter gather memory */
        unsigned int num_crtcs;                  /**< Number of CRTCs on this device */
        void *dev_private;              /**< device private data */
-       void *mm_private;
        struct address_space *dev_mapping;
        struct drm_sigdata sigdata;        /**< For block_all_signals */
        sigset_t sigmask;
@@ -1194,6 +1197,7 @@ struct drm_device {
        /*@{ */
        struct mutex object_name_lock;
        struct idr object_name_idr;
+       struct drm_vma_offset_manager *vma_offset_manager;
        /*@} */
        int switch_power_state;
 
@@ -1268,6 +1272,7 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
                                /* Memory management support (drm_memory.h) */
 #include <drm/drm_memory.h>
 
+
                                /* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
@@ -1398,8 +1403,10 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                                                 int crtc, int *max_error,
                                                 struct timeval *vblank_time,
                                                 unsigned flags,
-                                                struct drm_crtc *refcrtc);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+                                                const struct drm_crtc *refcrtc,
+                                                const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+                                           const struct drm_display_mode *mode);
 
 extern bool
 drm_mode_parse_command_line_for_connector(const char *mode_option,
@@ -1461,6 +1468,30 @@ extern int drm_debugfs_create_files(const struct drm_info_list *files,
 extern int drm_debugfs_remove_files(const struct drm_info_list *files,
                                    int count, struct drm_minor *minor);
 extern int drm_debugfs_cleanup(struct drm_minor *minor);
+#else
+static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+                                  struct dentry *root)
+{
+       return 0;
+}
+
+static inline int drm_debugfs_create_files(const struct drm_info_list *files,
+                                          int count, struct dentry *root,
+                                          struct drm_minor *minor)
+{
+       return 0;
+}
+
+static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
+                                          int count, struct drm_minor *minor)
+{
+       return 0;
+}
+
+static inline int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+       return 0;
+}
 #endif
 
                                /* Info file support */
@@ -1645,6 +1676,7 @@ static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
 
        return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
+void drm_pci_agp_destroy(struct drm_device *dev);
 
 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
@@ -1660,7 +1692,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
 
 /* platform section */
 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
-extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
 
 /* returns true if currently okay to sleep */
 static __inline__ bool drm_can_sleep(void)
index a184eeee9c96bfb4afc4a1bc8b173a0a71fa1225..86a02188074bcec037714fdb1ea293dfb4ae3ebe 100644 (file)
 
 #if __OS_HAS_AGP
 
-void drm_free_agp(DRM_AGP_MEM * handle, int pages);
-int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
-int drm_unbind_agp(DRM_AGP_MEM * handle);
-DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+void drm_free_agp(struct agp_memory * handle, int pages);
+int drm_bind_agp(struct agp_memory * handle, unsigned int start);
+int drm_unbind_agp(struct agp_memory * handle);
+struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
                                struct page **pages,
                                unsigned long num_pages,
                                uint32_t gtt_offset,
                                uint32_t type);
 
 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_destroy(struct drm_agp_head *agp);
 void drm_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
@@ -46,29 +45,23 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
 int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
-       return drm_core_check_feature(dev, DRIVER_USE_AGP);
-}
-
 #else /* __OS_HAS_AGP */
 
-static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+static inline void drm_free_agp(struct agp_memory * handle, int pages)
 {
 }
 
-static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
 {
        return -ENODEV;
 }
 
-static inline int drm_unbind_agp(DRM_AGP_MEM * handle)
+static inline int drm_unbind_agp(struct agp_memory * handle)
 {
        return -ENODEV;
 }
 
-static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
                                              struct page **pages,
                                              unsigned long num_pages,
                                              uint32_t gtt_offset,
@@ -82,10 +75,6 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
        return NULL;
 }
 
-static inline void drm_agp_destroy(struct drm_agp_head *agp)
-{
-}
-
 static inline void drm_agp_clear(struct drm_device *dev)
 {
 }
@@ -183,12 +172,6 @@ static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 {
        return -ENODEV;
 }
-
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
-       return 0;
-}
-
 #endif /* __OS_HAS_AGP */
 
 #endif /* _DRM_AGPSUPPORT_H_ */
index f32c5cd51f4125a455a81ff60cd190c1383d31e1..71727b6210ae57d5b064be1cab19a4dc660df6f8 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/idr.h>
 #include <linux/fb.h>
+#include <linux/hdmi.h>
 #include <drm/drm_mode.h>
 
 #include <drm/drm_fourcc.h>
@@ -181,6 +182,7 @@ struct drm_display_mode {
 
        int vrefresh;           /* in Hz */
        int hsync;              /* in kHz */
+       enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
 static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
@@ -447,7 +449,7 @@ struct drm_crtc {
        uint16_t *gamma_store;
 
        /* Constants needed for precise vblank and swap timestamping. */
-       s64 framedur_ns, linedur_ns, pixeldur_ns;
+       int framedur_ns, linedur_ns, pixeldur_ns;
 
        /* if you are using the helper */
        void *helper_private;
@@ -929,6 +931,19 @@ extern int drm_crtc_init(struct drm_device *dev,
                         struct drm_crtc *crtc,
                         const struct drm_crtc_funcs *funcs);
 extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+/**
+ * drm_crtc_mask - find the mask of a registered CRTC
+ * @crtc: CRTC to find mask for
+ *
+ * Given a registered CRTC, return the mask bit of that CRTC for an
+ * encoder's possible_crtcs field.
+ */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+{
+       return 1 << drm_crtc_index(crtc);
+}
 
 extern void drm_connector_ida_init(void);
 extern void drm_connector_ida_destroy(void);
@@ -950,6 +965,19 @@ extern int drm_encoder_init(struct drm_device *dev,
                            const struct drm_encoder_funcs *funcs,
                            int encoder_type);
 
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+                                      struct drm_crtc *crtc)
+{
+       return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
 extern int drm_plane_init(struct drm_device *dev,
                          struct drm_plane *plane,
                          unsigned long possible_crtcs,
index ef6ad3a8e58e517f31624767fb1751f7ada463c5..b1388b5fe7acd7d597f6345f273ce26de61d7535 100644 (file)
@@ -120,8 +120,8 @@ struct drm_encoder_helper_funcs {
  */
 struct drm_connector_helper_funcs {
        int (*get_modes)(struct drm_connector *connector);
-       int (*mode_valid)(struct drm_connector *connector,
-                         struct drm_display_mode *mode);
+       enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+                                          struct drm_display_mode *mode);
        struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
 };
 
index a92c3754e3bbffe56c286d85bd2212f753ebe2c3..1d09050a8c001749ba26087581b93cb2de1f9153 100644 (file)
  * 1.2 formally includes both eDP and DPI definitions.
  */
 
-#define AUX_NATIVE_WRITE       0x8
-#define AUX_NATIVE_READ                0x9
-#define AUX_I2C_WRITE          0x0
-#define AUX_I2C_READ           0x1
-#define AUX_I2C_STATUS         0x2
-#define AUX_I2C_MOT            0x4
-
-#define AUX_NATIVE_REPLY_ACK   (0x0 << 4)
-#define AUX_NATIVE_REPLY_NACK  (0x1 << 4)
-#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
-#define AUX_NATIVE_REPLY_MASK  (0x3 << 4)
-
-#define AUX_I2C_REPLY_ACK      (0x0 << 6)
-#define AUX_I2C_REPLY_NACK     (0x1 << 6)
-#define AUX_I2C_REPLY_DEFER    (0x2 << 6)
-#define AUX_I2C_REPLY_MASK     (0x3 << 6)
+#define DP_AUX_I2C_WRITE               0x0
+#define DP_AUX_I2C_READ                        0x1
+#define DP_AUX_I2C_STATUS              0x2
+#define DP_AUX_I2C_MOT                 0x4
+#define DP_AUX_NATIVE_WRITE            0x8
+#define DP_AUX_NATIVE_READ             0x9
+
+#define DP_AUX_NATIVE_REPLY_ACK                (0x0 << 0)
+#define DP_AUX_NATIVE_REPLY_NACK       (0x1 << 0)
+#define DP_AUX_NATIVE_REPLY_DEFER      (0x2 << 0)
+#define DP_AUX_NATIVE_REPLY_MASK       (0x3 << 0)
+
+#define DP_AUX_I2C_REPLY_ACK           (0x0 << 2)
+#define DP_AUX_I2C_REPLY_NACK          (0x1 << 2)
+#define DP_AUX_I2C_REPLY_DEFER         (0x2 << 2)
+#define DP_AUX_I2C_REPLY_MASK          (0x3 << 2)
 
 /* AUX CH addresses */
 /* DPCD */
 
 #define DP_TEST_REQUEST                            0x218
 # define DP_TEST_LINK_TRAINING             (1 << 0)
-# define DP_TEST_LINK_PATTERN              (1 << 1)
+# define DP_TEST_LINK_VIDEO_PATTERN        (1 << 1)
 # define DP_TEST_LINK_EDID_READ                    (1 << 2)
 # define DP_TEST_LINK_PHY_TEST_PATTERN     (1 << 3) /* DPCD >= 1.1 */
+# define DP_TEST_LINK_FAUX_PATTERN         (1 << 4) /* DPCD >= 1.2 */
 
 #define DP_TEST_LINK_RATE                  0x219
 # define DP_LINK_RATE_162                  (0x6)
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
new file mode 100644 (file)
index 0000000..d32628a
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRM_MIPI_DSI_H__
+#define __DRM_MIPI_DSI_H__
+
+#include <linux/device.h>
+
+struct mipi_dsi_host;
+struct mipi_dsi_device;
+
+/**
+ * struct mipi_dsi_msg - read/write DSI buffer
+ * @channel: virtual channel id
+ * @type: payload data type
+ * @tx_len: length of @tx_buf
+ * @tx_buf: data to be written
+ * @rx_len: length of @rx_buf
+ * @rx_buf: data to be read, or NULL
+ */
+struct mipi_dsi_msg {
+       u8 channel;
+       u8 type;
+
+       size_t tx_len;
+       const void *tx_buf;
+
+       size_t rx_len;
+       void *rx_buf;
+};
+
+/**
+ * struct mipi_dsi_host_ops - DSI bus operations
+ * @attach: attach DSI device to DSI host
+ * @detach: detach DSI device from DSI host
+ * @transfer: send and/or receive DSI packet, return number of received bytes,
+ *           or error
+ */
+struct mipi_dsi_host_ops {
+       int (*attach)(struct mipi_dsi_host *host,
+                     struct mipi_dsi_device *dsi);
+       int (*detach)(struct mipi_dsi_host *host,
+                     struct mipi_dsi_device *dsi);
+       ssize_t (*transfer)(struct mipi_dsi_host *host,
+                           struct mipi_dsi_msg *msg);
+};
+
+/**
+ * struct mipi_dsi_host - DSI host device
+ * @dev: driver model device node for this DSI host
+ * @ops: DSI host operations
+ */
+struct mipi_dsi_host {
+       struct device *dev;
+       const struct mipi_dsi_host_ops *ops;
+};
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host);
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+
+/* DSI mode flags */
+
+/* video mode */
+#define MIPI_DSI_MODE_VIDEO            BIT(0)
+/* video burst mode */
+#define MIPI_DSI_MODE_VIDEO_BURST      BIT(1)
+/* video pulse mode */
+#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2)
+/* enable auto vertical count mode */
+#define MIPI_DSI_MODE_VIDEO_AUTO_VERT  BIT(3)
+/* enable hsync-end packets in vsync-pulse and v-porch area */
+#define MIPI_DSI_MODE_VIDEO_HSE                BIT(4)
+/* disable hfront-porch area */
+#define MIPI_DSI_MODE_VIDEO_HFP                BIT(5)
+/* disable hback-porch area */
+#define MIPI_DSI_MODE_VIDEO_HBP                BIT(6)
+/* disable hsync-active area */
+#define MIPI_DSI_MODE_VIDEO_HSA                BIT(7)
+/* flush display FIFO on vsync pulse */
+#define MIPI_DSI_MODE_VSYNC_FLUSH      BIT(8)
+/* disable EoT packets in HS mode */
+#define MIPI_DSI_MODE_EOT_PACKET       BIT(9)
+
+enum mipi_dsi_pixel_format {
+       MIPI_DSI_FMT_RGB888,
+       MIPI_DSI_FMT_RGB666,
+       MIPI_DSI_FMT_RGB666_PACKED,
+       MIPI_DSI_FMT_RGB565,
+};
+
+/**
+ * struct mipi_dsi_device - DSI peripheral device
+ * @host: DSI host for this peripheral
+ * @dev: driver model device node for this peripheral
+ * @channel: virtual channel assigned to the peripheral
+ * @format: pixel format for video mode
+ * @lanes: number of active data lanes
+ * @mode_flags: DSI operation mode related flags
+ */
+struct mipi_dsi_device {
+       struct mipi_dsi_host *host;
+       struct device dev;
+
+       unsigned int channel;
+       unsigned int lanes;
+       enum mipi_dsi_pixel_format format;
+       unsigned long mode_flags;
+};
+
+#define to_mipi_dsi_device(d) container_of(d, struct mipi_dsi_device, dev)
+
+int mipi_dsi_attach(struct mipi_dsi_device *dsi);
+int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
+                      const void *data, size_t len);
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
+                         u8 cmd, void *data, size_t len);
+
+/**
+ * struct mipi_dsi_driver - DSI driver
+ * @driver: device driver model driver
+ * @probe: callback for device binding
+ * @remove: callback for device unbinding
+ */
+struct mipi_dsi_driver {
+       struct device_driver driver;
+       int(*probe)(struct mipi_dsi_device *dsi);
+       int(*remove)(struct mipi_dsi_device *dsi);
+};
+
+#define to_mipi_dsi_driver(d) container_of(d, struct mipi_dsi_driver, driver)
+
+static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi)
+{
+       return dev_get_drvdata(&dsi->dev);
+}
+
+static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
+{
+       dev_set_drvdata(&dsi->dev, data);
+}
+
+int mipi_dsi_driver_register(struct mipi_dsi_driver *driver);
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
+
+#define module_mipi_dsi_driver(__mipi_dsi_driver) \
+       module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
+                       mipi_dsi_driver_unregister)
+
+#endif /* __DRM_MIPI_DSI__ */
index 815fafc6b4adb2f0acc7f4474351b6e56d30cef5..86ab99bc0ac50e560b5f31c55fd3f91231607244 100644 (file)
@@ -21,7 +21,6 @@ static inline void writeq(u64 val, void __iomem *reg)
 
 /** Current process ID */
 #define DRM_CURRENTPID                 task_pid_nr(current)
-#define DRM_SUSER(p)                   capable(CAP_SYS_ADMIN)
 #define DRM_UDELAY(d)                  udelay(d)
 /** Read a byte from a MMIO region */
 #define DRM_READ8(map, offset)         readb(((void __iomem *)(map)->handle) + (offset))
@@ -35,45 +34,12 @@ static inline void writeq(u64 val, void __iomem *reg)
 #define DRM_WRITE16(map, offset, val)   writew(val, ((void __iomem *)(map)->handle) + (offset))
 /** Write a dword into a MMIO region */
 #define DRM_WRITE32(map, offset, val)  writel(val, ((void __iomem *)(map)->handle) + (offset))
-/** Read memory barrier */
 
 /** Read a qword from a MMIO region - be careful using these unless you really understand them */
 #define DRM_READ64(map, offset)                readq(((void __iomem *)(map)->handle) + (offset))
 /** Write a qword into a MMIO region */
 #define DRM_WRITE64(map, offset, val)  writeq(val, ((void __iomem *)(map)->handle) + (offset))
 
-#define DRM_READMEMORYBARRIER()                rmb()
-/** Write memory barrier */
-#define DRM_WRITEMEMORYBARRIER()       wmb()
-/** Read/write memory barrier */
-#define DRM_MEMORYBARRIER()            mb()
-
-/** IRQ handler arguments and return type and values */
-#define DRM_IRQ_ARGS           int irq, void *arg
-
-/** AGP types */
-#if __OS_HAS_AGP
-#define DRM_AGP_MEM            struct agp_memory
-#define DRM_AGP_KERN           struct agp_kern_info
-#else
-/* define some dummy types for non AGP supporting kernels */
-struct no_agp_kern {
-       unsigned long aper_base;
-       unsigned long aper_size;
-};
-#define DRM_AGP_MEM             int
-#define DRM_AGP_KERN            struct no_agp_kern
-#endif
-
-/** Other copying of data to kernel space */
-#define DRM_COPY_FROM_USER(arg1, arg2, arg3)           \
-       copy_from_user(arg1, arg2, arg3)
-/** Other copying of data from kernel space */
-#define DRM_COPY_TO_USER(arg1, arg2, arg3)             \
-       copy_to_user(arg1, arg2, arg3)
-
-#define DRM_HZ HZ
-
 #define DRM_WAIT_ON( ret, queue, timeout, condition )          \
 do {                                                           \
        DECLARE_WAITQUEUE(entry, current);                      \
@@ -97,6 +63,3 @@ do {                                                          \
        __set_current_state(TASK_RUNNING);                      \
        remove_wait_queue(&(queue), &entry);                    \
 } while (0)
-
-#define DRM_WAKEUP( queue ) wake_up( queue )
-#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
new file mode 100644 (file)
index 0000000..c2ab77a
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_PANEL_H__
+#define __DRM_PANEL_H__
+
+#include <linux/list.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_panel;
+
+struct drm_panel_funcs {
+       int (*disable)(struct drm_panel *panel);
+       int (*enable)(struct drm_panel *panel);
+       int (*get_modes)(struct drm_panel *panel);
+};
+
+struct drm_panel {
+       struct drm_device *drm;
+       struct drm_connector *connector;
+       struct device *dev;
+
+       const struct drm_panel_funcs *funcs;
+
+       struct list_head list;
+};
+
+static inline int drm_panel_disable(struct drm_panel *panel)
+{
+       if (panel && panel->funcs && panel->funcs->disable)
+               return panel->funcs->disable(panel);
+
+       return panel ? -ENOSYS : -EINVAL;
+}
+
+static inline int drm_panel_enable(struct drm_panel *panel)
+{
+       if (panel && panel->funcs && panel->funcs->enable)
+               return panel->funcs->enable(panel);
+
+       return panel ? -ENOSYS : -EINVAL;
+}
+
+void drm_panel_init(struct drm_panel *panel);
+
+int drm_panel_add(struct drm_panel *panel);
+void drm_panel_remove(struct drm_panel *panel);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
+int drm_panel_detach(struct drm_panel *panel);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np);
+#else
+static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+       return NULL;
+}
+#endif
+
+#endif
index 8639c85d61c400f4694e99ca7e1282df23f5ef66..32d34ebf0706fa8f73e793b035816fd7cfaa36a3 100644 (file)
@@ -681,6 +681,15 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
 extern int ttm_tt_swapout(struct ttm_tt *ttm,
                          struct file *persistent_swap_storage);
 
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
 /*
  * ttm_bo.c
  */
index 58b029894eb33ea32dff2ba2977546c097f2b533..0097cc03034e18b10e5ad9cdc34b9019eb821314 100644 (file)
@@ -190,13 +190,25 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
  * @key: Hash key
  *
  * Looks up a struct ttm_base_object with the key @key.
- * Also verifies that the object is visible to the application, by
- * comparing the @tfile argument and checking the object shareable flag.
  */
 
 extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
                                                      *tfile, uint32_t key);
 
+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
 /**
  * ttm_base_object_unref
  *
@@ -218,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
  *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
  * Adding a ref object to a base object is basically like referencing the
  * base object, but a user-space application holds the reference. When the
  * file corresponding to @tfile is closed, all its reference objects are
index 04d318d1187aa072723b615b05087ae5f4c16bb2..032ed87ef0f332afbe94f79ae3b982f060a181ce 100644 (file)
@@ -57,7 +57,7 @@
 #define EXTPCLK_CLK_SRC                                        40
 #define HDMI_CLK_SRC                                   41
 #define VSYNC_CLK_SRC                                  42
-#define RBCPR_CLK_SRC                                  43
+#define MMSS_RBCPR_CLK_SRC                             43
 #define CAMSS_CCI_CCI_AHB_CLK                          44
 #define CAMSS_CCI_CCI_CLK                              45
 #define CAMSS_CSI0_AHB_CLK                             46
index 060ff695085c596f2ddcd2166e7825290d75bd3d..70654521dab69fb03443723550e9b9f8c64a6533 100644 (file)
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
  */
-#define bio_iovec_idx(bio, idx)        (&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)         bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio)          bio_iovec((bio))->bv_page
-#define bio_offset(bio)                bio_iovec((bio))->bv_offset
-#define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_end_sector(bio)    ((bio)->bi_sector + bio_sectors((bio)))
+#define __bvec_iter_bvec(bvec, iter)   (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)                             \
+       (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)                              \
+       min((iter).bi_size,                                     \
+           __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)                           \
+       (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)                             \
+((struct bio_vec) {                                            \
+       .bv_page        = bvec_iter_page((bvec), (iter)),       \
+       .bv_len         = bvec_iter_len((bvec), (iter)),        \
+       .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
+})
+
+#define bio_iter_iovec(bio, iter)                              \
+       bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter)                               \
+       bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter)                                        \
+       bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter)                             \
+       bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio)          bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio)                bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio)         bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio)                             \
+       ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio)       ((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)    ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+       if (bio &&
+           bio->bi_iter.bi_size &&
+           !(bio->bi_rw & REQ_DISCARD))
+               return true;
+
+       return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+       if (!bio_has_data(bio))
+               return false;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               return false;
+
+       return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+               return false;
+
+       return true;
+}
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-       if (bio->bi_vcnt)
-               return bio_iovec(bio)->bv_len;
+       if (bio_has_data(bio))
+               return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
 {
-       if (bio->bi_vcnt)
+       if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);
 
        return NULL;
@@ -97,19 +159,16 @@ static inline void *bio_data(struct bio *bio)
  * permanent PIO fall back, user is probably better off disabling highmem
  * I/O completely on that queue (see ide-dma for example)
  */
-#define __bio_kmap_atomic(bio, idx)                            \
-       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +    \
-               bio_iovec_idx((bio), (idx))->bv_offset)
+#define __bio_kmap_atomic(bio, iter)                           \
+       (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +   \
+               bio_iter_iovec((bio), (iter)).bv_offset)
 
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
+#define __bio_kunmap_atomic(addr)      kunmap_atomic(addr)
 
 /*
  * merge helpers etc
  */
 
-#define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)      bio_iovec_idx((bio), (bio)->bi_idx)
-
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)    \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,76 @@ static inline void *bio_data(struct bio *bio)
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
-       BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
-/*
- * drivers should not use the __ version unless they _really_ know what
- * they're doing
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx)                 \
-       for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
-            i < (bio)->bi_vcnt;                                        \
-            bvl++, i++)
-
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
 #define bio_for_each_segment_all(bvl, bio, i)                          \
-       for (i = 0;                                                     \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+       for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+                                    unsigned bytes)
+{
+       WARN_ONCE(bytes > iter->bi_size,
+                 "Attempted to advance past end of bvec iter\n");
+
+       while (bytes) {
+               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+               bytes -= len;
+               iter->bi_size -= len;
+               iter->bi_bvec_done += len;
+
+               if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+                       iter->bi_bvec_done = 0;
+                       iter->bi_idx++;
+               }
+       }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)                       \
+       for ((iter) = start;                                            \
+            (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
+               (iter).bi_size;                                         \
+            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+                                   unsigned bytes)
+{
+       iter->bi_sector += bytes >> 9;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               iter->bi_size -= bytes;
+       else
+               bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
 
-#define bio_for_each_segment(bvl, bio, i)                              \
-       for (i = (bio)->bi_idx;                                         \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+#define __bio_for_each_segment(bvl, bio, iter, start)                  \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
+            bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+#define bio_for_each_segment(bvl, bio, iter)                           \
+       __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+       unsigned segs = 0;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(bv, bio, iter)
+               segs++;
+
+       return segs;
+}
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
@@ -177,16 +279,15 @@ static inline void *bio_data(struct bio *bio)
 struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */
 
-       sector_t                bip_sector;     /* virtual start sector */
+       struct bvec_iter        bip_iter;
 
+       /* kill - should just use bip_vec */
        void                    *bip_buf;       /* generated integrity data */
-       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
-       unsigned int            bip_size;
+       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
-       unsigned short          bip_idx;        /* current bip_vec index */
        unsigned                bip_owns_buf:1; /* should free bip_buf */
 
        struct work_struct      bip_work;       /* I/O completion */
@@ -196,29 +297,28 @@ struct bio_integrity_payload {
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
+extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+                            gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:                bio set to allocate from
  *
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- *   in bio2.bi_private
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
  */
-struct bio_pair {
-       struct bio                      bio1, bio2;
-       struct bio_vec                  bv1, bv2;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-       struct bio_integrity_payload    bip1, bip2;
-       struct bio_vec                  iv1, iv2;
-#endif
-       atomic_t                        cnt;
-       int                             error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
-extern void bio_pair_release(struct bio_pair *dbio);
-extern void bio_trim(struct bio *bio, int offset, int size);
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+                                        gfp_t gfp, struct bio_set *bs)
+{
+       if (sectors >= bio_sectors(bio))
+               return bio;
+
+       return bio_split(bio, sectors, gfp, bs);
+}
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
@@ -227,7 +327,8 @@ extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 
-extern void __bio_clone(struct bio *, struct bio *);
+extern void __bio_clone_fast(struct bio *, struct bio *);
+extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
@@ -254,6 +355,7 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 }
 
 extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -262,12 +364,12 @@ extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
@@ -357,47 +459,17 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 }
 #endif
 
-static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
 {
-       return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+       return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
 }
 #define __bio_kunmap_irq(buf, flags)   bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-       __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+       __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
 #define bio_kunmap_irq(buf,flags)      __bio_kunmap_irq(buf, flags)
 
-/*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-       if (bio && bio->bi_vcnt)
-               return true;
-
-       return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-       if (!bio_has_data(bio))
-               return false;
-
-       if (bio->bi_rw & REQ_WRITE_SAME)
-               return false;
-
-       return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-               return false;
-
-       return true;
-}
-
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
@@ -559,16 +631,12 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
-#define bip_vec(bip)           bip_vec_idx(bip, 0)
 
-#define __bip_for_each_vec(bvl, bip, i, start_idx)                     \
-       for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);    \
-            i < (bip)->bip_vcnt;                                       \
-            bvl++, i++)
 
-#define bip_for_each_vec(bvl, bip, i)                                  \
-       __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
+
+#define bip_for_each_vec(bvl, bip, iter)                               \
+       for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)                  \
        for_each_bio(_bio)                                              \
@@ -586,7 +654,6 @@ extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
@@ -630,12 +697,6 @@ static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        return 0;
 }
 
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
-                                      int sectors)
-{
-       return;
-}
-
 static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
 {
index ab0e9b2025b36d401443f213a646fb68fe392605..161b23105b1ec9d90f3520f08fb66d0d9be66358 100644 (file)
@@ -113,7 +113,6 @@ enum {
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
-void blk_mq_free_queue(struct request_queue *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
@@ -159,16 +158,16 @@ static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)                              \
-       for ((i) = 0, hctx = (q)->queue_hw_ctx[0];                      \
-            (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+       for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
+            ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define queue_for_each_ctx(q, ctx, i)                                  \
-       for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0);             \
-            (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+       for ((i) = 0; (i) < (q)->nr_queues &&                           \
+            ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)                                        \
-       for ((i) = 0, ctx = (hctx)->ctxs[0];                            \
-            (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+       for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
+            ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
 #define blk_ctx_sum(q, sum)                                            \
 ({                                                                     \
index 238ef0ed62f85f18085b6446bc681d8c18d674dc..bbc3a6c88fce3410b954b6c91c407297e2f03e7f 100644 (file)
@@ -28,13 +28,22 @@ struct bio_vec {
        unsigned int    bv_offset;
 };
 
+struct bvec_iter {
+       sector_t                bi_sector;      /* device address in 512 byte
+                                                  sectors */
+       unsigned int            bi_size;        /* residual I/O count */
+
+       unsigned int            bi_idx;         /* current index into bvl_vec */
+
+       unsigned int            bi_bvec_done;   /* number of bytes completed in
+                                                  current bvec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-       sector_t                bi_sector;      /* device address in 512 byte
-                                                  sectors */
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
@@ -42,16 +51,13 @@ struct bio {
                                                 * top bits priority
                                                 */
 
-       unsigned short          bi_vcnt;        /* how many bio_vec's */
-       unsigned short          bi_idx;         /* current index into bvl_vec */
+       struct bvec_iter        bi_iter;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;
 
-       unsigned int            bi_size;        /* residual I/O count */
-
        /*
         * To keep track of the max segment size, we account for the
         * sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
+       atomic_t                bi_remaining;
+
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
@@ -74,11 +82,13 @@ struct bio {
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+       unsigned short          bi_vcnt;        /* how many bio_vec's */
+
        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */
 
-       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
+       unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
 
        atomic_t                bi_cnt;         /* pin count */
 
index 1b135d49b27985d3243cdbf92d6002ce5eea8597..0375654adb28cb423911aa7afccce6eb1f015e14 100644 (file)
@@ -291,6 +291,7 @@ struct queue_limits {
        unsigned char           discard_misaligned;
        unsigned char           cluster;
        unsigned char           discard_zeroes_data;
+       unsigned char           raid_partial_stripes_expensive;
 };
 
 struct request_queue {
@@ -735,7 +736,7 @@ struct rq_map_data {
 };
 
 struct req_iterator {
-       int i;
+       struct bvec_iter iter;
        struct bio *bio;
 };
 
@@ -748,10 +749,11 @@ struct req_iterator {
 
 #define rq_for_each_segment(bvl, _rq, _iter)                   \
        __rq_for_each_bio(_iter.bio, _rq)                       \
-               bio_for_each_segment(bvl, _iter.bio, _iter.i)
+               bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 
-#define rq_iter_last(rq, _iter)                                        \
-               (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter)                              \
+               (_iter.bio->bi_next == NULL &&                  \
+                bio_iter_last(bvec, _iter.iter))
 
 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 # error        "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
index 20ee8b63a96848ad1bc63fb29ce97c853502d700..d21f2dba07314c48dce2414c4be23d2191180c81 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
+#include <linux/blk_types.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -119,8 +120,7 @@ struct ceph_msg_data_cursor {
 #ifdef CONFIG_BLOCK
                struct {                                /* bio */
                        struct bio      *bio;           /* bio from list */
-                       unsigned int    vector_index;   /* vector from bio */
-                       unsigned int    vector_offset;  /* bytes from vector */
+                       struct bvec_iter bvec_iter;
                };
 #endif /* CONFIG_BLOCK */
                struct {                                /* pages */
index f1a098a4450c0849cdf2fab128d179b92e98d0bc..939533da93a7355be0a40422627090f1aad751b2 100644 (file)
@@ -488,6 +488,8 @@ struct clk_onecell_data {
        unsigned int clk_num;
 };
 
+extern struct of_device_id __clk_of_table;
+
 #define CLK_OF_DECLARE(name, compat, fn)                       \
        static const struct of_device_id __clk_of_table_##name  \
                __used __section(__clk_of_table)                \
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
new file mode 100644 (file)
index 0000000..092b641
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+ * TI clock drivers support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_CLK_TI_H__
+#define __LINUX_CLK_TI_H__
+
+#include <linux/clkdev.h>
+
+/**
+ * struct dpll_data - DPLL registers and integration data
+ * @mult_div1_reg: register containing the DPLL M and N bitfields
+ * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
+ * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
+ * @clk_bypass: struct clk pointer to the clock's bypass clock input
+ * @clk_ref: struct clk pointer to the clock's reference clock input
+ * @control_reg: register containing the DPLL mode bitfield
+ * @enable_mask: mask of the DPLL mode bitfield in @control_reg
+ * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
+ * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_m4xen: cache of the last M4X result of
+ *                     omap4_dpll_regm4xen_round_rate()
+ * @last_rounded_lpmode: cache of the last lpmode result of
+ *                      omap4_dpll_lpmode_recalc()
+ * @max_multiplier: maximum valid non-bypass multiplier value (actual)
+ * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
+ * @min_divider: minimum valid non-bypass divider value (actual)
+ * @max_divider: maximum valid non-bypass divider value (actual)
+ * @modes: possible values of @enable_mask
+ * @autoidle_reg: register containing the DPLL autoidle mode bitfield
+ * @idlest_reg: register containing the DPLL idle status bitfield
+ * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
+ * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
+ * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
+ * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
+ * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
+ * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
+ * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
+ * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @flags: DPLL type/features (see below)
+ *
+ * Possible values for @flags:
+ * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs)
+ *
+ * @freqsel_mask is only used on the OMAP34xx family and AM35xx.
+ *
+ * XXX Some DPLLs have multiple bypass inputs, so it's not technically
+ * correct to only have one @clk_bypass pointer.
+ *
+ * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m,
+ * @last_rounded_n) should be separated from the runtime-fixed fields
+ * and placed into a different structure, so that the runtime-fixed data
+ * can be placed into read-only space.
+ */
+struct dpll_data {
+       void __iomem            *mult_div1_reg;
+       u32                     mult_mask;
+       u32                     div1_mask;
+       struct clk              *clk_bypass;
+       struct clk              *clk_ref;
+       void __iomem            *control_reg;
+       u32                     enable_mask;
+       unsigned long           last_rounded_rate;
+       u16                     last_rounded_m;
+       u8                      last_rounded_m4xen;
+       u8                      last_rounded_lpmode;
+       u16                     max_multiplier;
+       u8                      last_rounded_n;
+       u8                      min_divider;
+       u16                     max_divider;
+       u8                      modes;
+       void __iomem            *autoidle_reg;
+       void __iomem            *idlest_reg;
+       u32                     autoidle_mask;
+       u32                     freqsel_mask;
+       u32                     idlest_mask;
+       u32                     dco_mask;
+       u32                     sddiv_mask;
+       u32                     lpmode_mask;
+       u32                     m4xen_mask;
+       u8                      auto_recal_bit;
+       u8                      recal_en_bit;
+       u8                      recal_st_bit;
+       u8                      flags;
+};
+
+struct clk_hw_omap_ops;
+
+/**
+ * struct clk_hw_omap - OMAP struct clk
+ * @node: list_head connecting this clock into the full clock list
+ * @enable_reg: register to write to enable the clock (see @enable_bit)
+ * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
+ * @flags: see "struct clk.flags possibilities" above
+ * @clksel_reg: for clksel clks, register va containing src/divisor select
+ * @clksel_mask: bitmask in @clksel_reg for the src/divisor selector
+ * @clksel: for clksel clks, pointer to struct clksel for this clock
+ * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock
+ * @clkdm_name: clockdomain name that this clock is contained in
+ * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime
+ * @ops: clock ops for this clock
+ */
+struct clk_hw_omap {
+       struct clk_hw           hw;
+       struct list_head        node;
+       unsigned long           fixed_rate;
+       u8                      fixed_div;
+       void __iomem            *enable_reg;
+       u8                      enable_bit;
+       u8                      flags;
+       void __iomem            *clksel_reg;
+       u32                     clksel_mask;
+       const struct clksel     *clksel;
+       struct dpll_data        *dpll_data;
+       const char              *clkdm_name;
+       struct clockdomain      *clkdm;
+       const struct clk_hw_omap_ops    *ops;
+};
+
+/*
+ * struct clk_hw_omap.flags possibilities
+ *
+ * XXX document the rest of the clock flags here
+ *
+ * ENABLE_REG_32BIT: (OMAP1 only) clock control register must be accessed
+ *     with 32bit ops, by default OMAP1 uses 16bit ops.
+ * CLOCK_IDLE_CONTROL: (OMAP1 only) clock has autoidle support.
+ * CLOCK_NO_IDLE_PARENT: (OMAP1 only) when clock is enabled, its parent
+ *     clock is put to no-idle mode.
+ * ENABLE_ON_INIT: Clock is enabled on init.
+ * INVERT_ENABLE: By default, clock enable bit behavior is '1' enable, '0'
+ *     disable. This inverts the behavior making '0' enable and '1' disable.
+ * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL
+ *     bits share the same register.  This flag allows the
+ *     omap4_dpllmx*() code to determine which GATE_CTRL bit field
+ *     should be used.  This is a temporary solution - a better approach
+ *     would be to associate clock type-specific data with the clock,
+ *     similar to the struct dpll_data approach.
+ * MEMMAP_ADDRESSING: Use memmap addressing to access clock registers.
+ */
+#define ENABLE_REG_32BIT       (1 << 0)        /* Use 32-bit access */
+#define CLOCK_IDLE_CONTROL     (1 << 1)
+#define CLOCK_NO_IDLE_PARENT   (1 << 2)
+#define ENABLE_ON_INIT         (1 << 3)        /* Enable upon framework init */
+#define INVERT_ENABLE          (1 << 4)        /* 0 enables, 1 disables */
+#define CLOCK_CLKOUTX2         (1 << 5)
+#define MEMMAP_ADDRESSING      (1 << 6)
+
+/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
+#define DPLL_LOW_POWER_STOP    0x1
+#define DPLL_LOW_POWER_BYPASS  0x5
+#define DPLL_LOCKED            0x7
+
+/* DPLL Type and DCO Selection Flags */
+#define DPLL_J_TYPE            0x1
+
+/* Composite clock component types */
+enum {
+       CLK_COMPONENT_TYPE_GATE = 0,
+       CLK_COMPONENT_TYPE_DIVIDER,
+       CLK_COMPONENT_TYPE_MUX,
+       CLK_COMPONENT_TYPE_MAX,
+};
+
+/**
+ * struct ti_dt_clk - OMAP DT clock alias declarations
+ * @lk: clock lookup definition
+ * @node_name: clock DT node to map to
+ */
+struct ti_dt_clk {
+       struct clk_lookup               lk;
+       char                            *node_name;
+};
+
+#define DT_CLK(dev, con, name)         \
+       {                               \
+               .lk = {                 \
+                       .dev_id = dev,  \
+                       .con_id = con,  \
+               },                      \
+               .node_name = name,      \
+       }
+
+/* Maximum number of clock memmaps */
+#define CLK_MAX_MEMMAPS                        4
+
+typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
+
+/**
+ * struct clk_omap_reg - OMAP register declaration
+ * @offset: offset from the master IP module base address
+ * @index: index of the master IP module
+ */
+struct clk_omap_reg {
+       u16 offset;
+       u16 index;
+};
+
+/**
+ * struct ti_clk_ll_ops - low-level register access ops for a clock
+ * @clk_readl: pointer to register read function
+ * @clk_writel: pointer to register write function
+ *
+ * Low-level register access ops are generally used by the basic clock types
+ * (clk-gate, clk-mux, clk-divider etc.) to provide support for various
+ * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
+ * used by other hardware-specific clock drivers if needed.
+ */
+struct ti_clk_ll_ops {
+       u32     (*clk_readl)(void __iomem *reg);
+       void    (*clk_writel)(u32 val, void __iomem *reg);
+};
+
+extern struct ti_clk_ll_ops *ti_clk_ll_ops;
+
+extern const struct clk_ops ti_clk_divider_ops;
+extern const struct clk_ops ti_clk_mux_ops;
+
+#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+
+void omap2_init_clk_hw_omap_clocks(struct clk *clk);
+int omap3_noncore_dpll_enable(struct clk_hw *hw);
+void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate);
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+                                        unsigned long parent_rate);
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+                                   unsigned long target_rate,
+                                   unsigned long *parent_rate);
+u8 omap2_init_dpll_parent(struct clk_hw *hw);
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+                          unsigned long *parent_rate);
+void omap2_init_clk_clkdm(struct clk_hw *clk);
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+                                   unsigned long parent_rate);
+int omap2_clkops_enable_clkdm(struct clk_hw *hw);
+void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+int omap2_clk_disable_autoidle_all(void);
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
+int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
+                        unsigned long parent_rate);
+int omap2_dflt_clk_enable(struct clk_hw *hw);
+void omap2_dflt_clk_disable(struct clk_hw *hw);
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
+void omap3_clk_lock_dpll5(void);
+
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
+void ti_dt_clocks_register(struct ti_dt_clk *oclks);
+void ti_dt_clk_init_provider(struct device_node *np, int index);
+void ti_dt_clockdomains_setup(void);
+int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+                     ti_of_clk_init_cb_t func);
+int of_ti_clk_autoidle_setup(struct device_node *node);
+int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+
+int omap3430_dt_clk_init(void);
+int omap3630_dt_clk_init(void);
+int am35xx_dt_clk_init(void);
+int ti81xx_dt_clk_init(void);
+int omap4xxx_dt_clk_init(void);
+int omap5xxx_dt_clk_init(void);
+int dra7xx_dt_clk_init(void);
+int am33xx_dt_clk_init(void);
+int am43xx_dt_clk_init(void);
+
+#ifdef CONFIG_OF
+void of_ti_clk_allow_autoidle_all(void);
+void of_ti_clk_deny_autoidle_all(void);
+#else
+static inline void of_ti_clk_allow_autoidle_all(void) { }
+static inline void of_ti_clk_deny_autoidle_all(void) { }
+#endif
+
+extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
+extern const struct clk_hw_omap_ops clkhwops_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
+extern const struct clk_hw_omap_ops clkhwops_iclk;
+extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+
+#endif
index a0f9280421eca511b00e80f51e8e65436ec47e3a..2e6dce6e5c2acf9bae626033c700ec0da012bbcb 100644 (file)
@@ -37,9 +37,9 @@ int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                         const char *bdev);
 
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param);
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param);
 
 #endif /* CMDLINEPARSEH */
index eb8a49d75ab3155bb6910eab585e5dfed9f64dee..19f6003291def61a40a2305a453838d13c92892f 100644 (file)
@@ -327,16 +327,16 @@ asmlinkage long compat_sys_keyctl(u32 option,
                              u32 arg2, u32 arg3, u32 arg4, u32 arg5);
 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
 
-asmlinkage ssize_t compat_sys_readv(unsigned long fd,
-               const struct compat_iovec __user *vec, unsigned long vlen);
-asmlinkage ssize_t compat_sys_writev(unsigned long fd,
-               const struct compat_iovec __user *vec, unsigned long vlen);
-asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
+asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
+               const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
+               const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
                const struct compat_iovec __user *vec,
-               unsigned long vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
+               compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
                const struct compat_iovec __user *vec,
-               unsigned long vlen, u32 pos_low, u32 pos_high);
+               compat_ulong_t vlen, u32 pos_low, u32 pos_high);
 asmlinkage long comat_sys_lseek(unsigned int, compat_off_t, unsigned int);
 
 asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
@@ -422,7 +422,7 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data);
 
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
+asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
 /*
  * epoll (fs/eventpoll.c) compat bits follow ...
  */
index f4b0aa3126f5deae8ff8908375a9da1eca790ecf..a68cbe59e6ad190023e410cb32784b1fb6a67d2a 100644 (file)
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
        DM_IO_PAGE_LIST,/* Page list */
-       DM_IO_BVEC,     /* Bio vector */
+       DM_IO_BIO,      /* Bio vector */
        DM_IO_VMA,      /* Virtual memory area */
        DM_IO_KMEM,     /* Kernel memory */
 };
@@ -41,7 +41,7 @@ struct dm_io_memory {
 
        union {
                struct page_list *pl;
-               struct bio_vec *bvec;
+               struct bio *bio;
                void *vma;
                void *addr;
        } ptr;
index 57c9a8ae4f2df2127dffe7e88aee95b5f2cdce1a..7ac17f57250e48feb5024cb27fe5389972186001 100644 (file)
@@ -27,7 +27,6 @@ struct root_entry;
 
 
 #ifdef CONFIG_INTEL_IOMMU
-extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
@@ -41,9 +40,6 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
 {
        return 0;
 }
-static inline void free_dmar_iommu(struct intel_iommu *iommu)
-{
-}
 #define dmar_disabled  (1)
 #define intel_iommu_enabled (0)
 #endif
index 6fd9390ccf91ab541f4aba92ac80da639bdf58ad..c5c92d59e5316820d0ae078fe7ce78e8407f2ede 100644 (file)
@@ -257,7 +257,7 @@ struct dma_chan_percpu {
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
- * @client-count: how many clients are using this channel
+ * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
  * @private: private data for certain client-channel associations
  */
@@ -279,10 +279,10 @@ struct dma_chan {
 
 /**
  * struct dma_chan_dev - relate sysfs device node to backing channel device
- * @chan - driver channel device
- * @device - sysfs device
- * @dev_id - parent dma_device dev_id
- * @idr_ref - reference count to gate release of dma_device dev_id
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
        struct dma_chan *chan;
@@ -306,9 +306,8 @@ enum dma_slave_buswidth {
 /**
  * struct dma_slave_config - dma slave channel runtime config
  * @direction: whether the data shall go in or out on this slave
- * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
- * legal values, DMA_BIDIRECTIONAL is not acceptable since we
- * need to differentiate source and target addresses.
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values.
  * @src_addr: this is the physical address where DMA slave data
  * should be read (RX), if the source is memory this argument is
  * ignored.
index b029d1aa2d12a6d18f38f6472ef63bf44bb44568..eccb0c0c6cf633c35e7a0ebfd5b9b5511a19bf39 100644 (file)
@@ -33,6 +33,7 @@ struct acpi_dmar_header;
 #define DMAR_X2APIC_OPT_OUT    0x2
 
 struct intel_iommu;
+
 #ifdef CONFIG_DMAR_TABLE
 extern struct acpi_table_header *dmar_tbl;
 struct dmar_drhd_unit {
@@ -52,6 +53,10 @@ extern struct list_head dmar_drhd_units;
 #define for_each_drhd_unit(drhd) \
        list_for_each_entry(drhd, &dmar_drhd_units, list)
 
+#define for_each_active_drhd_unit(drhd)                                        \
+       list_for_each_entry(drhd, &dmar_drhd_units, list)               \
+               if (drhd->ignored) {} else
+
 #define for_each_active_iommu(i, drhd)                                 \
        list_for_each_entry(drhd, &dmar_drhd_units, list)               \
                if (i=drhd->iommu, drhd->ignored) {} else
@@ -62,13 +67,13 @@ extern struct list_head dmar_drhd_units;
 
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+                               struct pci_dev ***devices, u16 segment);
+extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt);
 
 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
-
-extern int parse_ioapics_under_ir(void);
-extern int alloc_iommu(struct dmar_drhd_unit *);
 #else
 static inline int detect_intel_iommu(void)
 {
@@ -157,8 +162,6 @@ struct dmar_atsr_unit {
 int dmar_parse_rmrr_atsr_dev(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
-                               struct pci_dev ***devices, u16 segment);
 extern int intel_iommu_init(void);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
index 70c4836e4a9f8fb16dd578bbbcfa25ecfec0b129..fe6ac956550e4e099a04889e9b33bf0f9a917c2e 100644 (file)
@@ -613,8 +613,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
 extern int unlink_framebuffer(struct fb_info *fb_info);
-extern void remove_conflicting_framebuffers(struct apertures_struct *a,
-                               const char *name, bool primary);
+extern int remove_conflicting_framebuffers(struct apertures_struct *a,
+                                          const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
 extern int fb_show_logo(struct fb_info *fb_info, int rotate);
 extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
index 7d8d5e608594c911c2eacc78dbb094c5384fd4af..3d286ff49ab0c82309ec3499bf27cc77f75f3670 100644 (file)
@@ -322,10 +322,10 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
 extern void fsnotify_destroy_event(struct fsnotify_group *group,
                                   struct fsnotify_event *event);
 /* attach the event to the group notification queue */
-extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
-                                                       struct fsnotify_event *event,
-                                                       struct fsnotify_event *(*merge)(struct list_head *,
-                                                                                       struct fsnotify_event *));
+extern int fsnotify_add_notify_event(struct fsnotify_group *group,
+                                    struct fsnotify_event *event,
+                                    int (*merge)(struct list_head *,
+                                                 struct fsnotify_event *));
 /* true if the group notification queue is empty */
 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
 /* return, but do not dequeue the first event on the notification queue */
index f5b9b87ac9a9e807513686f2b97b96ad48432810..3af847273277785332e994aedc2559db74c1623a 100644 (file)
@@ -281,4 +281,10 @@ int host1x_device_exit(struct host1x_device *device);
 int host1x_client_register(struct host1x_client *client);
 int host1x_client_unregister(struct host1x_client *client);
 
+struct tegra_mipi_device;
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+
 #endif
index 017fb40f702ae241784b68c0b4aa61b496869491..8f1b086ca5bcc962cbd9d509c4235346c46fefbc 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * i2c-smbus.h - SMBus extensions to the I2C protocol
  *
- * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index d9c8dbd3373f90d298ac66001ccdd13fa6f54ddf..deddeb8c337cff2c67d2b9205065741539dbf97c 100644 (file)
@@ -342,11 +342,25 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
 }
 #endif /* I2C_BOARDINFO */
 
-/*
+/**
+ * struct i2c_algorithm - represent I2C transfer method
+ * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
+ *   defined by the msgs array, with num messages available to transfer via
+ *   the adapter specified by adap.
+ * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ *   is not present, then the bus layer will try and convert the SMBus calls
+ *   into I2C transfers instead.
+ * @functionality: Return the flags that this algorithm/adapter pair supports
+ *   from the I2C_FUNC_* flags.
+ *
  * The following structs are for those who like to implement new bus drivers:
  * i2c_algorithm is the interface to a class of hardware solutions which can
  * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
  * to name two of the most common.
+ *
+ * The return codes from the @master_xfer field should indicate the type of
+ * error code that occured during the transfer, as documented in the kernel
+ * Documentation file Documentation/i2c/fault-codes.
  */
 struct i2c_algorithm {
        /* If an adapter algorithm can't do I2C-level access, set master_xfer
index d380c5e680086ec4627c745f86dc197e7110ea58..2c4bed593b32367559cc2aff89e40b88306bab90 100644 (file)
@@ -288,6 +288,7 @@ struct q_inval {
 
 struct ir_table {
        struct irte *base;
+       unsigned long *bitmap;
 };
 #endif
 
@@ -347,8 +348,6 @@ static inline void __iommu_flush_cache(
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
-extern int alloc_iommu(struct dmar_drhd_unit *drhd);
-extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern int dmar_reenable_qi(struct intel_iommu *iommu);
index a444c790fa7235e2bc7247313804a5062d0a4db8..b96a5b2136e46be7fbeec8235c10a918df737ae9 100644 (file)
 #include <linux/types.h>
 #include <trace/events/iommu.h>
 
-#define IOMMU_READ     (1)
-#define IOMMU_WRITE    (2)
-#define IOMMU_CACHE    (4) /* DMA cache coherency */
+#define IOMMU_READ     (1 << 0)
+#define IOMMU_WRITE    (1 << 1)
+#define IOMMU_CACHE    (1 << 2) /* DMA cache coherency */
+#define IOMMU_EXEC     (1 << 3)
 
 struct iommu_ops;
 struct iommu_group;
@@ -247,6 +248,11 @@ static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
        return NULL;
 }
 
+static inline struct iommu_group *iommu_group_get_by_id(int id)
+{
+       return NULL;
+}
+
 static inline void iommu_domain_free(struct iommu_domain *domain)
 {
 }
@@ -291,8 +297,8 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad
        return 0;
 }
 
-static inline int domain_has_cap(struct iommu_domain *domain,
-                                unsigned long cap)
+static inline int iommu_domain_has_cap(struct iommu_domain *domain,
+                                      unsigned long cap)
 {
        return 0;
 }
index d3e8ad23a8e0238645fe553f8183a2ad74bf5fc3..a6a42dd024661324dbeed5b9cfaa028744bae154 100644 (file)
@@ -6,6 +6,11 @@
 #include <linux/export.h>
 #include <asm/linkage.h>
 
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL          ;
+#endif
+
 #ifdef __cplusplus
 #define CPP_ASMLINKAGE extern "C"
 #else
 
 #ifndef ENTRY
 #define ENTRY(name) \
-  .globl name; \
-  ALIGN; \
-  name:
+       .globl name ASM_NL \
+       ALIGN ASM_NL \
+       name:
 #endif
 #endif /* LINKER_SCRIPT */
 
 #ifndef WEAK
 #define WEAK(name)        \
-       .weak name;        \
+       .weak name ASM_NL   \
        name:
 #endif
 
 #ifndef END
 #define END(name) \
-  .size name, .-name
+       .size name, .-name
 #endif
 
 /* If symbol 'name' is treated as a subroutine (gets called, and returns)
  */
 #ifndef ENDPROC
 #define ENDPROC(name) \
-  .type name, @function; \
-  END(name)
+       .type name, @function ASM_NL \
+       END(name)
 #endif
 
 #endif
index 6156686bf108f38b91fcfb09766f73012dac20ea..ac39d910e70bda7c209d783dc6f01464a836e08a 100644 (file)
@@ -110,9 +110,6 @@ struct mc13xxx_led_platform_data {
        int id;
        const char *name;
        const char *default_trigger;
-
-/* Three or two bits current selection depending on the led */
-       char max_current;
 };
 
 #define MAX_LED_CONTROL_REGS   6
@@ -121,7 +118,7 @@ struct mc13xxx_leds_platform_data {
        struct mc13xxx_led_platform_data *led;
        int num_leds;
 
-/* LED Control 0 */
+/* MC13783 LED Control 0 */
 #define MC13783_LED_C0_ENABLE          (1 << 0)
 #define MC13783_LED_C0_TRIODE_MD       (1 << 7)
 #define MC13783_LED_C0_TRIODE_AD       (1 << 8)
@@ -129,21 +126,43 @@ struct mc13xxx_leds_platform_data {
 #define MC13783_LED_C0_BOOST           (1 << 10)
 #define MC13783_LED_C0_ABMODE(x)       (((x) & 0x7) << 11)
 #define MC13783_LED_C0_ABREF(x)                (((x) & 0x3) << 14)
-/* LED Control 1 */
+/* MC13783 LED Control 1 */
 #define MC13783_LED_C1_TC1HALF         (1 << 18)
 #define MC13783_LED_C1_SLEWLIM         (1 << 23)
-/* LED Control 2 */
+/* MC13783 LED Control 2 */
+#define MC13783_LED_C2_CURRENT_MD(x)   (((x) & 0x7) << 0)
+#define MC13783_LED_C2_CURRENT_AD(x)   (((x) & 0x7) << 3)
+#define MC13783_LED_C2_CURRENT_KP(x)   (((x) & 0x7) << 6)
 #define MC13783_LED_C2_PERIOD(x)       (((x) & 0x3) << 21)
 #define MC13783_LED_C2_SLEWLIM         (1 << 23)
-/* LED Control 3 */
+/* MC13783 LED Control 3 */
+#define MC13783_LED_C3_CURRENT_R1(x)   (((x) & 0x3) << 0)
+#define MC13783_LED_C3_CURRENT_G1(x)   (((x) & 0x3) << 2)
+#define MC13783_LED_C3_CURRENT_B1(x)   (((x) & 0x3) << 4)
 #define MC13783_LED_C3_PERIOD(x)       (((x) & 0x3) << 21)
 #define MC13783_LED_C3_TRIODE_TC1      (1 << 23)
-/* LED Control 4 */
+/* MC13783 LED Control 4 */
+#define MC13783_LED_C4_CURRENT_R2(x)   (((x) & 0x3) << 0)
+#define MC13783_LED_C4_CURRENT_G2(x)   (((x) & 0x3) << 2)
+#define MC13783_LED_C4_CURRENT_B2(x)   (((x) & 0x3) << 4)
 #define MC13783_LED_C4_PERIOD(x)       (((x) & 0x3) << 21)
 #define MC13783_LED_C4_TRIODE_TC2      (1 << 23)
-/* LED Control 5 */
+/* MC13783 LED Control 5 */
+#define MC13783_LED_C5_CURRENT_R3(x)   (((x) & 0x3) << 0)
+#define MC13783_LED_C5_CURRENT_G3(x)   (((x) & 0x3) << 2)
+#define MC13783_LED_C5_CURRENT_B3(x)   (((x) & 0x3) << 4)
 #define MC13783_LED_C5_PERIOD(x)       (((x) & 0x3) << 21)
 #define MC13783_LED_C5_TRIODE_TC3      (1 << 23)
+/* MC13892 LED Control 0 */
+#define MC13892_LED_C0_CURRENT_MD(x)   (((x) & 0x7) << 9)
+#define MC13892_LED_C0_CURRENT_AD(x)   (((x) & 0x7) << 21)
+/* MC13892 LED Control 1 */
+#define MC13892_LED_C1_CURRENT_KP(x)   (((x) & 0x7) << 9)
+/* MC13892 LED Control 2 */
+#define MC13892_LED_C2_CURRENT_R(x)    (((x) & 0x7) << 9)
+#define MC13892_LED_C2_CURRENT_G(x)    (((x) & 0x7) << 21)
+/* MC13892 LED Control 3 */
+#define MC13892_LED_C3_CURRENT_B(x)    (((x) & 0x7) << 9)
        u32 led_control[MAX_LED_CONTROL_REGS];
 };
 
index 68891313875d756a4151aa54847214277e62ba79..628a6a21ddf035063c07051c95b25b24686fb246 100644 (file)
@@ -3,6 +3,6 @@
 
 #include <linux/mtd/mtd.h>
 int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
-                       unsigned long size, char *name);
+                       unsigned long size, const char *name);
 
 #endif /* __MTD_MTDRAM_H__ */
index 9e6c8f9f306e016923a0c32d3ae0e2d9f0ce0d1f..32f8612469d8d2d2c5a659f9326551bcbbd8806b 100644 (file)
@@ -219,6 +219,9 @@ struct nand_chip;
 /* ONFI feature address */
 #define ONFI_FEATURE_ADDR_TIMING_MODE  0x1
 
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY   0x89
+
 /* ONFI subfeature parameters length */
 #define ONFI_SUBFEATURE_PARAM_LEN      4
 
@@ -279,16 +282,17 @@ struct nand_onfi_params {
        __le16 io_pin_capacitance_typ;
        __le16 input_pin_capacitance_typ;
        u8 input_pin_capacitance_max;
-       u8 driver_strenght_support;
+       u8 driver_strength_support;
        __le16 t_int_r;
        __le16 t_ald;
        u8 reserved4[7];
 
        /* vendor */
-       u8 reserved5[90];
+       __le16 vendor_revision;
+       u8 vendor[88];
 
        __le16 crc;
-} __attribute__((packed));
+} __packed;
 
 #define ONFI_CRC_BASE  0x4F4E
 
@@ -326,6 +330,26 @@ struct onfi_ext_param_page {
         */
 } __packed;
 
+struct nand_onfi_vendor_micron {
+       u8 two_plane_read;
+       u8 read_cache;
+       u8 read_unique_id;
+       u8 dq_imped;
+       u8 dq_imped_num_settings;
+       u8 dq_imped_feat_addr;
+       u8 rb_pulldown_strength;
+       u8 rb_pulldown_strength_feat_addr;
+       u8 rb_pulldown_strength_num_settings;
+       u8 otp_mode;
+       u8 otp_page_start;
+       u8 otp_data_prot_addr;
+       u8 otp_num_pages;
+       u8 otp_feat_addr;
+       u8 read_retry_options;
+       u8 reserved[72];
+       u8 param_revision;
+} __packed;
+
 /**
  * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
  * @lock:               protection lock
@@ -432,6 +456,8 @@ struct nand_buffers {
  *                     flash device.
  * @read_byte:         [REPLACEABLE] read one byte from the chip
  * @read_word:         [REPLACEABLE] read one word from the chip
+ * @write_byte:                [REPLACEABLE] write a single byte to the chip on the
+ *                     low 8 I/O lines
  * @write_buf:         [REPLACEABLE] write data from the buffer to the chip
  * @read_buf:          [REPLACEABLE] read data from the chip into the buffer
  * @select_chip:       [REPLACEABLE] select chip nr
@@ -451,6 +477,8 @@ struct nand_buffers {
  *                     commands to the chip.
  * @waitfunc:          [REPLACEABLE] hardwarespecific function for wait on
  *                     ready.
+ * @setup_read_retry:  [FLASHSPECIFIC] flash (vendor) specific function for
+ *                     setting the read-retry mode. Mostly needed for MLC NAND.
  * @ecc:               [BOARDSPECIFIC] ECC control structure
  * @buffers:           buffer structure for read/write
  * @hwcontrol:         platform-specific hardware control structure
@@ -497,6 +525,7 @@ struct nand_buffers {
  *                     non 0 if ONFI supported.
  * @onfi_params:       [INTERN] holds the ONFI page parameter when ONFI is
  *                     supported, 0 otherwise.
+ * @read_retries:      [INTERN] the number of read retry modes supported
  * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
  * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
  * @bbt:               [INTERN] bad block table pointer
@@ -521,6 +550,7 @@ struct nand_chip {
 
        uint8_t (*read_byte)(struct mtd_info *mtd);
        u16 (*read_word)(struct mtd_info *mtd);
+       void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
        void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
        void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
        void (*select_chip)(struct mtd_info *mtd, int chip);
@@ -544,6 +574,7 @@ struct nand_chip {
                        int feature_addr, uint8_t *subfeature_para);
        int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
                        int feature_addr, uint8_t *subfeature_para);
+       int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
 
        int chip_delay;
        unsigned int options;
@@ -568,6 +599,8 @@ struct nand_chip {
        int onfi_version;
        struct nand_onfi_params onfi_params;
 
+       int read_retries;
+
        flstate_t state;
 
        uint8_t *oob_poi;
@@ -600,6 +633,8 @@ struct nand_chip {
 #define NAND_MFR_AMD           0x01
 #define NAND_MFR_MACRONIX      0xc2
 #define NAND_MFR_EON           0x92
+#define NAND_MFR_SANDISK       0x45
+#define NAND_MFR_INTEL         0x89
 
 /* The maximum expected count of bytes in the NAND ID sequence */
 #define NAND_MAX_ID_LEN 8
index 1f8d24bdafdac2bc28789c777315068311368765..6a35e6de5da174fc7b9f82f67be421f4d74a1cc3 100644 (file)
@@ -37,7 +37,7 @@
  */
 
 struct mtd_partition {
-       char *name;                     /* identifier string */
+       const char *name;               /* identifier string */
        uint64_t size;                  /* partition size */
        uint64_t offset;                /* offset within the master MTD space */
        uint32_t mask_flags;            /* master MTD flags to mask out for this partition */
@@ -76,11 +76,11 @@ struct mtd_part_parser {
                        struct mtd_part_parser_data *);
 };
 
-extern int register_mtd_parser(struct mtd_part_parser *parser);
-extern int deregister_mtd_parser(struct mtd_part_parser *parser);
+extern void register_mtd_parser(struct mtd_part_parser *parser);
+extern void deregister_mtd_parser(struct mtd_part_parser *parser);
 
 int mtd_is_partition(const struct mtd_info *mtd);
-int mtd_add_partition(struct mtd_info *master, char *name,
+int mtd_add_partition(struct mtd_info *master, const char *name,
                      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
 uint64_t mtd_get_device_size(const struct mtd_info *mtd);
index 6f10e938ff7e74d4db19b991a55193f5073946e7..cb32d9c1e8dc83502e39143a8c25913bab3193d5 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #ifndef __LINUX_OF_MTD_H
-#define __LINUX_OF_NET_H
+#define __LINUX_OF_MTD_H
 
 #ifdef CONFIG_OF_MTD
 
index 3a3942823c209163501b1dcd49a550d1664c56da..eabac4e2fc993b114ae940b1be960f04015ade0f 100644 (file)
@@ -43,6 +43,11 @@ struct sdma_script_start_addrs {
        s32 dptc_dvfs_addr;
        s32 utra_addr;
        s32 ram_code_start_addr;
+       /* End of v1 array */
+       s32 mcu_2_ssish_addr;
+       s32 ssish_2_mcu_addr;
+       s32 hdmi_dma_addr;
+       /* End of v2 array */
 };
 
 /**
index beac6b8b6a7b3846cf3925cab343da187fc686e1..bcbc6c3c14c0da82547b861e7108ccee41351f71 100644 (file)
@@ -39,6 +39,7 @@ enum sdma_peripheral_type {
        IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
        IMX_DMATYPE_ASRC,       /* ASRC */
        IMX_DMATYPE_ESAI,       /* ESAI */
+       IMX_DMATYPE_SSI_DUAL,   /* SSI Dual FIFO */
 };
 
 enum imx_dma_prio {
index 239e0fc1bb1f9af55a1abecaf0a68b01d0faa214..66574ea39f97dc8b22e49d6625e0a5c16292b821 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/arm/mach-mmp/include/mach/sram.h
- *
  *  SRAM Memory Management
  *
  *  Copyright (c) 2011 Marvell Semiconductors Inc.
@@ -11,8 +9,8 @@
  *
  */
 
-#ifndef __ASM_ARCH_SRAM_H
-#define __ASM_ARCH_SRAM_H
+#ifndef __DMA_MMP_TDMA_H
+#define __DMA_MMP_TDMA_H
 
 #include <linux/genalloc.h>
 
@@ -32,4 +30,4 @@ struct sram_platdata {
 
 extern struct gen_pool *sram_get_gpool(char *pool_name);
 
-#endif /* __ASM_ARCH_SRAM_H */
+#endif /* __DMA_MMP_TDMA_H */
index 8ec18f64e3965e68fcf6ffbfae436687987f3466..92ffd3245f76c67bc2c02a36067a77425338be9f 100644 (file)
@@ -1,11 +1,9 @@
 /*
- * arch/arm/plat-orion/include/plat/mv_xor.h
- *
  * Marvell XOR platform device data definition file.
  */
 
-#ifndef __PLAT_MV_XOR_H
-#define __PLAT_MV_XOR_H
+#ifndef __DMA_MV_XOR_H
+#define __DMA_MV_XOR_H
 
 #include <linux/dmaengine.h>
 #include <linux/mbus.h>
index 24b536ebdf13de364c5c5bf0f0106d654eb94d31..d2be19a51acde32102710cdf0db536fe4d364f01 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * arch/arm/mach-kirkwood/include/mach/leds-netxbig.h
- *
  * Platform data structure for netxbig LED driver
  *
  * This file is licensed under the terms of the GNU General Public
@@ -8,8 +6,8 @@
  * warranty of any kind, whether express or implied.
  */
 
-#ifndef __MACH_LEDS_NETXBIG_H
-#define __MACH_LEDS_NETXBIG_H
+#ifndef __LEDS_KIRKWOOD_NETXBIG_H
+#define __LEDS_KIRKWOOD_NETXBIG_H
 
 struct netxbig_gpio_ext {
        unsigned        *addr;
@@ -52,4 +50,4 @@ struct netxbig_led_platform_data {
        int                     num_leds;
 };
 
-#endif /* __MACH_LEDS_NETXBIG_H */
+#endif /* __LEDS_KIRKWOOD_NETXBIG_H */
index e21272e5f66847112a7805ed90730e0eca612b42..6a9fed57f34620c20a931f317150e5a381058e8f 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * arch/arm/mach-kirkwood/include/mach/leds-ns2.h
- *
  * Platform data structure for Network Space v2 LED driver
  *
  * This file is licensed under the terms of the GNU General Public
@@ -8,8 +6,8 @@
  * warranty of any kind, whether express or implied.
  */
 
-#ifndef __MACH_LEDS_NS2_H
-#define __MACH_LEDS_NS2_H
+#ifndef __LEDS_KIRKWOOD_NS2_H
+#define __LEDS_KIRKWOOD_NS2_H
 
 struct ns2_led {
        const char      *name;
@@ -23,4 +21,4 @@ struct ns2_led_platform_data {
        struct ns2_led  *leds;
 };
 
-#endif /* __MACH_LEDS_NS2_H */
+#endif /* __LEDS_KIRKWOOD_NS2_H */
index 4da5bfa2147f245ab980c3a1514647ef40eecf4b..3e9dd6676b9799aba4999d422a624f9a3a991b44 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * arch/arm/plat-omap/include/mach/nand.h
- *
  * Copyright (C) 2006 Micron Technology Inc.
  *
  * This program is free software; you can redistribute it and/or modify
index ffb801998e5dfa10c450f99b2dbb943e8f7a05d4..a941471249299c842a7cfea12de9a4091d2e44e2 100644 (file)
@@ -55,6 +55,9 @@ struct pxa3xx_nand_platform_data {
        /* indicate how many chip selects will be used */
        int     num_cs;
 
+       /* use an flash-based bad block table */
+       bool    flash_bbt;
+
        const struct mtd_partition              *parts[NUM_CHIP_SELECT];
        unsigned int                            nr_parts[NUM_CHIP_SELECT];
 
index e9a9fb188f972555172349a8e0250ac237c7db5d..56ff0e6f5ad1dae5bfe9d774a71cb85a32856afa 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * arch/arm/plat-omap/include/mach/onenand.h
- *
  * Copyright (C) 2006 Nokia Corporation
  * Author: Juha Yrjola
  *
index 9f3c180834d141a269f7c4ec84c8021532029c9c..a7ce77c7c1a8e5d5d7009e9122c1684fa3b70d95 100644 (file)
@@ -1,13 +1,11 @@
 /*
- * arch/arm/plat-orion/include/plat/orion_nand.h
- *
  * This file is licensed under the terms of the GNU General Public
  * License version 2.  This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
 
-#ifndef __PLAT_ORION_NAND_H
-#define __PLAT_ORION_NAND_H
+#ifndef __MTD_ORION_NAND_H
+#define __MTD_ORION_NAND_H
 
 /*
  * Device bus NAND private data
index 54334393ab926ada9935fa5aba6e1a3756b27a88..a947ab8b441ad968f0953e6ce15d03270325e55b 100644 (file)
@@ -7,20 +7,6 @@
 
 struct clk;
 
-/**
- * enum si5351_variant - SiLabs Si5351 chip variant
- * @SI5351_VARIANT_A: Si5351A (8 output clocks, XTAL input)
- * @SI5351_VARIANT_A3: Si5351A MSOP10 (3 output clocks, XTAL input)
- * @SI5351_VARIANT_B: Si5351B (8 output clocks, XTAL/VXCO input)
- * @SI5351_VARIANT_C: Si5351C (8 output clocks, XTAL/CLKIN input)
- */
-enum si5351_variant {
-       SI5351_VARIANT_A = 1,
-       SI5351_VARIANT_A3 = 2,
-       SI5351_VARIANT_B = 3,
-       SI5351_VARIANT_C = 4,
-};
-
 /**
  * enum si5351_pll_src - Si5351 pll clock source
  * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -115,14 +101,12 @@ struct si5351_clkout_config {
 
 /**
  * struct si5351_platform_data - Platform data for the Si5351 clock driver
- * @variant: Si5351 chip variant
  * @clk_xtal: xtal input clock
  * @clk_clkin: clkin input clock
  * @pll_src: array of pll source clock setting
  * @clkout: array of clkout configuration
  */
 struct si5351_platform_data {
-       enum si5351_variant variant;
        struct clk *clk_xtal;
        struct clk *clk_clkin;
        enum si5351_pll_src pll_src[2];
index 1f689e62e4cbe58dac7a67e990009bf95e35e99c..f589c9af8cbf1250da1945bac436f27d92987e80 100644 (file)
@@ -2456,6 +2456,7 @@ void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
 struct skb_checksum_ops {
index 6eecfc2e4f989b8e9511719cee900f75e057850c..04e76322124634af03ec734996184a5b76b5e528 100644 (file)
@@ -368,7 +368,7 @@ struct svc_program {
        struct svc_program *    pg_next;        /* other programs (same xprt) */
        u32                     pg_prog;        /* program number */
        unsigned int            pg_lovers;      /* lowest version */
-       unsigned int            pg_hivers;      /* lowest version */
+       unsigned int            pg_hivers;      /* highest version */
        unsigned int            pg_nvers;       /* number of versions */
        struct svc_version **   pg_vers;        /* version array */
        char *                  pg_name;        /* service name */
@@ -386,8 +386,10 @@ struct svc_version {
        struct svc_procedure *  vs_proc;        /* per-procedure info */
        u32                     vs_xdrsize;     /* xdrsize needed for this version */
 
-       unsigned int            vs_hidden : 1;  /* Don't register with portmapper.
+       unsigned int            vs_hidden : 1,  /* Don't register with portmapper.
                                                 * Only used for nfsacl so far. */
+                               vs_rpcb_optnl:1;/* Don't care the result of register.
+                                                * Only used for nfsv4. */
 
        /* Override dispatch function (e.g. when caching replies).
         * A return value of 0 means drop the request. 
index e4b948080d20e7a537c7a83da17b8b5b7fec0008..a67b384157689ec9fda2abfec0173122fc98a4a8 100644 (file)
@@ -142,8 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
        return x;
 }
 
-extern unsigned long global_reclaimable_pages(void);
-
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
index e2b9576d00e24772580a601a268e42bdc46e349e..7110897c3dfa595e385b8a946f7547ca05583fec 100644 (file)
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->orig_major     = d->disk->major;
                __entry->orig_minor     = d->disk->first_minor;
-               __entry->sector         = bio->bi_sector;
-               __entry->orig_sector    = bio->bi_sector - 16;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d  %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->cache_hit = hit;
                __entry->bypass = bypass;
        ),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->writeback = writeback;
                __entry->bypass = bypass;
        ),
@@ -247,7 +247,7 @@ TRACE_EVENT(bcache_btree_write,
        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->block  = b->written;
-               __entry->keys   = b->sets[b->nsets].data->keys;
+               __entry->keys   = b->keys.set[b->keys.nsets].data->keys;
        ),
 
        TP_printk("bucket %zu", __entry->bucket)
@@ -411,7 +411,7 @@ TRACE_EVENT(bcache_alloc_invalidate,
        ),
 
        TP_fast_assign(
-               __entry->free           = fifo_used(&ca->free);
+               __entry->free           = fifo_used(&ca->free[RESERVE_NONE]);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->free_inc_size  = ca->free_inc.size;
                __entry->unused         = fifo_used(&ca->unused);
@@ -422,8 +422,8 @@ TRACE_EVENT(bcache_alloc_invalidate,
 );
 
 TRACE_EVENT(bcache_alloc_fail,
-       TP_PROTO(struct cache *ca),
-       TP_ARGS(ca),
+       TP_PROTO(struct cache *ca, unsigned reserve),
+       TP_ARGS(ca, reserve),
 
        TP_STRUCT__entry(
                __field(unsigned,       free                    )
@@ -433,7 +433,7 @@ TRACE_EVENT(bcache_alloc_fail,
        ),
 
        TP_fast_assign(
-               __entry->free           = fifo_used(&ca->free);
+               __entry->free           = fifo_used(&ca->free[reserve]);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->unused         = fifo_used(&ca->unused);
                __entry->blocked        = atomic_read(&ca->set->prio_blocked);
index 4c2301d2ef1aa979ea0d6594ad1b6404368b920b..e76ae19a8d6fe65705e48104bb8212ed8bddb546 100644 (file)
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev ?
                                          bio->bi_bdev->bd_dev : 0;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 
        TP_fast_assign(
                __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
-               __entry->sector         = bio ? bio->bi_sector : 0;
+               __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
index 3b9f28dfc8492160940d28e58acf1dc9dc6e5081..67f38faac589ad52ac5850e5af602799753b8d29 100644 (file)
@@ -629,8 +629,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
                __entry->dev            = sb->s_dev;
                __entry->rw             = rw;
                __entry->type           = type;
-               __entry->sector         = bio->bi_sector;
-               __entry->size           = bio->bi_size;
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->size           = bio->bi_iter.bi_size;
        ),
 
        TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
index 76982b2a1b5831d20ce768a274d3f862d6c9e971..3dbcc1e771c03509a3c48906c2f7885416d7ec04 100644 (file)
@@ -27,8 +27,8 @@ struct ipc64_perm {
        unsigned char           __pad1[4 - sizeof(__kernel_mode_t)];
        unsigned short          seq;
        unsigned short          __pad2;
-       unsigned long           __unused1;
-       unsigned long           __unused2;
+       __kernel_ulong_t        __unused1;
+       __kernel_ulong_t        __unused2;
 };
 
 #endif /* __ASM_GENERIC_IPCBUF_H */
index aec850d9159e9a174c90fd954bdef2cf2f19e539..f55ecc43c60fa6124839c5315a5836093a446ceb 100644 (file)
@@ -35,13 +35,13 @@ struct msqid64_ds {
 #if __BITS_PER_LONG != 64
        unsigned long   __unused3;
 #endif
-       unsigned long  msg_cbytes;      /* current number of bytes on queue */
-       unsigned long  msg_qnum;        /* number of messages in queue */
-       unsigned long  msg_qbytes;      /* max number of bytes on queue */
+       __kernel_ulong_t msg_cbytes;    /* current number of bytes on queue */
+       __kernel_ulong_t msg_qnum;      /* number of messages in queue */
+       __kernel_ulong_t msg_qbytes;    /* max number of bytes on queue */
        __kernel_pid_t msg_lspid;       /* pid of last msgsnd */
        __kernel_pid_t msg_lrpid;       /* last receive pid */
-       unsigned long  __unused4;
-       unsigned long  __unused5;
+       __kernel_ulong_t __unused4;
+       __kernel_ulong_t __unused5;
 };
 
 #endif /* __ASM_GENERIC_MSGBUF_H */
index 5768fa60ac8230b2e989d911bed5fe9841b0d791..7e9fb2f0853bc0c7544173cd3874586818a46406 100644 (file)
@@ -39,21 +39,21 @@ struct shmid64_ds {
 #endif
        __kernel_pid_t          shm_cpid;       /* pid of creator */
        __kernel_pid_t          shm_lpid;       /* pid of last operator */
-       unsigned long           shm_nattch;     /* no. of current attaches */
-       unsigned long           __unused4;
-       unsigned long           __unused5;
+       __kernel_ulong_t        shm_nattch;     /* no. of current attaches */
+       __kernel_ulong_t        __unused4;
+       __kernel_ulong_t        __unused5;
 };
 
 struct shminfo64 {
-       unsigned long   shmmax;
-       unsigned long   shmmin;
-       unsigned long   shmmni;
-       unsigned long   shmseg;
-       unsigned long   shmall;
-       unsigned long   __unused1;
-       unsigned long   __unused2;
-       unsigned long   __unused3;
-       unsigned long   __unused4;
+       __kernel_ulong_t        shmmax;
+       __kernel_ulong_t        shmmin;
+       __kernel_ulong_t        shmmni;
+       __kernel_ulong_t        shmseg;
+       __kernel_ulong_t        shmall;
+       __kernel_ulong_t        __unused1;
+       __kernel_ulong_t        __unused2;
+       __kernel_ulong_t        __unused3;
+       __kernel_ulong_t        __unused4;
 };
 
 #endif /* __ASM_GENERIC_SHMBUF_H */
index 9b24d65fed72b9d06843cba89ebeeabff07fb71f..3c9a833992e872f095a4373d6cb5e33602129acc 100644 (file)
@@ -181,7 +181,6 @@ enum drm_map_type {
        _DRM_AGP = 3,             /**< AGP/GART */
        _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
        _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
-       _DRM_GEM = 6,             /**< GEM object (obsolete) */
 };
 
 /**
index 3a4e97bd860771b6ad8e3b5d026a35200fcad4d3..126bfaa8bb6be45caf723889c077673da8c992f4 100644 (file)
@@ -222,6 +222,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_SET_CACHING       0x2f
 #define DRM_I915_GEM_GET_CACHING       0x30
 #define DRM_I915_REG_READ              0x31
+#define DRM_I915_GET_RESET_STATS       0x32
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -271,6 +272,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE      DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY     DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 #define DRM_IOCTL_I915_REG_READ                        DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS         DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -719,7 +721,7 @@ struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_IS_PINNED            (1<<10)
 
-/** Provide a hint to the kernel that the command stream and auxilliary
+/** Provide a hint to the kernel that the command stream and auxiliary
  * state buffers already holds the correct presumed addresses and so the
  * relocation process may be skipped if no buffers need to be moved in
  * preparation for the execbuffer.
@@ -1030,4 +1032,21 @@ struct drm_i915_reg_read {
        __u64 offset;
        __u64 val; /* Return value */
 };
+
+struct drm_i915_reset_stats {
+       __u32 ctx_id;
+       __u32 flags;
+
+       /* All resets since boot/module reload, for all contexts */
+       __u32 reset_count;
+
+       /* Number of batches lost when active in GPU, for this context */
+       __u32 batch_active;
+
+       /* Number of batches lost pending for execution, for this context */
+       __u32 batch_pending;
+
+       __u32 pad;
+};
+
 #endif /* _UAPI_I915_DRM_H_ */
index fe421e8a431bcf91dd1d5e845b5d06ce81bbe23d..d9ea3a73afe2d2a4df80ebebd23316df5077c346 100644 (file)
@@ -985,6 +985,8 @@ struct drm_radeon_cs {
 #define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY   0x18
 /* query the number of render backends */
 #define RADEON_INFO_SI_BACKEND_ENABLED_MASK    0x19
+/* max engine clock - needed for OpenCL */
+#define RADEON_INFO_MAX_SCLK           0x1a
 
 
 struct drm_radeon_info {
index f854ca4a1372812dda6f71c20c8a84b3bdcfc19a..9971c560ed9aa42dc841e76b9586f05e25225d96 100644 (file)
 #ifndef __VMWGFX_DRM_H__
 #define __VMWGFX_DRM_H__
 
+#ifndef __KERNEL__
+#include <drm.h>
+#endif
+
 #define DRM_VMW_MAX_SURFACE_FACES 6
 #define DRM_VMW_MAX_MIP_LEVELS 24
 
 #define DRM_VMW_PRESENT              18
 #define DRM_VMW_PRESENT_READBACK     19
 #define DRM_VMW_UPDATE_LAYOUT        20
+#define DRM_VMW_CREATE_SHADER        21
+#define DRM_VMW_UNREF_SHADER         22
+#define DRM_VMW_GB_SURFACE_CREATE    23
+#define DRM_VMW_GB_SURFACE_REF       24
+#define DRM_VMW_SYNCCPU              25
 
 /*************************************************************************/
 /**
@@ -76,6 +85,8 @@
 #define DRM_VMW_PARAM_MAX_FB_SIZE      5
 #define DRM_VMW_PARAM_FIFO_HW_VERSION  6
 #define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
+#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
+#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 
 /**
  * struct drm_vmw_getparam_arg
@@ -788,4 +799,253 @@ struct drm_vmw_update_layout_arg {
        uint64_t rects;
 };
 
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SHADER - Create shader
+ *
+ * Creates a shader and optionally binds it to a dma buffer containing
+ * the shader byte-code.
+ */
+
+/**
+ * enum drm_vmw_shader_type - Shader types
+ */
+enum drm_vmw_shader_type {
+       drm_vmw_shader_type_vs = 0,
+       drm_vmw_shader_type_ps,
+       drm_vmw_shader_type_gs
+};
+
+
+/**
+ * struct drm_vmw_shader_create_arg
+ *
+ * @shader_type: Shader type of the shader to create.
+ * @size: Size of the byte-code in bytes.
+ * @buffer_handle: Buffer handle identifying the buffer containing the
+ * shader byte-code
+ * @shader_handle: On successful completion contains a handle that
+ * can be used to subsequently identify the shader.
+ * @offset: Offset in bytes into the buffer given by @buffer_handle,
+ * where the shader byte-code starts
+ *
+ * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
+ */
+struct drm_vmw_shader_create_arg {
+       enum drm_vmw_shader_type shader_type;
+       uint32_t size;
+       uint32_t buffer_handle;
+       uint32_t shader_handle;
+       uint64_t offset;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SHADER - Unreferences a shader
+ *
+ * Destroys a user-space reference to a shader, optionally destroying
+ * it.
+ */
+
+/**
+ * struct drm_vmw_shader_arg
+ *
+ * @handle: Handle identifying the shader to destroy.
+ *
+ * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
+ */
+struct drm_vmw_shader_arg {
+       uint32_t handle;
+       uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ */
+
+/**
+ * enum drm_vmw_surface_flags
+ *
+ * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
+ * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
+ *                                      surface.
+ * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
+ *                                      given.
+ */
+enum drm_vmw_surface_flags {
+       drm_vmw_surface_flag_shareable = (1 << 0),
+       drm_vmw_surface_flag_scanout = (1 << 1),
+       drm_vmw_surface_flag_create_buffer = (1 << 2)
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_req
+ *
+ * @svga3d_flags:     SVGA3d surface flags for the device.
+ * @format:           SVGA3d format.
+ * @mip_levels:       Number of mip levels for all faces.
+ * @drm_surface_flags: Flags as described above.
+ * @multisample_count: Future use. Set to 0.
+ * @autogen_filter:   Future use. Set to 0.
+ * @buffer_handle:    Buffer handle of backup buffer. SVGA3D_INVALID_ID
+ *                    if none.
+ * @base_size:        Size of the base mip level for all faces.
+ *
+ * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+struct drm_vmw_gb_surface_create_req {
+       uint32_t svga3d_flags;
+       uint32_t format;
+       uint32_t mip_levels;
+       enum drm_vmw_surface_flags drm_surface_flags;
+       uint32_t multisample_count;
+       uint32_t autogen_filter;
+       uint32_t buffer_handle;
+       uint32_t pad64;
+       struct drm_vmw_size base_size;
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_rep
+ *
+ * @handle:            Surface handle.
+ * @backup_size:       Size of backup buffers for this surface.
+ * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
+ * @buffer_size:       Actual size of the buffer identified by
+ *                     @buffer_handle
+ * @buffer_map_handle: Offset into device address space for the buffer
+ *                     identified by @buffer_handle.
+ *
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
+ * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+struct drm_vmw_gb_surface_create_rep {
+       uint32_t handle;
+       uint32_t backup_size;
+       uint32_t buffer_handle;
+       uint32_t buffer_size;
+       uint64_t buffer_map_handle;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+union drm_vmw_gb_surface_create_arg {
+       struct drm_vmw_gb_surface_create_rep rep;
+       struct drm_vmw_gb_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
+ */
+
+/**
+ * struct drm_vmw_gb_surface_ref_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ *        above at "struct drm_vmw_gb_surface_create_req"
+ * @crep: Additional data output when the surface was created, as described
+ *        above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
+ */
+struct drm_vmw_gb_surface_ref_rep {
+       struct drm_vmw_gb_surface_create_req creq;
+       struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_arg {
+       struct drm_vmw_gb_surface_ref_rep rep;
+       struct drm_vmw_surface_arg req;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
+ *
+ * Idles any previously submitted GPU operations on the buffer and
+ * by default blocks command submissions that reference the buffer.
+ * If the file descriptor used to grab a blocking CPU sync is closed, the
+ * cpu sync is released.
+ * The flags argument indicates how the grab / release operation should be
+ * performed:
+ */
+
+/**
+ * enum drm_vmw_synccpu_flags - Synccpu flags:
+ *
+ * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
+ * hint to the kernel to allow command submissions that reference the buffer
+ * for read-only.
+ * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
+ * referencing this buffer.
+ * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
+ * -EBUSY should the buffer be busy.
+ * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
+ * while the buffer is synced for CPU. This is similar to the GEM bo idle
+ * behavior.
+ */
+enum drm_vmw_synccpu_flags {
+       drm_vmw_synccpu_read = (1 << 0),
+       drm_vmw_synccpu_write = (1 << 1),
+       drm_vmw_synccpu_dontblock = (1 << 2),
+       drm_vmw_synccpu_allow_cs = (1 << 3)
+};
+
+/**
+ * enum drm_vmw_synccpu_op - Synccpu operations:
+ *
+ * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations
+ * @drm_vmw_synccpu_release: Release a previous grab.
+ */
+enum drm_vmw_synccpu_op {
+       drm_vmw_synccpu_grab,
+       drm_vmw_synccpu_release
+};
+
+/**
+ * struct drm_vmw_synccpu_arg
+ *
+ * @op:                             The synccpu operation as described above.
+ * @handle:                 Handle identifying the buffer object.
+ * @flags:                  Flags as described above.
+ */
+struct drm_vmw_synccpu_arg {
+       enum drm_vmw_synccpu_op op;
+       enum drm_vmw_synccpu_flags flags;
+       uint32_t handle;
+       uint32_t pad64;
+};
+
 #endif
index 164a7e2639886333bded7e9e0c5948c4c0c06da7..22b6ad31c706dae59544286faea5808d7b303342 100644 (file)
@@ -39,6 +39,7 @@ static inline void SET_##name(struct bkey *k, unsigned i, __u64 v)    \
 }
 
 #define KEY_SIZE_BITS          16
+#define KEY_MAX_U64S           8
 
 KEY_FIELD(KEY_PTRS,    high, 60, 3)
 KEY_FIELD(HEADER_SIZE, high, 58, 2)
@@ -118,7 +119,7 @@ static inline struct bkey *bkey_next(const struct bkey *k)
        return (struct bkey *) (d + bkey_u64s(k));
 }
 
-static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
 {
        __u64 *d = (void *) k;
        return (struct bkey *) (d + nr_keys);
index f1f3dd5981b28ff73d5e52c2fb1606d174c3c5dd..84c517cbce902df6ba75be909c016b2da2fca3d5 100644 (file)
@@ -185,7 +185,8 @@ enum {
                                 * to clear media change status */
        FD_UNUSED_BIT,
        FD_DISK_CHANGED_BIT,    /* disk has been changed since last i/o */
-       FD_DISK_WRITABLE_BIT    /* disk is writable */
+       FD_DISK_WRITABLE_BIT,   /* disk is writable */
+       FD_OPEN_SHOULD_FAIL_BIT
 };
 
 #define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
index 8b5a79615fbf16aaf21a3912697ddf7562346d81..d0a2b8e89813934725dd217da8081cff7d2ce813 100644 (file)
 #define MQ_BYTES_MAX   819200
 
 struct mq_attr {
-       long    mq_flags;       /* message queue flags                  */
-       long    mq_maxmsg;      /* maximum number of messages           */
-       long    mq_msgsize;     /* maximum message size                 */
-       long    mq_curmsgs;     /* number of messages currently queued  */
-       long    __reserved[4];  /* ignored for input, zeroed for output */
+       __kernel_long_t mq_flags;       /* message queue flags                  */
+       __kernel_long_t mq_maxmsg;      /* maximum number of messages           */
+       __kernel_long_t mq_msgsize;     /* maximum message size                 */
+       __kernel_long_t mq_curmsgs;     /* number of messages currently queued  */
+       __kernel_long_t __reserved[4];  /* ignored for input, zeroed for output */
 };
 
 /*
index 22d95c6854e0cb0fbadaafae1c681ed9ad805780..a70375526578039691cf324a68108b60e4876619 100644 (file)
@@ -34,8 +34,8 @@ struct msqid_ds {
 
 /* message buffer for msgsnd and msgrcv calls */
 struct msgbuf {
-       long mtype;         /* type of message */
-       char mtext[1];      /* message text */
+       __kernel_long_t mtype;          /* type of message */
+       char mtext[1];                  /* message text */
 };
 
 /* buffer for msgctl calls IPC_INFO, MSG_INFO */
index e0ed28477f48fa5fb05e50a8f65aaf8dd636ddb3..36fb3b5fb1817287a86290334a48dcca58801f73 100644 (file)
 struct rusage {
        struct timeval ru_utime;        /* user time used */
        struct timeval ru_stime;        /* system time used */
-       long    ru_maxrss;              /* maximum resident set size */
-       long    ru_ixrss;               /* integral shared memory size */
-       long    ru_idrss;               /* integral unshared data size */
-       long    ru_isrss;               /* integral unshared stack size */
-       long    ru_minflt;              /* page reclaims */
-       long    ru_majflt;              /* page faults */
-       long    ru_nswap;               /* swaps */
-       long    ru_inblock;             /* block input operations */
-       long    ru_oublock;             /* block output operations */
-       long    ru_msgsnd;              /* messages sent */
-       long    ru_msgrcv;              /* messages received */
-       long    ru_nsignals;            /* signals received */
-       long    ru_nvcsw;               /* voluntary context switches */
-       long    ru_nivcsw;              /* involuntary " */
+       __kernel_long_t ru_maxrss;      /* maximum resident set size */
+       __kernel_long_t ru_ixrss;       /* integral shared memory size */
+       __kernel_long_t ru_idrss;       /* integral unshared data size */
+       __kernel_long_t ru_isrss;       /* integral unshared stack size */
+       __kernel_long_t ru_minflt;      /* page reclaims */
+       __kernel_long_t ru_majflt;      /* page faults */
+       __kernel_long_t ru_nswap;       /* swaps */
+       __kernel_long_t ru_inblock;     /* block input operations */
+       __kernel_long_t ru_oublock;     /* block output operations */
+       __kernel_long_t ru_msgsnd;      /* messages sent */
+       __kernel_long_t ru_msgrcv;      /* messages received */
+       __kernel_long_t ru_nsignals;    /* signals received */
+       __kernel_long_t ru_nvcsw;       /* voluntary context switches */
+       __kernel_long_t ru_nivcsw;      /* involuntary " */
 };
 
 struct rlimit {
-       unsigned long   rlim_cur;
-       unsigned long   rlim_max;
+       __kernel_ulong_t        rlim_cur;
+       __kernel_ulong_t        rlim_max;
 };
 
 #define RLIM64_INFINITY                (~0ULL)
index ec36fa1a83a44ce26db2b8f1df203bfb33c85d5f..78b69413f582bec895e06084153c8eae2acfbf2a 100644 (file)
@@ -68,11 +68,11 @@ struct      shminfo {
 
 struct shm_info {
        int used_ids;
-       unsigned long shm_tot;  /* total allocated shm */
-       unsigned long shm_rss;  /* total resident shm */
-       unsigned long shm_swp;  /* total swapped shm */
-       unsigned long swap_attempts;
-       unsigned long swap_successes;
+       __kernel_ulong_t shm_tot;       /* total allocated shm */
+       __kernel_ulong_t shm_rss;       /* total resident shm */
+       __kernel_ulong_t shm_swp;       /* total swapped shm */
+       __kernel_ulong_t swap_attempts;
+       __kernel_ulong_t swap_successes;
 };
 
 
index a7ea81f13711e80f68cf52b6a626d7a09461886e..92685d8264449c515d69652ad6c9fc23eae28344 100644 (file)
  */
 struct timex {
        unsigned int modes;     /* mode selector */
-       long offset;            /* time offset (usec) */
-       long freq;              /* frequency offset (scaled ppm) */
-       long maxerror;          /* maximum error (usec) */
-       long esterror;          /* estimated error (usec) */
+       __kernel_long_t offset; /* time offset (usec) */
+       __kernel_long_t freq;   /* frequency offset (scaled ppm) */
+       __kernel_long_t maxerror;/* maximum error (usec) */
+       __kernel_long_t esterror;/* estimated error (usec) */
        int status;             /* clock command/status */
-       long constant;          /* pll time constant */
-       long precision;         /* clock precision (usec) (read only) */
-       long tolerance;         /* clock frequency tolerance (ppm)
-                                * (read only)
-                                */
+       __kernel_long_t constant;/* pll time constant */
+       __kernel_long_t precision;/* clock precision (usec) (read only) */
+       __kernel_long_t tolerance;/* clock frequency tolerance (ppm)
+                                  * (read only)
+                                  */
        struct timeval time;    /* (read only, except for ADJ_SETOFFSET) */
-       long tick;              /* (modified) usecs between clock ticks */
+       __kernel_long_t tick;   /* (modified) usecs between clock ticks */
 
-       long ppsfreq;           /* pps frequency (scaled ppm) (ro) */
-       long jitter;            /* pps jitter (us) (ro) */
+       __kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */
+       __kernel_long_t jitter; /* pps jitter (us) (ro) */
        int shift;              /* interval duration (s) (shift) (ro) */
-       long stabil;            /* pps stability (scaled ppm) (ro) */
-       long jitcnt;            /* jitter limit exceeded (ro) */
-       long calcnt;            /* calibration intervals (ro) */
-       long errcnt;            /* calibration errors (ro) */
-       long stbcnt;            /* stability limit exceeded (ro) */
+       __kernel_long_t stabil;            /* pps stability (scaled ppm) (ro) */
+       __kernel_long_t jitcnt; /* jitter limit exceeded (ro) */
+       __kernel_long_t calcnt; /* calibration intervals (ro) */
+       __kernel_long_t errcnt; /* calibration errors (ro) */
+       __kernel_long_t stbcnt; /* stability limit exceeded (ro) */
 
        int tai;                /* TAI offset (ro) */
 
index d09dd10c5a5efc2c206a85bd31a431268e37cc7f..9a58bc2588105900d79ec27c46bdcb8374c32cbc 100644 (file)
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
        struct bio *bio;
 
        bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = bdev;
        bio->bi_end_io = end_swap_bio_read;
 
index f785aef65799cdb0068f0016bb0183a3bfe312a6..b418cb0d72424ab454e66e3cde881784ce1f0fad 100644 (file)
@@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
        if (!error && !bio_flagged(bio, BIO_UPTODATE))
                error = EIO;
 
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-                       error, 0, NULL);
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);
 
-               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                               BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+               __blk_add_trace(bt, bio->bi_iter.bi_sector,
+                               bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+                               !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
 }
@@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector_from = cpu_to_be64(from);
 
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                       BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-                       sizeof(r), &r);
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, BLK_TA_REMAP,
+                       !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
index c38083871f11dbaf3f7252674b937fe5e161380a..2defd1308b045c46389a6232391999914573deb6 100644 (file)
@@ -463,7 +463,7 @@ static int active_pfn_set_overlap(unsigned long pfn, int overlap)
        int i;
 
        if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
-               return 0;
+               return overlap;
 
        for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
                if (overlap & 1 << i)
@@ -486,7 +486,7 @@ static void active_pfn_inc_overlap(unsigned long pfn)
         * debug_dma_assert_idle() as the pfn may be marked idle
         * prematurely.
         */
-       WARN_ONCE(overlap == 0,
+       WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
                  "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
                  ACTIVE_PFN_MAX_OVERLAP, pfn);
 }
@@ -517,7 +517,11 @@ static void active_pfn_remove(struct dma_debug_entry *entry)
        unsigned long flags;
 
        spin_lock_irqsave(&radix_lock, flags);
-       if (active_pfn_dec_overlap(entry->pfn) == 0)
+       /* since we are counting overlaps the final put of the
+        * entry->pfn will occur when the overlap count is 0.
+        * active_pfn_dec_overlap() returns -1 in that case
+        */
+       if (active_pfn_dec_overlap(entry->pfn) < 0)
                radix_tree_delete(&dma_active_pfn, entry->pfn);
        spin_unlock_irqrestore(&radix_lock, flags);
 }
index dda31168844f42c9c6fe2f9c9d25526e30884529..bdb9a456bcbb50471310b9f636df72c6bc377ddd 100644 (file)
@@ -316,7 +316,7 @@ EXPORT_SYMBOL(gen_pool_alloc);
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
  * @pool: pool to allocate from
  * @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address
+ * @dma: dma-view physical address return value.  Use NULL if unneeded.
  *
  * Allocate the requested number of bytes from the specified pool.
  * Uses the pool allocation function (with first-fit algorithm by default).
@@ -334,7 +334,8 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
        if (!vaddr)
                return NULL;
 
-       *dma = gen_pool_virt_to_phys(pool, vaddr);
+       if (dma)
+               *dma = gen_pool_virt_to_phys(pool, vaddr);
 
        return (void *)vaddr;
 }
index 5a7d58fb883bfa1c4917e48d251cd132c8d9baf9..523918b8c6dcbef6968c37e2fa38ee87a3b2518c 100644 (file)
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
        unsigned char *vfrom;
-       struct bio_vec *tovec, *fromvec;
-       int i;
-
-       bio_for_each_segment(tovec, to, i) {
-               fromvec = from->bi_io_vec + i;
-
-               /*
-                * not bounced
-                */
-               if (tovec->bv_page == fromvec->bv_page)
-                       continue;
-
-               /*
-                * fromvec->bv_offset and fromvec->bv_len might have been
-                * modified by the block layer, so use the original copy,
-                * bounce_copy_vec already uses tovec->bv_len
-                */
-               vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+       struct bio_vec tovec, *fromvec = from->bi_io_vec;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(tovec, to, iter) {
+               if (tovec.bv_page != fromvec->bv_page) {
+                       /*
+                        * fromvec->bv_offset and fromvec->bv_len might have
+                        * been modified by the block layer, so use the original
+                        * copy, bounce_copy_vec already uses tovec->bv_len
+                        */
+                       vfrom = page_address(fromvec->bv_page) +
+                               tovec.bv_offset;
+
+                       bounce_copy_vec(&tovec, vfrom);
+                       flush_dcache_page(tovec.bv_page);
+               }
 
-               bounce_copy_vec(tovec, vfrom);
-               flush_dcache_page(tovec->bv_page);
+               fromvec++;
        }
 }
 
@@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 {
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
-       struct bio_vec *to, *from;
+       struct bio_vec *to, from;
+       struct bvec_iter iter;
        unsigned i;
 
        if (force)
                goto bounce;
-       bio_for_each_segment(from, *bio_orig, i)
-               if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+       bio_for_each_segment(from, *bio_orig, iter)
+               if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
                        goto bounce;
 
        return;
index 612c14f5e0f570159fcf775991e88615fca26606..29e1e761f9ebe3fee42eea1b0e77589122a23d24 100644 (file)
@@ -83,7 +83,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern bool zone_reclaimable(struct zone *zone);
 
 /*
index 87d21a6ff63c5730a2708414b6054a139b77cdf1..39a31e7f004505991e37219bdb1e17f571efb933 100644 (file)
@@ -1077,6 +1077,9 @@ static void * __init memblock_virt_alloc_internal(
        if (!align)
                align = SMP_CACHE_BYTES;
 
+       if (max_addr > memblock.current_limit)
+               max_addr = memblock.current_limit;
+
 again:
        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                                            nid);
index 36cb46cddf61aacc1b47f3d925ad58cc7a9926bb..873de7e542bc91993e6922a6b5ada5d4759a6da6 100644 (file)
@@ -2654,7 +2654,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-static bool __initdata numabalancing_override;
+static int __initdata numabalancing_override;
 
 static void __init check_numabalancing_enable(void)
 {
@@ -2663,9 +2663,15 @@ static void __init check_numabalancing_enable(void)
        if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
                numabalancing_default = true;
 
+       /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
+       if (numabalancing_override)
+               set_numabalancing_state(numabalancing_override == 1);
+
        if (nr_node_ids > 1 && !numabalancing_override) {
-               printk(KERN_INFO "Enabling automatic NUMA balancing. "
-                       "Configure with numa_balancing= or the kernel.numa_balancing sysctl");
+               pr_info("%s automatic NUMA balancing. "
+                       "Configure with numa_balancing= or the "
+                       "kernel.numa_balancing sysctl",
+                       numabalancing_default ? "Enabling" : "Disabling");
                set_numabalancing_state(numabalancing_default);
        }
 }
@@ -2675,18 +2681,17 @@ static int __init setup_numabalancing(char *str)
        int ret = 0;
        if (!str)
                goto out;
-       numabalancing_override = true;
 
        if (!strcmp(str, "enable")) {
-               set_numabalancing_state(true);
+               numabalancing_override = 1;
                ret = 1;
        } else if (!strcmp(str, "disable")) {
-               set_numabalancing_state(false);
+               numabalancing_override = -1;
                ret = 1;
        }
 out:
        if (!ret)
-               printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+               pr_warn("Unable to parse numa_balancing=\n");
 
        return ret;
 }
index 63807583d8e89f1c96f8b05bcf5fe422ed200c26..2d30e2cfe8047606064f117fbaca4675540e00dd 100644 (file)
@@ -191,6 +191,26 @@ static unsigned long writeout_period_time = 0;
  * global dirtyable memory first.
  */
 
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache.  This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+       unsigned long nr_pages;
+
+       nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+       nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+       nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+       return nr_pages;
+}
+
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
@@ -198,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
        unsigned long x = 0;
 
        for_each_node_state(node, N_HIGH_MEMORY) {
-               struct zone *z =
-                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+               struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-               x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+               x += zone_dirtyable_memory(z);
        }
        /*
         * Unreclaimable memory (kernel memory or anonymous memory
@@ -238,9 +256,12 @@ static unsigned long global_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+       x = global_page_state(NR_FREE_PAGES);
        x -= min(x, dirty_balance_reserve);
 
+       x += global_page_state(NR_INACTIVE_FILE);
+       x += global_page_state(NR_ACTIVE_FILE);
+
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);
 
@@ -288,32 +309,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
        trace_global_dirty_state(background, dirty);
 }
 
-/**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache.  This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
-       /*
-        * The effective global number of dirtyable pages may exclude
-        * highmem as a big-picture measure to keep the ratio between
-        * dirty memory and lowmem reasonable.
-        *
-        * But this function is purely about the individual zone and a
-        * highmem zone can hold its share of dirty pages, so we don't
-        * care about vm_highmem_is_dirtyable here.
-        */
-       unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
-               zone_reclaimable_pages(zone);
-
-       /* don't allow this to underflow */
-       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
-       return nr_pages;
-}
-
 /**
  * zone_dirty_limit - maximum number of dirty pages allowed in a zone
  * @zone: the zone
index 7247be6114ac894523d8273743a4f168ceab3afa..7c59ef681381bb7afeef2cf5207d269e9a95c1f8 100644 (file)
@@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
-               bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-               bio->bi_sector <<= PAGE_SHIFT - 9;
+               bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+               bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
-               bio->bi_size = PAGE_SIZE;
+               bio->bi_iter.bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
@@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
@@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err)
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }
 
index 7cdbb44aa90bd99bef05f43ee27cc13b2121ed7a..0de2360d65f3f4f4cce4e0ae94f02d78a51a2429 100644 (file)
@@ -211,8 +211,6 @@ out:
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
 {
-       int ret = 0;
-
        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;
 
@@ -226,15 +224,13 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
-               if (err < 0) {
-                       ret = err;
-                       break;
-               }
-               ret += err;
+               if (err < 0)
+                       return err;
+
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -576,8 +572,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
        if (!mapping || !mapping->a_ops)
                return -EINVAL;
 
-       force_page_cache_readahead(mapping, filp, index, nr);
-       return 0;
+       return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
 SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
index 8e40321da091b66f24f983b266acb344ce41d56c..1ec3c619ba04b955f0d64f32ae3d432d113488f6 100644 (file)
@@ -233,14 +233,17 @@ out_unlock:
        mutex_unlock(&slab_mutex);
        put_online_cpus();
 
-       /*
-        * There is no point in flooding logs with warnings or especially
-        * crashing the system if we fail to create a cache for a memcg. In
-        * this case we will be accounting the memcg allocation to the root
-        * cgroup until we succeed to create its own cache, but it isn't that
-        * critical.
-        */
-       if (err && !memcg) {
+       if (err) {
+               /*
+                * There is no point in flooding logs with warnings or
+                * especially crashing the system if we fail to create a cache
+                * for a memcg. In this case we will be accounting the memcg
+                * allocation to the root cgroup until we succeed to create its
+                * own cache, but it isn't that critical.
+                */
+               if (!memcg)
+                       return NULL;
+
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                name, err);
index 34bb8c65a2d8d7e9973fa501fafbe10cf571b8f3..545a170ebf9f66cf0e3716c9cd6f4cb7eef0eda6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
                new.freelist = freelist;
        }
 
-       VM_BUG_ON_PAGE(new.frozen, &new);
+       VM_BUG_ON(new.frozen);
        new.frozen = 1;
 
        if (!__cmpxchg_double_slab(s, page,
@@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
                        set_freepointer(s, freelist, prior);
                        new.counters = counters;
                        new.inuse--;
-                       VM_BUG_ON_PAGE(!new.frozen, &new);
+                       VM_BUG_ON(!new.frozen);
 
                } while (!__cmpxchg_double_slab(s, page,
                        prior, counters,
@@ -1840,7 +1840,7 @@ redo:
 
        old.freelist = page->freelist;
        old.counters = page->counters;
-       VM_BUG_ON_PAGE(!old.frozen, &old);
+       VM_BUG_ON(!old.frozen);
 
        /* Determine target state of the slab */
        new.counters = old.counters;
@@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 
                        old.freelist = page->freelist;
                        old.counters = page->counters;
-                       VM_BUG_ON_PAGE(!old.frozen, &old);
+                       VM_BUG_ON(!old.frozen);
 
                        new.counters = old.counters;
                        new.freelist = old.freelist;
@@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
                counters = page->counters;
 
                new.counters = counters;
-               VM_BUG_ON_PAGE(!new.frozen, &new);
+               VM_BUG_ON(!new.frozen);
 
                new.inuse = page->objects;
                new.frozen = freelist != NULL;
@@ -2319,7 +2319,7 @@ load_freelist:
         * page is pointing to the page from which the objects are obtained.
         * That page must be frozen for per cpu allocations to work.
         */
-       VM_BUG_ON_PAGE(!c->page->frozen, c->page);
+       VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
        local_irq_restore(flags);
index 90c4075d8d75af6358ba6c789bc0747f53460a4d..a9c74b409681a460f2c2ef5f45b3f2283a81a4b8 100644 (file)
@@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
+static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
        int nr;
 
@@ -3315,27 +3315,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-       int nr;
-
-       nr = global_page_state(NR_ACTIVE_FILE) +
-            global_page_state(NR_INACTIVE_FILE);
-
-       if (get_nr_swap_pages() > 0)
-               nr += global_page_state(NR_ACTIVE_ANON) +
-                     global_page_state(NR_INACTIVE_ANON);
-
-       return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
index 2ed1304d22a7dfed5c8bc9f86d5f0f5cb1b91742..0e478a0f4204b72ed19ae49c349d632cda009e02 100644 (file)
@@ -778,13 +778,12 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
 
        bio = data->bio;
        BUG_ON(!bio);
-       BUG_ON(!bio->bi_vcnt);
 
        cursor->resid = min(length, data->bio_length);
        cursor->bio = bio;
-       cursor->vector_index = 0;
-       cursor->vector_offset = 0;
-       cursor->last_piece = length <= bio->bi_io_vec[0].bv_len;
+       cursor->bvec_iter = bio->bi_iter;
+       cursor->last_piece =
+               cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
 }
 
 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
@@ -793,71 +792,63 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
 {
        struct ceph_msg_data *data = cursor->data;
        struct bio *bio;
-       struct bio_vec *bio_vec;
-       unsigned int index;
+       struct bio_vec bio_vec;
 
        BUG_ON(data->type != CEPH_MSG_DATA_BIO);
 
        bio = cursor->bio;
        BUG_ON(!bio);
 
-       index = cursor->vector_index;
-       BUG_ON(index >= (unsigned int) bio->bi_vcnt);
+       bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
-       bio_vec = &bio->bi_io_vec[index];
-       BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
-       *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
+       *page_offset = (size_t) bio_vec.bv_offset;
        BUG_ON(*page_offset >= PAGE_SIZE);
        if (cursor->last_piece) /* pagelist offset is always 0 */
                *length = cursor->resid;
        else
-               *length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
+               *length = (size_t) bio_vec.bv_len;
        BUG_ON(*length > cursor->resid);
        BUG_ON(*page_offset + *length > PAGE_SIZE);
 
-       return bio_vec->bv_page;
+       return bio_vec.bv_page;
 }
 
 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio *bio;
-       struct bio_vec *bio_vec;
-       unsigned int index;
+       struct bio_vec bio_vec;
 
        BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
 
        bio = cursor->bio;
        BUG_ON(!bio);
 
-       index = cursor->vector_index;
-       BUG_ON(index >= (unsigned int) bio->bi_vcnt);
-       bio_vec = &bio->bi_io_vec[index];
+       bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
        /* Advance the cursor offset */
 
        BUG_ON(cursor->resid < bytes);
        cursor->resid -= bytes;
-       cursor->vector_offset += bytes;
-       if (cursor->vector_offset < bio_vec->bv_len)
+
+       bio_advance_iter(bio, &cursor->bvec_iter, bytes);
+
+       if (bytes < bio_vec.bv_len)
                return false;   /* more bytes to process in this segment */
-       BUG_ON(cursor->vector_offset != bio_vec->bv_len);
 
        /* Move on to the next segment, and possibly the next bio */
 
-       if (++index == (unsigned int) bio->bi_vcnt) {
+       if (!cursor->bvec_iter.bi_size) {
                bio = bio->bi_next;
-               index = 0;
+               cursor->bvec_iter = bio->bi_iter;
        }
        cursor->bio = bio;
-       cursor->vector_index = index;
-       cursor->vector_offset = 0;
 
        if (!cursor->last_piece) {
                BUG_ON(!cursor->resid);
                BUG_ON(!bio);
                /* A short read is OK, so use <= rather than == */
-               if (cursor->resid <= bio->bi_io_vec[index].bv_len)
+               if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
                        cursor->last_piece = true;
        }
 
index 8f519dbb358b4b8ab8d2f497a000383fe7656341..5976ef0846bdda08db6289bb91f05a63b85e6e3a 100644 (file)
@@ -47,6 +47,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
@@ -2119,7 +2121,7 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
 /**
  *     skb_zerocopy - Zero copy skb to skb
  *     @to: destination buffer
- *     @source: source buffer
+ *     @from: source buffer
  *     @len: number of bytes to copy from source buffer
  *     @hlen: size of linear headroom in destination buffer
  *
@@ -3916,3 +3918,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        nf_reset_trace(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       unsigned int hdr_len;
+
+       if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+               hdr_len = tcp_hdrlen(skb);
+       else
+               hdr_len = sizeof(struct udphdr);
+       return hdr_len + shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
index 083f905bf1096d748a90e3a2bff52983a936a680..860aa2d445bae361d5d588a6c1a4e41310d0d629 100644 (file)
@@ -678,7 +678,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                        hc06_ptr += 3;
                } else {
                        /* compress nothing */
-                       memcpy(hc06_ptr, &hdr, 4);
+                       memcpy(hc06_ptr, hdr, 4);
                        /* replace the top byte with new ECN | DSCP format */
                        *hc06_ptr = tmp;
                        hc06_ptr += 4;
index e7a92fdb36f61b779e04b1da45acf12312302c6b..ec4f762efda50918254329d9cccfd493533fae4b 100644 (file)
@@ -178,7 +178,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
        else
                itn = net_generic(net, ipgre_net_id);
 
-       iph = (const struct iphdr *)skb->data;
+       iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                             iph->daddr, iph->saddr, tpi->key);
 
index 054a3e97d822b61646cfff92c2ff2ed0f1e0740a..3d4da2c16b6a3c6d6bc41d8fec0ed182037b0801 100644 (file)
@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
 
-       if (sysctl_ip_early_demux && !skb_dst(skb)) {
+       if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct net_protocol *ipprot;
                int protocol = iph->protocol;
 
index c0e3cb72ad70ad211f4801e13c5e207dc48318e3..bd28f386bd02020ef3adc4295a90021d6d43a0f7 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/rculist.h>
+#include <linux/err.h>
 
 #include <net/sock.h>
 #include <net/ip.h>
@@ -930,7 +931,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
        }
        rtnl_unlock();
 
-       return PTR_RET(itn->fb_tunnel_dev);
+       return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
 
index 302d6fb1ff2b43fb027a633a0ba3f87eea58d4e9..51d54dc376f3b1862040f38e6f58e2c75001049c 100644 (file)
@@ -49,7 +49,7 @@
 
 int ip6_rcv_finish(struct sk_buff *skb)
 {
-       if (sysctl_ip_early_demux && !skb_dst(skb)) {
+       if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct inet6_protocol *ipprot;
 
                ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
index 2dae8a5df23fe56f93620dcaf313a54d6e41b231..94425e421213a3fd2719428244156a890a98127a 100644 (file)
@@ -43,7 +43,7 @@ int llc_mac_hdr_init(struct sk_buff *skb,
                        rc = 0;
                break;
        default:
-               WARN(1, "device type not supported: %d\n", skb->dev->type);
+               break;
        }
        return rc;
 }
index 4106ca95ec86f43b8c235dd5d94e9fc016ea06f5..7bf5b5b9e8b9400af1cbaecaeaf9a8c222670492 100644 (file)
@@ -381,6 +381,8 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
 
                rxrpc_assign_connection_id(conn);
                rx->conn = conn;
+       } else {
+               spin_lock(&trans->client_lock);
        }
 
        /* we've got a connection with a free channel and we can now attach the
index 898492a8d61be8fde5bcdf66084d525bee23b5f0..34b5490dde655ccdbac5dcbbc7b0df1b88ab42ca 100644 (file)
@@ -180,7 +180,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                if (copy > len - copied)
                        copy = len - copied;
 
-               if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               if (skb->ip_summed == CHECKSUM_UNNECESSARY ||
+                   skb->ip_summed == CHECKSUM_PARTIAL) {
                        ret = skb_copy_datagram_iovec(skb, offset,
                                                      msg->msg_iov, copy);
                } else {
@@ -353,6 +354,10 @@ csum_copy_error:
        if (continue_call)
                rxrpc_put_call(continue_call);
        rxrpc_kill_skb(skb);
+       if (!(flags & MSG_PEEK)) {
+               if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+                       BUG();
+       }
        skb_kill_datagram(&rx->sk, skb, flags);
        rxrpc_put_call(call);
        return -EAGAIN;
index fbba5b0ec1215be171d17c70fa5be3a7c79314b9..1cb413fead89522a64931a4e2472b39c4d6a720d 100644 (file)
@@ -21,7 +21,6 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
-#include <net/tcp.h>
 
 
 /*     Simple Token Bucket Filter.
@@ -148,16 +147,10 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
  * Return length of individual segments of a gso packet,
  * including all headers (MAC, IP, TCP/UDP)
  */
-static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
 {
        unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-       const struct skb_shared_info *shinfo = skb_shinfo(skb);
-
-       if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-               hdr_len += tcp_hdrlen(skb);
-       else
-               hdr_len += sizeof(struct udphdr);
-       return hdr_len + shinfo->gso_size;
+       return hdr_len + skb_gso_transport_seglen(skb);
 }
 
 /* GSO packet is too big, segment it so that tbf can transmit
@@ -202,7 +195,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        int ret;
 
        if (qdisc_pkt_len(skb) > q->max_size) {
-               if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
+               if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
                        return tbf_segment(skb, sch);
                return qdisc_reshape_fail(skb, sch);
        }
index 76e42e6be7558d9578b38506a28525350e0cf8bf..24589bd2a4b600cae5fdb8f5c0f54fb5198e2115 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
+#include <linux/lcm.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -72,7 +73,7 @@
 static void krb5_nfold(u32 inbits, const u8 *in,
                       u32 outbits, u8 *out)
 {
-       int a, b, c, lcm;
+       unsigned long ulcm;
        int byte, i, msbit;
 
        /* the code below is more readable if I make these bytes
@@ -82,17 +83,7 @@ static void krb5_nfold(u32 inbits, const u8 *in,
        outbits >>= 3;
 
        /* first compute lcm(n,k) */
-
-       a = outbits;
-       b = inbits;
-
-       while (b != 0) {
-               c = b;
-               b = a%b;
-               a = c;
-       }
-
-       lcm = outbits*inbits/a;
+       ulcm = lcm(inbits, outbits);
 
        /* now do the real work */
 
@@ -101,7 +92,7 @@ static void krb5_nfold(u32 inbits, const u8 *in,
 
        /* this will end up cycling through k lcm(k,n)/k times, which
           is correct */
-       for (i = lcm-1; i >= 0; i--) {
+       for (i = ulcm-1; i >= 0; i--) {
                /* compute the msbit in k which gets added into this byte */
                msbit = (
                        /* first, start with the msbit in the first,
index 458f85e9b0ba088575a72ef6dd6ff8d21484d290..abbb7dcd16897125863098cb48f6a6411488225c 100644 (file)
@@ -137,7 +137,6 @@ void init_gssp_clnt(struct sunrpc_net *sn)
 {
        mutex_init(&sn->gssp_lock);
        sn->gssp_clnt = NULL;
-       init_waitqueue_head(&sn->gssp_wq);
 }
 
 int set_gssp_clnt(struct net *net)
@@ -154,7 +153,6 @@ int set_gssp_clnt(struct net *net)
                sn->gssp_clnt = clnt;
        }
        mutex_unlock(&sn->gssp_lock);
-       wake_up(&sn->gssp_wq);
        return ret;
 }
 
index 008cdade5aae387db601607c463aa20291777513..0f73f450774675da7666d10cb50c57571a11c27a 100644 (file)
@@ -1263,65 +1263,34 @@ out:
        return ret;
 }
 
-DEFINE_SPINLOCK(use_gssp_lock);
-
-static bool use_gss_proxy(struct net *net)
-{
-       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
-
-       if (sn->use_gss_proxy != -1)
-               return sn->use_gss_proxy;
-       spin_lock(&use_gssp_lock);
-       /*
-        * If you wanted gss-proxy, you should have said so before
-        * starting to accept requests:
-        */
-       sn->use_gss_proxy = 0;
-       spin_unlock(&use_gssp_lock);
-       return 0;
-}
-
-#ifdef CONFIG_PROC_FS
-
+/*
+ * Try to set the sn->use_gss_proxy variable to a new value. We only allow
+ * it to be changed if it's currently undefined (-1). If it's any other value
+ * then return -EBUSY unless the type wouldn't have changed anyway.
+ */
 static int set_gss_proxy(struct net *net, int type)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
-       int ret = 0;
+       int ret;
 
        WARN_ON_ONCE(type != 0 && type != 1);
-       spin_lock(&use_gssp_lock);
-       if (sn->use_gss_proxy == -1 || sn->use_gss_proxy == type)
-               sn->use_gss_proxy = type;
-       else
-               ret = -EBUSY;
-       spin_unlock(&use_gssp_lock);
-       wake_up(&sn->gssp_wq);
-       return ret;
-}
-
-static inline bool gssp_ready(struct sunrpc_net *sn)
-{
-       switch (sn->use_gss_proxy) {
-               case -1:
-                       return false;
-               case 0:
-                       return true;
-               case 1:
-                       return sn->gssp_clnt;
-       }
-       WARN_ON_ONCE(1);
-       return false;
+       ret = cmpxchg(&sn->use_gss_proxy, -1, type);
+       if (ret != -1 && ret != type)
+               return -EBUSY;
+       return 0;
 }
 
-static int wait_for_gss_proxy(struct net *net, struct file *file)
+static bool use_gss_proxy(struct net *net)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
-       if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
-               return -EAGAIN;
-       return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
+       /* If use_gss_proxy is still undefined, then try to disable it */
+       if (sn->use_gss_proxy == -1)
+               set_gss_proxy(net, 0);
+       return sn->use_gss_proxy;
 }
 
+#ifdef CONFIG_PROC_FS
 
 static ssize_t write_gssp(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
@@ -1342,10 +1311,10 @@ static ssize_t write_gssp(struct file *file, const char __user *buf,
                return res;
        if (i != 1)
                return -EINVAL;
-       res = set_gss_proxy(net, 1);
+       res = set_gssp_clnt(net);
        if (res)
                return res;
-       res = set_gssp_clnt(net);
+       res = set_gss_proxy(net, 1);
        if (res)
                return res;
        return count;
@@ -1355,16 +1324,12 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
 {
        struct net *net = PDE_DATA(file_inode(file));
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        unsigned long p = *ppos;
        char tbuf[10];
        size_t len;
-       int ret;
 
-       ret = wait_for_gss_proxy(net, file);
-       if (ret)
-               return ret;
-
-       snprintf(tbuf, sizeof(tbuf), "%d\n", use_gss_proxy(net));
+       snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
        len = strlen(tbuf);
        if (p >= len)
                return 0;
@@ -1626,8 +1591,7 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
        BUG_ON(integ_len % 4);
        *p++ = htonl(integ_len);
        *p++ = htonl(gc->gc_seq);
-       if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
-                               integ_len))
+       if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len))
                BUG();
        if (resbuf->tail[0].iov_base == NULL) {
                if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
@@ -1635,10 +1599,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
                resbuf->tail[0].iov_base = resbuf->head[0].iov_base
                                                + resbuf->head[0].iov_len;
                resbuf->tail[0].iov_len = 0;
-               resv = &resbuf->tail[0];
-       } else {
-               resv = &resbuf->tail[0];
        }
+       resv = &resbuf->tail[0];
        mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
        if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
                goto out_err;
index e521d20e19701d917953e49033ce85fac8a96c8d..ae333c1845bb42f28198cdfb957cd03e7412c14c 100644 (file)
@@ -1111,9 +1111,7 @@ void qword_addhex(char **bpp, int *lp, char *buf, int blen)
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
-                       unsigned char c = *buf++;
-                       *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
-                       *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
+                       bp = hex_byte_pack(bp, *buf++);
                        len -= 2;
                        blen--;
                }
index 94e506f9d72bb62440d923360753c98ecf5f6725..df58268765351ebd1b4376f7504915cd1b9fff6b 100644 (file)
@@ -27,7 +27,6 @@ struct sunrpc_net {
        unsigned int rpcb_is_af_local : 1;
 
        struct mutex gssp_lock;
-       wait_queue_head_t gssp_wq;
        struct rpc_clnt *gssp_clnt;
        int use_gss_proxy;
        int pipe_version;
index e7fbe368b4a38f665c538ae98b3c8db1ce2a5b81..5de6801cd924ec8e71d216bd5ea9ebffd4a391a8 100644 (file)
@@ -916,9 +916,6 @@ static int __svc_register(struct net *net, const char *progname,
 #endif
        }
 
-       if (error < 0)
-               printk(KERN_WARNING "svc: failed to register %sv%u RPC "
-                       "service (errno %d).\n", progname, version, -error);
        return error;
 }
 
@@ -937,6 +934,7 @@ int svc_register(const struct svc_serv *serv, struct net *net,
                 const unsigned short port)
 {
        struct svc_program      *progp;
+       struct svc_version      *vers;
        unsigned int            i;
        int                     error = 0;
 
@@ -946,7 +944,8 @@ int svc_register(const struct svc_serv *serv, struct net *net,
 
        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
-                       if (progp->pg_vers[i] == NULL)
+                       vers = progp->pg_vers[i];
+                       if (vers == NULL)
                                continue;
 
                        dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
@@ -955,16 +954,26 @@ int svc_register(const struct svc_serv *serv, struct net *net,
                                        proto == IPPROTO_UDP?  "udp" : "tcp",
                                        port,
                                        family,
-                                       progp->pg_vers[i]->vs_hidden?
-                                               " (but not telling portmap)" : "");
+                                       vers->vs_hidden ?
+                                       " (but not telling portmap)" : "");
 
-                       if (progp->pg_vers[i]->vs_hidden)
+                       if (vers->vs_hidden)
                                continue;
 
                        error = __svc_register(net, progp->pg_name, progp->pg_prog,
                                                i, family, proto, port);
-                       if (error < 0)
+
+                       if (vers->vs_rpcb_optnl) {
+                               error = 0;
+                               continue;
+                       }
+
+                       if (error < 0) {
+                               printk(KERN_WARNING "svc: failed to register "
+                                       "%sv%u RPC service (errno %d).\n",
+                                       progp->pg_name, i, -error);
                                break;
+                       }
                }
        }
 
index 2a7ca8ffe83a9ad47576959b30f9664cc3996b0a..817a1e5239692e9fb3f5117f36eb0895d426ea5f 100644 (file)
@@ -2964,10 +2964,9 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
        /*
         * Once we've associated a backchannel xprt with a connection,
-        * we want to keep it around as long as long as the connection
-        * lasts, in case we need to start using it for a backchannel
-        * again; this reference won't be dropped until bc_xprt is
-        * destroyed.
+        * we want to keep it around as long as the connection lasts,
+        * in case we need to start using it for a backchannel again;
+        * this reference won't be dropped until bc_xprt is destroyed.
         */
        xprt_get(xprt);
        args->bc_xprt->xpt_bc_xprt = xprt;
index d105a44b68f664a55559e38217dcf27ccf025b70..63d91e22ed7ccd18a0cd77852af647112ed216bb 100755 (executable)
@@ -43,7 +43,8 @@ scm_version()
        fi
 
        # Check for git and a git repo.
-       if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+       if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
+          head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
 
                # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
                # it, because this version is defined in the top level Makefile.
index f9090b167ad7c1ba2c3e91f1af2e042356c25841..6404e1ef20d076bc693e198aab3afb3dde9bf08d 100644 (file)
@@ -164,6 +164,7 @@ struct fsl_ssi_private {
        bool baudclk_locked;
        bool irq_stats;
        bool offline_config;
+       bool use_dual_fifo;
        u8 i2s_mode;
        spinlock_t baudclk_lock;
        struct clk *baudclk;
@@ -721,6 +722,12 @@ static int fsl_ssi_setup(struct fsl_ssi_private *ssi_private)
                                CCSR_SSI_SxCCR_DC(2));
        }
 
+       if (ssi_private->use_dual_fifo) {
+               write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1);
+               write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1);
+               write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN);
+       }
+
        return 0;
 }
 
@@ -752,6 +759,15 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
                spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
        }
 
+       /* When using dual fifo mode, it is safer to ensure an even period
+        * size. If appearing to an odd number while DMA always starts its
+        * task from fifo0, fifo1 would be neglected at the end of each
+        * period. But SSI would still access fifo1 with an invalid data.
+        */
+       if (ssi_private->use_dual_fifo)
+               snd_pcm_hw_constraint_step(substream->runtime, 0,
+                               SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+
        return 0;
 }
 
@@ -1370,7 +1386,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
        if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
                        hw_type == FSL_SSI_MX35) {
-               u32 dma_events[2];
+               u32 dma_events[2], dmas[4];
                ssi_private->ssi_on_imx = true;
 
                ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1426,6 +1442,16 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                                goto error_clk;
                        }
                }
+               /* Should this be merge with the above? */
+               if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
+                               && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+                       ssi_private->use_dual_fifo = true;
+                       /* When using dual fifo mode, we need to keep watermark
+                        * as even numbers due to dma script limitation.
+                        */
+                       ssi_private->dma_params_tx.maxburst &= ~0x1;
+                       ssi_private->dma_params_rx.maxburst &= ~0x1;
+               }
 
                shared = of_device_is_compatible(of_get_parent(np),
                            "fsl,spba-bus");