Merge 3.2-rc5 into staging-next
author     Greg Kroah-Hartman <gregkh@suse.de>
           Sat, 10 Dec 2011 03:01:27 +0000 (19:01 -0800)
committer  Greg Kroah-Hartman <gregkh@suse.de>
           Sat, 10 Dec 2011 03:01:27 +0000 (19:01 -0800)
This resolves the conflict in the
drivers/staging/iio/industrialio-core.c file, which arose because two
different changes were made to fix the same problem.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
606 files changed:
CREDITS
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/filesystems/btrfs.txt
Documentation/kernel-parameters.txt
Documentation/networking/ip-sysctl.txt
Documentation/power/devices.txt
Documentation/power/runtime_pm.txt
Documentation/sound/alsa/soc/machine.txt
Documentation/usb/linux-cdc-acm.inf
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/common/gic.c
arch/arm/common/pl330.c
arch/arm/configs/at91cap9_defconfig [new file with mode: 0644]
arch/arm/configs/at91cap9adk_defconfig [deleted file]
arch/arm/configs/at91rm9200_defconfig
arch/arm/configs/at91sam9260_defconfig [new file with mode: 0644]
arch/arm/configs/at91sam9260ek_defconfig [deleted file]
arch/arm/configs/at91sam9g20_defconfig [new file with mode: 0644]
arch/arm/configs/at91sam9g20ek_defconfig [deleted file]
arch/arm/configs/at91sam9g45_defconfig
arch/arm/configs/at91sam9rl_defconfig [new file with mode: 0644]
arch/arm/configs/at91sam9rlek_defconfig [deleted file]
arch/arm/configs/ezx_defconfig
arch/arm/configs/imote2_defconfig
arch/arm/configs/magician_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/configs/u300_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/configs/zeus_defconfig
arch/arm/include/asm/pmu.h
arch/arm/include/asm/topology.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/kprobes-arm.c
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-test-thumb.c
arch/arm/kernel/kprobes-test.h
arch/arm/kernel/perf_event.c
arch/arm/kernel/pmu.c
arch/arm/kernel/process.c
arch/arm/kernel/topology.c
arch/arm/lib/bitops.h
arch/arm/lib/changebit.S
arch/arm/lib/clearbit.S
arch/arm/lib/setbit.S
arch/arm/lib/testchangebit.S
arch/arm/lib/testclearbit.S
arch/arm/lib/testsetbit.S
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/psc.c
arch/arm/mach-exynos/cpuidle.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clock-imx6q.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-imx/src.c
arch/arm/mach-mmp/gplugd.c
arch/arm/mach-mmp/include/mach/gpio-pxa.h
arch/arm/mach-msm/devices-iommu.c
arch/arm/mach-mx5/cpu.c
arch/arm/mach-mx5/imx51-dt.c
arch/arm/mach-mx5/imx53-dt.c
arch/arm/mach-mx5/mm.c
arch/arm/mach-mxs/clock-mx28.c
arch/arm/mach-mxs/include/mach/mx28.h
arch/arm/mach-mxs/include/mach/mxs.h
arch/arm/mach-mxs/mach-m28evk.c
arch/arm/mach-mxs/mach-stmp378x_devb.c
arch/arm/mach-mxs/module-tx28.c
arch/arm/mach-omap1/Kconfig
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/clock.h
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap1/devices.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/cpuidle34xx.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/display.h [new file with mode: 0644]
arch/arm/mach-omap2/io.h [deleted file]
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_2420_data.c
arch/arm/mach-omap2/omap_hwmod_2430_data.c
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_common_data.c
arch/arm/mach-omap2/omap_hwmod_common_data.h
arch/arm/mach-omap2/omap_l3_noc.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-omap2/twl-common.h
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/prima2.c
arch/arm/mach-pxa/balloon3.c
arch/arm/mach-pxa/colibri-pxa320.c
arch/arm/mach-pxa/gumstix.c
arch/arm/mach-pxa/include/mach/palm27x.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/palmtc.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/mach-crag6410-module.c
arch/arm/mach-s3c64xx/s3c6400.c
arch/arm/mach-s3c64xx/setup-fb-24bpp.c
arch/arm/mach-sa1100/Makefile.boot
arch/arm/mm/cache-l2x0.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmap.c
arch/arm/plat-mxc/include/mach/common.h
arch/arm/plat-mxc/include/mach/mxc.h
arch/arm/plat-mxc/include/mach/system.h
arch/arm/plat-mxc/system.c
arch/arm/plat-omap/include/plat/clock.h
arch/arm/plat-omap/include/plat/common.h
arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
arch/arm/plat-s5p/sysmmu.c
arch/arm/plat-samsung/include/plat/gpio-cfg.h
arch/arm/plat-samsung/pd.c
arch/arm/plat-samsung/pwm.c
arch/arm/tools/mach-types
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/mips/kernel/perf_event_mipsxx.c
arch/powerpc/boot/dts/p1023rds.dts
arch/powerpc/configs/ppc44x_defconfig
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/platforms/85xx/Kconfig
arch/powerpc/platforms/85xx/p3060_qds.c
arch/powerpc/sysdev/ehv_pic.c
arch/powerpc/sysdev/fsl_lbc.c
arch/powerpc/sysdev/qe_lib/qe.c
arch/s390/include/asm/pgtable.h
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/tile/include/asm/irq.h
arch/tile/kernel/irq.c
arch/tile/kernel/pci-dma.c
arch/tile/kernel/pci.c
arch/tile/kernel/sysfs.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/x86/Kconfig
arch/x86/include/asm/e820.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/intel_scu_ipc.h
arch/x86/include/asm/mrst.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/system.h
arch/x86/include/asm/timer.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd_ibs.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/e820.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/process.c
arch/x86/kernel/quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/rtc.c
arch/x86/kernel/setup.c
arch/x86/mm/gup.c
arch/x86/mm/highmem_32.c
arch/x86/oprofile/init.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/mrst/mrst.c
arch/x86/xen/setup.c
drivers/acpi/apei/erst.c
drivers/base/core.c
drivers/crypto/mv_cesa.c
drivers/edac/mpc85xx_edac.c
drivers/firmware/efivars.c
drivers/firmware/sigma.c
drivers/gpio/Makefile
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_buf.h
drivers/gpu/drm/exynos/exynos_drm_connector.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hwmon/ad7314.c
drivers/hwmon/ads7871.c
drivers/hwmon/exynos4_tmu.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/jz4740-hwmon.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/s3c-hwmon.c
drivers/hwmon/sch5627.c
drivers/hwmon/sch5636.c
drivers/hwmon/twl4030-madc-hwmon.c
drivers/hwmon/ultra45_env.c
drivers/hwmon/wm831x-hwmon.c
drivers/hwmon/wm8350-hwmon.c
drivers/i2c/busses/i2c-nuc900.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/iommu/intel-iommu.c
drivers/iommu/intr_remapping.c
drivers/isdn/divert/divert_procfs.c
drivers/isdn/i4l/isdn_net.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/raid5.c
drivers/net/arcnet/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/ibm/ehea/ehea.h
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/iseries_veth.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/jme.h
drivers/net/ethernet/pasemi/Makefile
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/phy/Kconfig
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-shared.h
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rtlwifi/ps.c
drivers/net/xen-netback/netback.c
drivers/of/irq.c
drivers/oprofile/oprof.c
drivers/oprofile/timer_int.c
drivers/platform/x86/toshiba_acpi.c
drivers/power/intel_mid_battery.c
drivers/ptp/ptp_clock.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/regulator/aat2870-regulator.c
drivers/regulator/core.c
drivers/regulator/twl-regulator.c
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-s3c.c
drivers/s390/cio/chsc.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/io_sch.h
drivers/s390/crypto/ap_bus.c
drivers/spi/Kconfig
drivers/spi/spi-ath79.c
drivers/spi/spi-gpio.c
drivers/spi/spi-nuc900.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/usbduxsigma.c
drivers/staging/iio/industrialio-core.c
drivers/staging/rts_pstor/rtsx.c
drivers/staging/usbip/vhci_rx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/f_serial.c
drivers/usb/gadget/fsl_mxc_udc.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/whci/qset.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_gadget.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/unusual_devs.h
drivers/video/da8xx-fb.c
drivers/video/omap/dispc.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/hdmi.c
drivers/video/via/share.h
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/readdir.c
fs/cifs/smbencrypt.c
fs/dcache.c
fs/ext4/inode.c
fs/namespace.c
fs/ocfs2/alloc.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/netdebug.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/cluster/tcp.h
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmlock.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmthread.c
fs/ocfs2/dlmglue.c
fs/ocfs2/extent_map.c
fs/ocfs2/extent_map.h
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/inode.h
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/journal.h
fs/ocfs2/mmap.c
fs/ocfs2/move_extents.c
fs/ocfs2/ocfs2.h
fs/ocfs2/quota_local.c
fs/ocfs2/slot_map.c
fs/ocfs2/stack_o2cb.c
fs/ocfs2/super.c
fs/ocfs2/xattr.c
fs/proc/meminfo.c
fs/proc/stat.c
fs/pstore/platform.c
fs/seq_file.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_export.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_log.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
include/asm-generic/unistd.h
include/drm/drm_pciids.h
include/drm/exynos_drm.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/dcache.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/init_task.h
include/linux/mm.h
include/linux/netdevice.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/pkt_sched.h
include/linux/pm.h
include/linux/pstore.h
include/linux/shrinker.h
include/linux/sigma.h
include/net/dst.h
include/net/dst_ops.h
include/net/inet_sock.h
include/net/inetpeer.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netns/conntrack.h
include/net/red.h
include/net/route.h
include/target/target_core_base.h
include/target/target_core_transport.h
include/video/omapdss.h
kernel/cgroup_freezer.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/hrtimer.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/jump_label.c
kernel/lockdep.c
kernel/power/hibernate.c
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/sched_rt.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/time/timekeeping.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
lib/dma-debug.c
mm/huge_memory.c
mm/hugetlb.c
mm/migrate.c
mm/page_alloc.c
mm/percpu-vm.c
mm/percpu.c
mm/slab.c
mm/slub.c
mm/vmalloc.c
mm/vmscan.c
net/bridge/br_netlink.c
net/bridge/br_stp.c
net/caif/cffrml.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/neighbour.c
net/core/request_sock.c
net/core/secure_seq.c
net/core/skbuff.c
net/dccp/ipv4.c
net/decnet/dn_route.c
net/decnet/dn_timer.c
net/ipv4/devinet.c
net/ipv4/igmp.c
net/ipv4/inet_diag.c
net/ipv4/ip_forward.c
net/ipv4/ip_options.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/mac80211/agg-tx.c
net/mac80211/debugfs_sta.c
net/mac80211/main.c
net/mac80211/status.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_netlink.c
net/netlabel/netlabel_kapi.c
net/sched/sch_red.c
net/sched/sch_teql.c
net/sctp/auth.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
net/wireless/nl80211.c
net/wireless/reg.c
net/xfrm/xfrm_policy.c
security/apparmor/path.c
security/tomoyo/realpath.c
sound/pci/cs5535audio/cs5535audio_pcm.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_eld.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/lx6464es/lx_core.c
sound/pci/lx6464es/lx_core.h
sound/pci/rme9652/hdspm.c
sound/pci/sis7019.c
sound/soc/atmel/Kconfig
sound/soc/atmel/Makefile
sound/soc/atmel/playpaq_wm8510.c [deleted file]
sound/soc/codecs/ad1836.h
sound/soc/codecs/adau1373.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/cs4271.c
sound/soc/codecs/cs42l51.c
sound/soc/codecs/max9877.c
sound/soc/codecs/rt5631.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sta32x.c
sound/soc/codecs/sta32x.h
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8993.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm9081.c
sound/soc/codecs/wm9090.c
sound/soc/codecs/wm_hubs.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/mpc8610_hpcd.c
sound/soc/imx/Kconfig
sound/soc/kirkwood/Kconfig
sound/soc/nuc900/nuc900-ac97.c
sound/soc/pxa/Kconfig
sound/soc/samsung/smdk_wm8994.c
sound/soc/samsung/speyside.c
sound/soc/soc-core.c
sound/soc/soc-utils.c
sound/usb/quirks-table.h
tools/perf/builtin-stat.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/session.c
tools/perf/util/trace-event-parse.c

diff --git a/CREDITS b/CREDITS
index 07e32a87d956808fbb8b979fb38d434d93c879fc..44fce988eaac8cd22bfe5a5e753ae1bb58b3476d 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
 
 N: Kees Cook
 E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30  1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E  6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
 S: (ask for current address)
+S: Portland, Oregon
 S: USA
 
 N: Robin Cornelius
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b440af99ed14a6e851e3db5ed47d05fd..874921e97802d1d8c0e7cc2b694cca4bc3f18bf4 100644 (file)
@@ -33,6 +33,7 @@ qcom  Qualcomm, Inc.
 ramtron        Ramtron International
 samsung        Samsung Semiconductor
 schindler      Schindler
+sil    Silicon Image
 simtek
 sirf   SiRF Technology, Inc.
 stericsson     ST-Ericsson
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327fe0ba11e790e0a41224b8e7c1d30c..7671352216f1369d8d3c7dd02f9ae06fd9f90c87 100644 (file)
@@ -63,8 +63,8 @@ IRC network.
 Userspace tools for creating and manipulating Btrfs file systems are
 available from the git repository at the following location:
 
- http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
- git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
 
 These include the following tools:
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a0c5c5f4fce6e9587346a4a049c9725e5ca45de5..81c287fad79d6370d0d697d5ddf33b8af756a036 100644 (file)
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        CPU-intensive style benchmark, and it can vary highly in
                        a microbenchmark depending on workload and compiler.
 
-                       1: only for 32-bit processes
-                       2: only for 64-bit processes
+                       32: only for 32-bit processes
+                       64: only for 64-bit processes
                        on: enable for both 32- and 64-bit processes
                        off: disable for both 32- and 64-bit processes
 
-       amd_iommu=      [HW,X86-84]
+       amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
                        fullflush - enable flushing of IO/TLB entries when
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f049a1ca186fbf6eb5e55ed9eb3a65bb8601b1f8..589f2da5d5454dd96f828c01168aecb84770473f 100644 (file)
@@ -282,11 +282,11 @@ tcp_max_ssthresh - INTEGER
        Default: 0 (off)
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which are
-       still did not receive an acknowledgment from connecting client.
-       Default value is 1024 for systems with more than 128Mb of memory,
-       and 128 for low memory machines. If server suffers of overload,
-       try to increase this number.
+       Maximal number of remembered connection requests, which have not
+       received an acknowledgment from connecting client.
+       The minimal value is 128 for low memory machines, and it will
+       increase in proportion to the memory of machine.
+       If server suffers from overload, try increasing this number.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07d50c682912ef0200dc087363cd37f..3139fb505dcec97cb609e02326110e8b5725a43a 100644 (file)
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
 Subsystem-Level Methods
 -----------------------
 The core methods to suspend and resume devices reside in struct dev_pm_ops
-pointed to by the pm member of struct bus_type, struct device_type and
-struct class.  They are mostly of interest to the people writing infrastructure
-for buses, like PCI or USB, or device type and device class drivers.
+pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+struct bus_type, struct device_type and struct class.  They are mostly of
+interest to the people writing infrastructure for platforms and buses, like PCI
+or USB, or device type and device class drivers.
 
 Bus drivers implement these methods as appropriate for the hardware and the
 drivers using it; PCI works differently from USB, and so on.  Not many people
@@ -139,41 +140,57 @@ sequencing in the driver model tree.
 
 /sys/devices/.../power/wakeup files
 -----------------------------------
-All devices in the driver model have two flags to control handling of wakeup
-events (hardware signals that can force the device and/or system out of a low
-power state).  These flags are initialized by bus or device driver code using
+All device objects in the driver model contain fields that control the handling
+of system wakeup events (hardware signals that can force the system out of a
+sleep state).  These fields are initialized by bus or device driver code using
 device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
 include/linux/pm_wakeup.h.
 
-The "can_wakeup" flag just records whether the device (and its driver) can
+The "power.can_wakeup" flag just records whether the device (and its driver) can
 physically support wakeup events.  The device_set_wakeup_capable() routine
-affects this flag.  The "should_wakeup" flag controls whether the device should
-try to use its wakeup mechanism.  device_set_wakeup_enable() affects this flag;
-for the most part drivers should not change its value.  The initial value of
-should_wakeup is supposed to be false for the majority of devices; the major
-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool.  It should also default
-to true for devices that don't generate wakeup requests on their own but merely
-forward wakeup requests from one bus to another (like PCI bridges).
+affects this flag.  The "power.wakeup" field is a pointer to an object of type
+struct wakeup_source used for controlling whether or not the device should use
+its system wakeup mechanism and for notifying the PM core of system wakeup
+events signaled by the device.  This object is only present for wakeup-capable
+devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
+removed) by device_set_wakeup_capable().
 
 Whether or not a device is capable of issuing wakeup events is a hardware
 matter, and the kernel is responsible for keeping track of it.  By contrast,
 whether or not a wakeup-capable device should issue wakeup events is a policy
 decision, and it is managed by user space through a sysfs attribute: the
-power/wakeup file.  User space can write the strings "enabled" or "disabled" to
-set or clear the "should_wakeup" flag, respectively.  This file is only present
-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
-and is created (or removed) by device_set_wakeup_capable().  Reads from the
-file will return the corresponding string.
-
-The device_may_wakeup() routine returns true only if both flags are set.
+"power/wakeup" file.  User space can write the strings "enabled" or "disabled"
+to it to indicate whether or not, respectively, the device is supposed to signal
+system wakeup.  This file is only present if the "power.wakeup" object exists
+for the given device and is created (or removed) along with that object, by
+device_set_wakeup_capable().  Reads from the file will return the corresponding
+string.
+
+The "power/wakeup" file is supposed to contain the "disabled" string initially
+for the majority of devices; the major exceptions are power buttons, keyboards,
+and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
+ethtool.  It should also default to "enabled" for devices that don't generate
+wakeup requests on their own but merely forward wakeup requests from one bus to
+another (like PCI Express ports).
+
+The device_may_wakeup() routine returns true only if the "power.wakeup" object
+exists and the corresponding "power/wakeup" file contains the string "enabled".
 This information is used by subsystems, like the PCI bus type code, to see
 whether or not to enable the devices' wakeup mechanisms.  If device wakeup
 mechanisms are enabled or disabled directly by drivers, they also should use
 device_may_wakeup() to decide what to do during a system sleep transition.
-However for runtime power management, wakeup events should be enabled whenever
-the device and driver both support them, regardless of the should_wakeup flag.
-
+Device drivers, however, are not supposed to call device_set_wakeup_enable()
+directly in any case.
+
+It ought to be noted that system wakeup is conceptually different from "remote
+wakeup" used by runtime power management, although it may be supported by the
+same physical mechanism.  Remote wakeup is a feature allowing devices in
+low-power states to trigger specific interrupts to signal conditions in which
+they should be put into the full-power state.  Those interrupts may or may not
+be used to signal system wakeup events, depending on the hardware design.  On
+some systems it is impossible to trigger them from system sleep states.  In any
+case, remote wakeup should always be enabled for runtime power management for
+all devices and drivers that support it.
 
 /sys/devices/.../power/control files
 ------------------------------------
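
[Editor's sketch] The wakeup handling described in the devices.txt hunk above comes down to two
interfaces: device_set_wakeup_capable() records the hardware capability (and creates the
power/wakeup attribute), while device_may_wakeup() reports the policy user space wrote to that
attribute.  A minimal driver-side sketch; foo_chip, its irq field and the callbacks are
hypothetical placeholders, only the PM helpers are the ones named in the text:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct foo_chip {                       /* hypothetical driver state */
        struct device *dev;
        int irq;
};

static int foo_probe_wakeup(struct foo_chip *chip)
{
        /* Record the hardware capability; this also creates power/wakeup. */
        device_set_wakeup_capable(chip->dev, true);
        return 0;
}

static int foo_suspend(struct device *dev)
{
        struct foo_chip *chip = dev_get_drvdata(dev);

        /*
         * Policy lives in the "power/wakeup" attribute owned by user space;
         * the driver only consults it and arms its wake interrupt if asked.
         */
        if (device_may_wakeup(dev))
                enable_irq_wake(chip->irq);
        return 0;
}

static int foo_resume(struct device *dev)
{
        struct foo_chip *chip = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                disable_irq_wake(chip->irq);
        return 0;
}

User space flips the policy by writing "enabled" or "disabled" to the power/wakeup file; per the
new text, the driver itself never calls device_set_wakeup_enable().
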
@@ -249,20 +266,31 @@ for every device before the next phase begins.  Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks.  The
 various phases always run after tasks have been frozen and before they are
 unfrozen.  Furthermore, the *_noirq phases run at a time when IRQ handlers have
-been disabled (except for those marked with the IRQ_WAKEUP flag).
-
-All phases use bus, type, or class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, or dev->class->pm).  These callbacks are mutually
-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
-callbacks included in that object (i.e. dev->type->pm) will be used.  Otherwise,
-if the class provides a struct dev_pm_ops object pointed to by its pm field
-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
-callbacks from that object (i.e. dev->class->pm).  Finally, if the pm fields of
-both the device type and class objects are NULL (or those objects do not exist),
-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
-will be used (this allows device types to override callbacks provided by bus
-types or classes if necessary).
+been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+
+All phases use PM domain, bus, type, or class callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+These callbacks are regarded by the PM core as mutually exclusive.  Moreover,
+PM domain callbacks always take precedence over bus, type and class callbacks,
+while type callbacks take precedence over bus and class callbacks, and class
+callbacks take precedence over bus callbacks.  To be precise, the following
+rules are used to determine which callback to execute in the given phase:
+
+    1. If dev->pm_domain is present, the PM core will attempt to execute the
+       callback included in dev->pm_domain->ops.  If that callback is not
+       present, no action will be carried out for the given device.
+
+    2. Otherwise, if both dev->type and dev->type->pm are present, the callback
+       included in dev->type->pm will be executed.
+
+    3. Otherwise, if both dev->class and dev->class->pm are present, the
+       callback included in dev->class->pm will be executed.
+
+    4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+       included in dev->bus->pm will be executed.
+
+This allows PM domains and device types to override callbacks provided by bus
+types or device classes if necessary.
 
 These callbacks may in turn invoke device- or driver-specific methods stored in
 dev->driver->pm, but they don't have to.
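
[Editor's sketch] The four numbered rules added above reduce to one ordered lookup over the
dev_pm_ops pointers the documentation names (dev->pm_domain->ops, dev->type->pm, dev->class->pm,
dev->bus->pm).  A sketch of that selection order, assuming only those fields; the helper name is
illustrative and not the PM core's actual code:

#include <linux/device.h>
#include <linux/pm.h>

/* Illustrative only: pick the subsystem-level dev_pm_ops per rules 1-4 above. */
static const struct dev_pm_ops *pick_subsys_pm_ops(struct device *dev)
{
        if (dev->pm_domain)
                return &dev->pm_domain->ops;    /* 1. PM domain wins outright */
        if (dev->type && dev->type->pm)
                return dev->type->pm;           /* 2. device type             */
        if (dev->class && dev->class->pm)
                return dev->class->pm;          /* 3. device class            */
        if (dev->bus && dev->bus->pm)
                return dev->bus->pm;            /* 4. bus type                */
        return NULL;                            /* no subsystem callbacks     */
}

The same ordering governs the runtime PM callbacks described in runtime_pm.txt below.
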
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are:
 
        After the prepare callback method returns, no new children may be
        registered below the device.  The method may also prepare the device or
-       driver in some way for the upcoming system power transition (for
-       example, by allocating additional memory required for this purpose), but
-       it should not put the device into a low-power state.
+       driver in some way for the upcoming system power transition, but it
+       should not put the device into a low-power state.
 
     2. The suspend methods should quiesce the device to stop it from performing
        I/O.  They also may save the device registers and put it into the
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831ba47d6215ccc1d9f9d3f66df370df..c2ae8bf77d46d6fbd12490a6a9aacc14c11c4931 100644 (file)
@@ -44,25 +44,33 @@ struct dev_pm_ops {
 };
 
 The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
-are executed by the PM core for either the power domain, or the device type
-(if the device power domain's struct dev_pm_ops does not exist), or the class
-(if the device power domain's and type's struct dev_pm_ops object does not
-exist), or the bus type (if the device power domain's, type's and class'
-struct dev_pm_ops objects do not exist) of the given device, so the priority
-order of callbacks from high to low is that power domain callbacks, device
-type callbacks, class callbacks and bus type callbacks, and the high priority
-one will take precedence over low priority one. The bus type, device type and
-class callbacks are referred to as subsystem-level callbacks in what follows,
-and generally speaking, the power domain callbacks are used for representing
-power domains within a SoC.
+are executed by the PM core for the device's subsystem that may be either of
+the following:
+
+  1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
+     is present.
+
+  2. Device type of the device, if both dev->type and dev->type->pm are present.
+
+  3. Device class of the device, if both dev->class and dev->class->pm are
+     present.
+
+  4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+
+The PM core always checks which callback to use in the order given above, so the
+priority order of callbacks from high to low is: PM domain, device type, class
+and bus type.  Moreover, the high-priority one will always take precedence over
+a low-priority one.  The PM domain, bus type, device type and class callbacks
+are referred to as subsystem-level callbacks in what follows.
 
 By default, the callbacks are always invoked in process context with interrupts
 enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled.
-This implies that these callback routines must not block or sleep, but it also
-means that the synchronous helper functions listed at the end of Section 4 can
-be used within an interrupt handler or in an atomic context.
+to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
+->runtime_idle() callbacks may be invoked in atomic context with interrupts
+disabled for a given device.  This implies that the callback routines in
+question must not block or sleep, but it also means that the synchronous helper
+functions listed at the end of Section 4 may be used for that device within an
+interrupt handler or generally in an atomic context.
 
 The subsystem-level suspend callback is _entirely_ _responsible_ for handling
 the suspend of the device as appropriate, which may, but need not include
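
[Editor's sketch] The rewritten runtime_pm.txt text above says pm_runtime_irq_safe() lets a
subsystem declare that a device's ->runtime_suspend() and ->runtime_resume() run with interrupts
disabled, which in turn makes the synchronous helpers legal in atomic context.  A sketch of that
pattern; foo_probe_rpm() and foo_irq() are hypothetical placeholders, the pm_runtime_* helpers are
the documented ones:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static int foo_probe_rpm(struct device *dev)
{
        /*
         * Promise the PM core that this device's ->runtime_suspend() and
         * ->runtime_resume() never sleep, so the synchronous helpers may be
         * called with interrupts off.
         */
        pm_runtime_irq_safe(dev);
        pm_runtime_enable(dev);
        return 0;
}

static irqreturn_t foo_irq(int irq, void *data)
{
        struct device *dev = data;

        pm_runtime_get_sync(dev);       /* usable in atomic context only
                                           because of pm_runtime_irq_safe() */
        /* ... touch the hardware ... */
        pm_runtime_put(dev);
        return IRQ_HANDLED;
}
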
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 3e2ec9cbf3976d0d21c6ee90d7fe075a210a33eb..d50c14df34112ed2095942062bcaab90d90697bd 100644 (file)
@@ -50,8 +50,7 @@ Machine DAI Configuration
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
 also be used to set up the DAI system clock and for any machine related DAI
 initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
 
 struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
 
@@ -83,8 +82,7 @@ Machine Power Map
 The machine driver can optionally extend the codec power map and to become an
 audio power map of the audio subsystem. This allows for automatic power up/down
 of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
 
 
 Machine Controls
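
[Editor's sketch] The machine.txt hunks above drop the stale corgi.c/spitz.c pointers but keep the
statement that struct snd_soc_dai_link sets up each DAI.  A rough sketch of such a link as it
looked in this kernel generation; every name below is a placeholder, not taken from any real board
file:

#include <sound/soc.h>

/* Placeholder machine init hook: connect codec pins, jacks, audio map here. */
static int myboard_dai_init(struct snd_soc_pcm_runtime *rtd)
{
        return 0;
}

static struct snd_soc_dai_link myboard_dai_link = {
        .name           = "MyCodec",            /* link name           */
        .stream_name    = "MyCodec HiFi",       /* PCM stream name     */
        .cpu_dai_name   = "my-cpu-dai",         /* CPU DAI             */
        .codec_dai_name = "my-codec-hifi",      /* codec DAI           */
        .platform_name  = "my-pcm-audio",       /* DMA/platform driver */
        .codec_name     = "my-codec.0-001a",    /* codec device        */
        .init           = myboard_dai_init,     /* machine init hook   */
};
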
diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
index 37a02ce5484176670fde42a5bb915427316587f3..f0ffc27d4c0ac9d52efa62ecc0f775fd877b7387 100644 (file)
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
 [SourceDisksFiles]
 [SourceDisksNames]
 [DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 [DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 
 ;------------------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index d34351b1265bd45291639fee1a561b764e30e1b9..d6c09f3dcaabd97f13336b936556b9e63268a7c4 100644 (file)
@@ -511,8 +511,8 @@ M:  Joerg Roedel <joerg.roedel@amd.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
 S:     Supported
-F:     arch/x86/kernel/amd_iommu*.c
-F:     arch/x86/include/asm/amd_iommu*.h
+F:     drivers/iommu/amd_iommu*.[ch]
+F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
 M:     Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -789,6 +789,7 @@ L:  linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.pengutronix.de/git/imx/linux-2.6.git
 F:     arch/arm/mach-mx*/
+F:     arch/arm/mach-imx/
 F:     arch/arm/plat-mxc/
 
 ARM/FREESCALE IMX51
@@ -804,6 +805,13 @@ S: Maintained
 T:     git git://git.linaro.org/people/shawnguo/linux-2.6.git
 F:     arch/arm/mach-imx/*imx6*
 
+ARM/FREESCALE MXS ARM ARCHITECTURE
+M:     Shawn Guo <shawn.guo@linaro.org>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+T:     git git://git.linaro.org/people/shawnguo/linux-2.6.git
+F:     arch/arm/mach-mxs/
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1046,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
 M:     Ben Dooks <ben-linux@fluff.org>
 M:     Kukjin Kim <kgene.kim@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/plat-samsung/
 F:     arch/arm/plat-s3c24xx/
 F:     arch/arm/plat-s5p/
+F:     arch/arm/mach-s3c24*/
+F:     arch/arm/mach-s3c64xx/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2440/
-F:     arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c64xx/
+F:     drivers/spi/spi-s3c*
+F:     sound/soc/samsung/*
 
 ARM/S5P EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene.kim@samsung.com>
@@ -4318,8 +4309,9 @@ F:        include/linux/mm.h
 F:     mm/
 
 MEMORY RESOURCE CONTROLLER
+M:     Johannes Weiner <hannes@cmpxchg.org>
+M:     Michal Hocko <mhocko@suse.cz>
 M:     Balbir Singh <bsingharora@gmail.com>
-M:     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 M:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
@@ -5674,7 +5666,6 @@ F:        drivers/media/video/*7146*
 F:     include/media/*7146*
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Jassi Brar <jassisinghbrar@gmail.com>
 M:     Sangbeom Kim <sbkim73@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
diff --git a/Makefile b/Makefile
index 3a8f0640cda0e47e985bf7e84e5a0461efb9282c..d1ea73f74c2f42b2c4297fc6d45b75e38d561853 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983f444c22cd967cbe418f7e50159358..e084b7e981e8ff301aaae64ef3a2dc7e66701247 100644 (file)
@@ -1231,7 +1231,7 @@ config ARM_ERRATA_742231
          capabilities of the processor.
 
 config PL310_ERRATA_588369
-       bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+       bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
        depends on CACHE_L2X0
        help
           The PL310 L2 cache controller implements three types of Clean &
@@ -1256,7 +1256,7 @@ config ARM_ERRATA_720789
          entries regardless of the ASID.
 
 config PL310_ERRATA_727915
-       bool "Background Clean & Invalidate by Way operation can cause data corruption"
+       bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
        depends on CACHE_L2X0
        help
          PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1289,8 +1289,8 @@ config ARM_ERRATA_751472
          operation is received by a CPU before the ICIALLUIS has completed,
          potentially leading to corrupted entries in the cache or TLB.
 
-config ARM_ERRATA_753970
-       bool "ARM errata: cache sync operation may be faulty"
+config PL310_ERRATA_753970
+       bool "PL310 errata: cache sync operation may be faulty"
        depends on CACHE_PL310
        help
          This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1352,6 +1352,18 @@ config ARM_ERRATA_764369
          relevant cache maintenance functions and sets a specific bit
          in the diagnostic control register of the SCU.
 
+config PL310_ERRATA_769419
+       bool "PL310 errata: no automatic Store Buffer drain"
+       depends on CACHE_L2X0
+       help
+         On revisions of the PL310 prior to r3p2, the Store Buffer does
+         not automatically drain. This can cause normal, non-cacheable
+         writes to be retained when the memory system is idle, leading
+         to suboptimal I/O performance for drivers using coherent DMA.
+         This option adds a write barrier to the cpu_idle loop so that,
+         on systems with an outer cache, the store buffer is drained
+         explicitly.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0e6ae470c94f26589c0721a059dfaded9b2f30b1..410a546060a2eecf82859d76c528223e8796f9f4 100644 (file)
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_conf);
 
-       cpu_pm_register_notifier(&gic_notifier_block);
+       if (gic == &gic_data[0])
+               cpu_pm_register_notifier(&gic_notifier_block);
 }
 #else
 static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
         * For primary GICs, skip over SGIs.
         * For secondary GICs, skip over PPIs, too.
         */
+       domain->hwirq_base = 32;
        if (gic_nr == 0) {
                gic_cpu_base_addr = cpu_base;
-               domain->hwirq_base = 16;
-               if (irq_start > 0)
-                       irq_start = (irq_start & ~31) + 16;
-       } else
-               domain->hwirq_base = 32;
+
+               if ((irq_start & 31) > 0) {
+                       domain->hwirq_base = 16;
+                       if (irq_start != -1)
+                               irq_start = (irq_start & ~31) + 16;
+               }
+       }
 
        /*
         * Find out how many interrupts are supported.
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd6887920cf28994625be7b35f65503..f407a6b35d3dd1e6ad1e72afec38837dcfdd796a 100644 (file)
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
        ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
        ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
 
-       ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
-       ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+       ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+       ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
 
        ccr |= (rqc->swap << CC_SWAP_SHFT);
 
@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
        return -1;
 }
 
+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+       return pi->pcfg.irq_ns & (1 << i);
+}
+
 /* Upon success, returns IdentityToken for the
  * allocated channel, NULL otherwise.
  */
@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi)
 
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
-               if (thrd->free) {
+               if ((thrd->free) && (!_manager_ns(thrd) ||
+                                       _chan_ns(pi, i))) {
                        thrd->ev = _alloc_event(thrd);
                        if (thrd->ev >= 0) {
                                thrd->free = false;
diff --git a/arch/arm/configs/at91cap9_defconfig b/arch/arm/configs/at91cap9_defconfig
new file mode 100644 (file)
index 0000000..8826eb2
--- /dev/null
@@ -0,0 +1,108 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91CAP9=y
+CONFIG_MACH_AT91CAP9ADK=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+# CONFIG_ARM_THUMB is not set
+CONFIG_AEABI=y
+CONFIG_LEDS=y
+CONFIG_LEDS_CPU=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/ram0 rw"
+CONFIG_FPE_NWFPE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_MACB=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_FB=y
+CONFIG_FB_ATMEL=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_AT91=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_EXT2_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9adk_defconfig
deleted file mode 100644 (file)
index ffb1edd..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91CAP9=y
-CONFIG_MACH_AT91CAP9ADK=y
-CONFIG_MTD_AT91_DATAFLASH_CARD=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_AEABI=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_FB=y
-CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c98542623a92cc7791a89f701e9fe5adf48..bbe4e1a1f5d86f81464e393e2fd2ab228e34542c 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
 CONFIG_BRIDGE=m
 CONFIG_VLAN_8021Q=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_AFS_PARTS=y
 CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_LEGACY=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
+CONFIG_ARM_AT91_ETHER=y
 CONFIG_PHYLIB=y
 CONFIG_DAVICOM_PHY=y
 CONFIG_SMSC_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=y
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
 CONFIG_USB_CATC=m
 CONFIG_USB_KAWETH=m
 CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
 CONFIG_USB_ALI_M5632=y
 CONFIG_USB_AN2720=y
 CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
 CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_LEGACY_PTY_COUNT=32
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 CONFIG_CRYPTO_PCBC=y
 CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91sam9260_defconfig b/arch/arm/configs/at91sam9260_defconfig
new file mode 100644 (file)
index 0000000..505b376
--- /dev/null
@@ -0,0 +1,91 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91SAM9260=y
+CONFIG_ARCH_AT91SAM9260_SAM9XE=y
+CONFIG_MACH_AT91SAM9260EK=y
+CONFIG_MACH_CAM60=y
+CONFIG_MACH_SAM9_L9260=y
+CONFIG_MACH_AFEB9260=y
+CONFIG_MACH_USB_A9260=y
+CONFIG_MACH_QIL_A9260=y
+CONFIG_MACH_CPU9260=y
+CONFIG_MACH_FLEXIBITY=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+# CONFIG_ARM_THUMB is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
+CONFIG_FPE_NWFPE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_MACB=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_AT91SAM9X_WATCHDOG=y
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_EXT2_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260ek_defconfig
deleted file mode 100644 (file)
index f8a9226..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9260=y
-CONFIG_MACH_AT91SAM9260EK=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MACB=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91SAM9X_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/at91sam9g20_defconfig b/arch/arm/configs/at91sam9g20_defconfig
new file mode 100644 (file)
index 0000000..9123568
--- /dev/null
@@ -0,0 +1,124 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91SAM9G20=y
+CONFIG_MACH_AT91SAM9G20EK=y
+CONFIG_MACH_AT91SAM9G20EK_2MMC=y
+CONFIG_MACH_CPU9G20=y
+CONFIG_MACH_ACMENETUSFOXG20=y
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_MACH_GSIA18S=y
+CONFIG_MACH_USB_A9G20=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+# CONFIG_ARM_THUMB is not set
+CONFIG_AEABI=y
+CONFIG_LEDS=y
+CONFIG_LEDS_CPU=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
+CONFIG_FPE_NWFPE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_MACB=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_MMC=y
+CONFIG_MMC_AT91=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20ek_defconfig
deleted file mode 100644 (file)
index 9e90e6d..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9G20=y
-CONFIG_MACH_AT91SAM9G20EK=y
-CONFIG_MACH_AT91SAM9G20EK_2MMC=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_AEABI=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
-CONFIG_HW_RANDOM=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-CONFIG_SPI_SPIDEV=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
-# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_AT73C213=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
index c5876d244f4b2d667db3d4753bdc1bdab1203064..606d48f3b8f81c10370b718d7b2b3475818b9a03 100644 (file)
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_AT91=y
 CONFIG_ARCH_AT91SAM9G45=y
 CONFIG_MACH_AT91SAM9M10G45EK=y
+CONFIG_MACH_AT91SAM_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 CONFIG_AT91_SLOW_CLOCK=y
 CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_MII=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
 CONFIG_LIBERTAS_THINFIRM=m
 CONFIG_LIBERTAS_THINFIRM_USB=m
 CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 # CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
 CONFIG_FB_UDL=m
diff --git a/arch/arm/configs/at91sam9rl_defconfig b/arch/arm/configs/at91sam9rl_defconfig
new file mode 100644 (file)
index 0000000..ad562ee
--- /dev/null
@@ -0,0 +1,79 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91SAM9RL=y
+CONFIG_MACH_AT91SAM9RLEK=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+# CONFIG_ARM_THUMB is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,17105363 root=/dev/ram0 rw"
+CONFIG_FPE_NWFPE=y
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=4
+CONFIG_BLK_DEV_RAM_SIZE=24576
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_TSADCC=y
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_AT91SAM9X_WATCHDOG=y
+CONFIG_FB=y
+CONFIG_FB_ATMEL=y
+CONFIG_MMC=y
+CONFIG_MMC_AT91=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rlek_defconfig
deleted file mode 100644 (file)
index 75621e4..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9RL=y
-CONFIG_MACH_AT91SAM9RLEK=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,17105363 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_UNIX=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=4
-CONFIG_BLK_DEV_RAM_SIZE=24576
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ATMEL_TSADCC=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91SAM9X_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
index 227a477346edb44a2a83ba49a0f9b7cd2b165ec0..d95763d5f0d83df6543364e3ea648c2ad88994bf 100644 (file)
@@ -287,7 +287,7 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
index 176ec22af0342f215b59a2778144d69ef0f4c806..fd996bb13022879dee93c308c8e8ad154918ec20 100644 (file)
@@ -263,7 +263,7 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
index a88e64d4e9a5862c28160c9d1b7d3cd01edf0550..443675d317e6de326c576caf47ae9ff179a0814c 100644 (file)
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_USB_GADGETFS=m
index 7b63462b349d7de9965b13bec7c1c85e57c25fcd..945a34f2a34dbd9e711ac440500d1e17cfad2ac0 100644 (file)
@@ -48,13 +48,7 @@ CONFIG_MACH_SX1=y
 CONFIG_MACH_NOKIA770=y
 CONFIG_MACH_AMS_DELTA=y
 CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
 CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
 # CONFIG_ARM_THUMB is not set
 CONFIG_PCCARD=y
 CONFIG_OMAP_CF=y
index 4a5a12681be2038c908318ffc953c3ad7a9bd431..374000ec4e4e9a66bbf07304abd75dced85a0b66 100644 (file)
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_U300=y
 CONFIG_MACH_U300=y
 CONFIG_MACH_U300_BS335=y
-CONFIG_MACH_U300_DUAL_RAM=y
-CONFIG_U300_DEBUG=y
 CONFIG_MACH_U300_SPIDUMMY=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
 CONFIG_CPU_IDLE=y
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 # CONFIG_SUSPEND is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_MISC_DEVICES is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 # CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_CRC32 is not set
index 97d31a4663daf0c6186cd948a05abf35687686d3..2d7b6e7b72713cba72df0349462c29dbe38a67ba 100644 (file)
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_U8500=y
 CONFIG_UX500_SOC_DB5500=y
 CONFIG_UX500_SOC_DB8500=y
-CONFIG_MACH_U8500=y
+CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
 CONFIG_MACH_U5500=y
 CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_VFP=y
 CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
 CONFIG_AB8500_PWM=y
 CONFIG_SENSORS_BH1780=y
 CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
 CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_WLAN is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_STMPE=y
 CONFIG_GPIO_TC3589X=y
-# CONFIG_HWMON is not set
 CONFIG_MFD_STMPE=y
 CONFIG_MFD_TC3589X=y
+CONFIG_AB5500_CORE=y
 CONFIG_AB8500_CORE=y
 CONFIG_REGULATOR_AB8500=y
 # CONFIG_HID_SUPPORT is not set
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
-CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_GADGET=y
 CONFIG_AB8500_USB=y
 CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
 CONFIG_STE_DMA40=y
 CONFIG_STAGING=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_HSEM_U8500=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
index 59577ad3f4efdfda65e65753054673a32614e80f..547a3c1e59dbcd88ea9da77505691110508f9daf 100644 (file)
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_MCT_U232=m
 CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
index 71d99b83cdb980178aac275e487c2db081c041fc..0bda22c094a6dd1b70100444d6e29fa1a41688ef 100644 (file)
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type);
 extern void
 release_pmu(enum arm_pmu_type type);
 
-/**
- * init_pmu() - Initialise the PMU.
- *
- * Initialise the system ready for PMU enabling. This should typically set the
- * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
- * the actual hardware initialisation.
- */
-extern int
-init_pmu(enum arm_pmu_type type);
-
 #else /* CONFIG_CPU_HAS_PMU */
 
 #include <linux/err.h>
index a7e457ed27c31e1185ebe3a0eaa745d88c35029d..58b8b84adcd2cf5f295e6869b68350f9dcadc798 100644 (file)
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else
 
index 9ad50c4208aebf5aaf7ee245444da5789935b269..b145f16c91bc786db82fcd3cd66ccdee7b740aa4 100644 (file)
@@ -497,7 +497,7 @@ ENDPROC(__und_usr)
        .popsection
        .pushsection __ex_table,"a"
        .long   1b, 4b
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
        .long   2b, 4b
        .long   3b, 4b
 #endif
index 9fe8910308af922eda3c17ffee908c99652c1a1f..8a30c89da70ec104d4c1499f3a88a1dc4721e6e0 100644 (file)
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
 static const union decode_item arm_cccc_0001_____1001_table[] = {
        /* Synchronization primitives                                   */
 
+#if __LINUX_ARM_ARCH__ < 6
+       /* Deprecated on ARMv6 and may be UNDEFINED on v7               */
        /* SMP/SWPB             cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
        DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
                                                 REGS(NOPC, NOPC, 0, 0, NOPC)),
-
+#endif
        /* LDREX/STREX{,D,B,H}  cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
        /* And unallocated instructions...                              */
        DECODE_END
index fc82de8bdcce1081801cece62fa3d97480d520e1..ba32b393b3f0c514c83799687348d52655bfe1da 100644 (file)
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
 
        TEST_GROUP("Synchronization primitives")
 
-       /*
-        * Use hard coded constants for SWP instructions to avoid warnings
-        * about deprecated instructions.
-        */
-       TEST_RP( ".word 0xe108e097 @ swp        lr, r",7,VAL2,", [r",8,0,"]")
-       TEST_R(  ".word 0x610d0091 @ swpvs      r0, r",1,VAL1,", [sp]")
-       TEST_RP( ".word 0xe10cd09e @ swp        sp, r",14,VAL2,", [r",12,13*4,"]")
+#if __LINUX_ARM_ARCH__ < 6
+       TEST_RP("swp    lr, r",7,VAL2,", [r",8,0,"]")
+       TEST_R( "swpvs  r0, r",1,VAL1,", [sp]")
+       TEST_RP("swp    sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+       TEST_UNSUPPORTED(".word 0xe108e097 @ swp        lr, r7, [r8]")
+       TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs      r0, r1, [sp]")
+       TEST_UNSUPPORTED(".word 0xe10cd09e @ swp        sp, r14 [r12]")
+#endif
        TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
        TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
        TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
-       TEST_RP( ".word 0xe148e097 @ swpb       lr, r",7,VAL2,", [r",8,0,"]")
-       TEST_R(  ".word 0x614d0091 @ swpvsb     r0, r",1,VAL1,", [sp]")
+#if __LINUX_ARM_ARCH__ < 6
+       TEST_RP("swpb   lr, r",7,VAL2,", [r",8,0,"]")
+       TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+       TEST_UNSUPPORTED(".word 0xe148e097 @ swpb       lr, r7, [r8]")
+       TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb     r0, r1, [sp]")
+#endif
        TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
 
        TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
        TEST_RPR(  "strccd      r",8, VAL2,", [r",13,0, ", r",12,48,"]")
        TEST_RPR(  "strd        r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
        TEST_RPR(  "strcsd      r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
-       TEST_RPR(  "strd        r",2, VAL1,", [r",3, 24,"], r",4,48,"")
+       TEST_RPR(  "strd        r",2, VAL1,", [r",5, 24,"], r",4,48,"")
        TEST_RPR(  "strd        r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
        TEST_UNSUPPORTED(".word 0xe1afc0fa      @ strd r12, [pc, r10]!")
 
index 5e726c31c45aef5084a7e7ddf07bb1762ed4c403..5d8b857922220b4be4e9babf6e050c94625a56eb 100644 (file)
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
 DONT_TEST_IN_ITBLOCK(
        TEST_BF_R(  "cbnz       r",0,0, ", 2f")
        TEST_BF_R(  "cbz        r",2,-1,", 2f")
-       TEST_BF_RX( "cbnz       r",4,1, ", 2f",0x20)
-       TEST_BF_RX( "cbz        r",7,0, ", 2f",0x40)
+       TEST_BF_RX( "cbnz       r",4,1, ", 2f", SPACE_0x20)
+       TEST_BF_RX( "cbz        r",7,0, ", 2f", SPACE_0x40)
 )
        TEST_R("sxth    r0, r",7, HH1,"")
        TEST_R("sxth    r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
        TESTCASE_START(code)            \
        TEST_ARG_PTR(13, offset)        \
        TEST_ARG_END("")                \
-       TEST_BRANCH_F(code,0)           \
+       TEST_BRANCH_F(code)             \
        TESTCASE_END
 
        TEST("push      {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
 
        TEST_BF(  "b    2f")
        TEST_BB(  "b    2b")
-       TEST_BF_X("b    2f", 0x400)
-       TEST_BB_X("b    2b", 0x400)
+       TEST_BF_X("b    2f", SPACE_0x400)
+       TEST_BB_X("b    2b", SPACE_0x400)
 
        TEST_GROUP("Testing instructions in IT blocks")
 
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
        TEST_BB("bne.w  2b")
        TEST_BF("bgt.w  2f")
        TEST_BB("blt.w  2b")
-       TEST_BF_X("bpl.w        2f",0x1000)
+       TEST_BF_X("bpl.w        2f", SPACE_0x1000)
 )
 
        TEST_UNSUPPORTED("msr   cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
 
        TEST_BF(  "b.w  2f")
        TEST_BB(  "b.w  2b")
-       TEST_BF_X("b.w  2f", 0x1000)
+       TEST_BF_X("b.w  2f", SPACE_0x1000)
 
        TEST_BF(  "bl.w 2f")
        TEST_BB(  "bl.w 2b")
-       TEST_BB_X("bl.w 2b", 0x1000)
+       TEST_BB_X("bl.w 2b", SPACE_0x1000)
 
        TEST_X( "blx    __dummy_arm_subroutine",
                ".arm                           \n\t"
index 0dc5d77b9356bcd86b4b0c087bf642bc6396e06e..e28a869b1ae4b7be5abfb7d088c3214bfe2caab7 100644 (file)
@@ -149,23 +149,31 @@ struct test_arg_end {
        "1:     "instruction"                           \n\t"   \
        "       nop                                     \n\t"
 
-#define TEST_BRANCH_F(instruction, xtra_dist)                  \
+#define TEST_BRANCH_F(instruction)                             \
        TEST_INSTRUCTION(instruction)                           \
-       ".if "#xtra_dist"                               \n\t"   \
        "       b       99f                             \n\t"   \
-       ".space "#xtra_dist"                            \n\t"   \
-       ".endif                                         \n\t"   \
+       "2:     nop                                     \n\t"
+
+#define TEST_BRANCH_B(instruction)                             \
+       "       b       50f                             \n\t"   \
+       "       b       99f                             \n\t"   \
+       "2:     nop                                     \n\t"   \
+       "       b       99f                             \n\t"   \
+       TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex)                     \
+       TEST_INSTRUCTION(instruction)                           \
+       "       b       99f                             \n\t"   \
+       codex"                                          \n\t"   \
        "       b       99f                             \n\t"   \
        "2:     nop                                     \n\t"
 
-#define TEST_BRANCH_B(instruction, xtra_dist)                  \
+#define TEST_BRANCH_BX(instruction, codex)                     \
        "       b       50f                             \n\t"   \
        "       b       99f                             \n\t"   \
        "2:     nop                                     \n\t"   \
        "       b       99f                             \n\t"   \
-       ".if "#xtra_dist"                               \n\t"   \
-       ".space "#xtra_dist"                            \n\t"   \
-       ".endif                                         \n\t"   \
+       codex"                                          \n\t"   \
        TEST_INSTRUCTION(instruction)
 
 #define TESTCASE_END                                           \
@@ -301,47 +309,60 @@ struct test_arg_end {
        TESTCASE_START(code1 #reg1 code2)       \
        TEST_ARG_PTR(reg1, val1)                \
        TEST_ARG_END("")                        \
-       TEST_BRANCH_F(code1 #reg1 code2, 0)     \
+       TEST_BRANCH_F(code1 #reg1 code2)        \
        TESTCASE_END
 
-#define TEST_BF_X(code, xtra_dist)             \
+#define TEST_BF(code)                          \
        TESTCASE_START(code)                    \
        TEST_ARG_END("")                        \
-       TEST_BRANCH_F(code, xtra_dist)          \
+       TEST_BRANCH_F(code)                     \
        TESTCASE_END
 
-#define TEST_BB_X(code, xtra_dist)             \
+#define TEST_BB(code)                          \
        TESTCASE_START(code)                    \
        TEST_ARG_END("")                        \
-       TEST_BRANCH_B(code, xtra_dist)          \
+       TEST_BRANCH_B(code)                     \
        TESTCASE_END
 
-#define TEST_BF_RX(code1, reg, val, code2, xtra_dist)  \
-       TESTCASE_START(code1 #reg code2)                \
-       TEST_ARG_REG(reg, val)                          \
-       TEST_ARG_END("")                                \
-       TEST_BRANCH_F(code1 #reg code2, xtra_dist)      \
+#define TEST_BF_R(code1, reg, val, code2)      \
+       TESTCASE_START(code1 #reg code2)        \
+       TEST_ARG_REG(reg, val)                  \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_F(code1 #reg code2)         \
        TESTCASE_END
 
-#define TEST_BB_RX(code1, reg, val, code2, xtra_dist)  \
-       TESTCASE_START(code1 #reg code2)                \
-       TEST_ARG_REG(reg, val)                          \
-       TEST_ARG_END("")                                \
-       TEST_BRANCH_B(code1 #reg code2, xtra_dist)      \
+#define TEST_BB_R(code1, reg, val, code2)      \
+       TESTCASE_START(code1 #reg code2)        \
+       TEST_ARG_REG(reg, val)                  \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_B(code1 #reg code2)         \
        TESTCASE_END
 
-#define TEST_BF(code)  TEST_BF_X(code, 0)
-#define TEST_BB(code)  TEST_BB_X(code, 0)
-
-#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
-#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
-
 #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3)        \
        TESTCASE_START(code1 #reg1 code2 #reg2 code3)           \
        TEST_ARG_REG(reg1, val1)                                \
        TEST_ARG_REG(reg2, val2)                                \
        TEST_ARG_END("")                                        \
-       TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0)         \
+       TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3)            \
+       TESTCASE_END
+
+#define TEST_BF_X(code, codex)                 \
+       TESTCASE_START(code)                    \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_FX(code, codex)             \
+       TESTCASE_END
+
+#define TEST_BB_X(code, codex)                 \
+       TESTCASE_START(code)                    \
+       TEST_ARG_END("")                        \
+       TEST_BRANCH_BX(code, codex)             \
+       TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex)      \
+       TESTCASE_START(code1 #reg code2)                \
+       TEST_ARG_REG(reg, val)                          \
+       TEST_ARG_END("")                                \
+       TEST_BRANCH_FX(code1 #reg code2, codex)         \
        TESTCASE_END
 
 #define TEST_X(code, codex)                    \
@@ -372,6 +393,25 @@ struct test_arg_end {
        TESTCASE_END
 
 
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so the compiler guesses better the length of inline asm
+ * code and will spill the literal pool early enough to avoid generating PC
+ * relative loads with out of range offsets.
+ */
+#define TWICE(x)       x x
+#define SPACE_0x8      TWICE(".space 4\n\t")
+#define SPACE_0x10     TWICE(SPACE_0x8)
+#define SPACE_0x20     TWICE(SPACE_0x10)
+#define SPACE_0x40     TWICE(SPACE_0x20)
+#define SPACE_0x80     TWICE(SPACE_0x40)
+#define SPACE_0x100    TWICE(SPACE_0x80)
+#define SPACE_0x200    TWICE(SPACE_0x100)
+#define SPACE_0x400    TWICE(SPACE_0x200)
+#define SPACE_0x800    TWICE(SPACE_0x400)
+#define SPACE_0x1000   TWICE(SPACE_0x800)
+
+
 /* Various values used in test cases... */
 #define N(val) (val ^ 0xffffffff)
 #define VAL1   0x12345678
index 24e2347be6b1043ad7cf87f70ab88fe6e9a270c9..8e9c98edc0682a8aa23790737a3b63b08dea847f 100644 (file)
@@ -343,19 +343,25 @@ validate_group(struct perf_event *event)
 {
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;
+       DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
 
-       memset(&fake_pmu, 0, sizeof(fake_pmu));
+       /*
+        * Initialise the fake PMU. We only need to populate the
+        * used_mask for the purposes of validation.
+        */
+       memset(fake_used_mask, 0, sizeof(fake_used_mask));
+       fake_pmu.used_mask = fake_used_mask;
 
        if (!validate_event(&fake_pmu, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_pmu, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
        int i, err, irq, irqs;
        struct platform_device *pmu_device = armpmu->plat_device;
 
+       if (!pmu_device)
+               return -ENODEV;
+
        err = reserve_pmu(armpmu->type);
        if (err) {
                pr_warning("unable to reserve pmu\n");
index 2c3407ee857675242c874f49adaf9d26339f79bf..2334bf8a650a16a35d5849a4b6c309d5f19eb1f1 100644 (file)
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
 {
        clear_bit_unlock(type, pmu_lock);
 }
+EXPORT_SYMBOL_GPL(release_pmu);
index 75316f0dd02ae3b0be19e10a982f6a5c7e606ff3..3d0c6fb74ae4efe521cfc563ea11e0fa9738d465 100644 (file)
@@ -192,6 +192,9 @@ void cpu_idle(void)
 #endif
 
                        local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+                       wmb();
+#endif
                        if (hlt_counter) {
                                local_irq_enable();
                                cpu_relax();
index 1040c00405d0f362916c77030249fff754eda5e8..8200deaa14f680b553bdea41652fac0aaa58e23a 100644 (file)
@@ -43,7 +43,7 @@
 
 struct cputopo_arm cpu_topology[NR_CPUS];
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_topology[cpu].core_sibling;
 }
index 10d868a5a48135840278022e153f3610b0b7a640..d6408d1ee543fe5e3ceabbcda01b25efb07676ba 100644 (file)
@@ -1,5 +1,9 @@
+#include <asm/unwind.h>
+
 #if __LINUX_ARM_ARCH__ >= 6
-       .macro  bitop, instr
+       .macro  bitop, name, instr
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        cmp     r0, #0
        bne     1b
        bx      lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 
-       .macro  testop, instr, store
+       .macro  testop, name, instr, store
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        cmp     r0, #0
        movne   r0, #1
 2:     bx      lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 #else
-       .macro  bitop, instr
+       .macro  bitop, name, instr
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        and     r2, r0, #31
@@ -49,6 +61,8 @@
        str     r2, [r1, r0, lsl #2]
        restore_irqs ip
        mov     pc, lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 
 /**
@@ -59,7 +73,9 @@
  * Note: we can trivially conditionalise the store instruction
  * to avoid dirtying the data cache.
  */
-       .macro  testop, instr, store
+       .macro  testop, name, instr, store
+ENTRY( \name           )
+UNWIND(        .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        and     r3, r0, #31
@@ -73,5 +89,7 @@
        moveq   r0, #0
        restore_irqs ip
        mov     pc, lr
+UNWIND(        .fnend          )
+ENDPROC(\name          )
        .endm
 #endif
index 68ed5b62e83976d906bad4409fdabda385caaf96..f4027862172f8a4f1082ae06d7568615f3e5ad45 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_change_bit)
-       bitop   eor
-ENDPROC(_change_bit)
+bitop  _change_bit, eor
index 4c04c3b51eeb0d11bc3b755044b0a63ca6f0db1b..f6b75fb64d30557c5b22655adec3ba2bd34b0c07 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_clear_bit)
-       bitop   bic
-ENDPROC(_clear_bit)
+bitop  _clear_bit, bic
index bbee5c66a23e177494875e5db4fc19da97ed0d73..618fedae4b370aac8b65c2a39ae51046b0ff711a 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                .text
 
-ENTRY(_set_bit)
-       bitop   orr
-ENDPROC(_set_bit)
+bitop  _set_bit, orr
index 15a4d431f229440979aaf179fd423eb647b8da0f..4becdc3a59cbb60717bae345ec7b3f58243abdb6 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_change_bit)
-       testop  eor, str
-ENDPROC(_test_and_change_bit)
+testop _test_and_change_bit, eor, str
index 521b66b5b95da197fa661f142547414399a0af59..918841dcce7ad57ef5e880f1c09547404070fbe5 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_clear_bit)
-       testop  bicne, strne
-ENDPROC(_test_and_clear_bit)
+testop _test_and_clear_bit, bicne, strne
index 1c98cc2185bb0885ae0cac805193f52d34345608..8d1b2fe9e4873ba8d53ba75773a7e30e92292b7c 100644 (file)
@@ -12,6 +12,4 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_set_bit)
-       testop  orreq, streq
-ENDPROC(_test_and_set_bit)
+testop _test_and_set_bit, orreq, streq
index 66591fa53e057d59011a533a2037cfc976116790..ad930688358ca1c5683e984dc1b85799b582c5c0 100644 (file)
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index b84a9f642f5953a5ff527d3b593b1a628b61232b..0d20677fbef027591c91c2d442d528f7fa6c73f0 100644 (file)
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-       CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-       CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-       CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+       CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+       CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
        /* more usart lookup table for DT entries */
        CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
index 25e3464fb07f1fabe1714d009efd9ef8bb4783e2..629fa977497239f171d66ef47563c3da9d2b0127 100644 (file)
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ae78f4d03b738851b5e0ef191c26997c34304d9a..a178b58b0b9c8d59850ca91f0ecab9203198bbcc 100644 (file)
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ad017eb1f8df4c2ff9514cae10e0d555269ac09c..d5fbac9ff4faed0da1c112869b5c5144c4cfe2f6 100644 (file)
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index 8f4866045b41e213172c761f522ffca720a5802e..ec164a4124c9b27aa0c3b72325dc91ec13ddc673 100644 (file)
@@ -19,7 +19,7 @@
 #define BOARD_HAVE_NAND_16BIT  (1 << 31)
 static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_16BIT;
+       return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index 1d7d2499522674f94143771e84aa26f323869ac7..6659a90dbcadafffdc6ae4988f1a1c6148a955b5 100644 (file)
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
        .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
        .tdm_slots      = 2,
        .serial_dir     = da850_iis_serializer_direction,
-       .asp_chan_q     = EVENTQ_1,
+       .asp_chan_q     = EVENTQ_0,
        .version        = MCASP_VERSION_2,
        .txnumevt       = 1,
        .rxnumevt       = 1,
index 1918ae711428b3d5b9cf2556c212bfc156f90664..46e1f4173b9735c622c8a95c5c9a605782c1eda6 100644 (file)
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
                /* UBL (a few copies) plus U-Boot */
                .name           = "bootloader",
                .offset         = 0,
-               .size           = 28 * NAND_BLOCK_SIZE,
+               .size           = 30 * NAND_BLOCK_SIZE,
                .mask_flags     = MTD_WRITEABLE, /* force read-only */
        }, {
                /* U-Boot environment */
index e574d7f837a850e4ddd5efb02f102c47d4e10cd9..635bf7740157bb7ea88b94580c98ead839f44068 100644 (file)
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
        int val;
        u32 value;
 
-       if (!vpif_vsclkdis_reg || !cpld_client)
+       if (!vpif_vidclkctl_reg || !cpld_client)
                return -ENXIO;
 
        val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                return val;
 
        spin_lock_irqsave(&vpif_reg_lock, flags);
-       value = __raw_readl(vpif_vsclkdis_reg);
+       value = __raw_readl(vpif_vidclkctl_reg);
        if (mux_mode) {
                val &= VPIF_INPUT_TWO_CHANNEL;
                value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                val |= VPIF_INPUT_ONE_CHANNEL;
                value &= ~VIDCH1CLK;
        }
-       __raw_writel(value, vpif_vsclkdis_reg);
+       __raw_writel(value, vpif_vidclkctl_reg);
        spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
        err = i2c_smbus_write_byte(cpld_client, val);
index 0b68ed534f8e6d3d0a16effc5cc4811a8f94f85d..af27c130595fb6897cb104253ad157f567d53f04 100644 (file)
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
        .name = "dsp",
        .parent = &pll1_sysclk1,
        .lpsc = DM646X_LPSC_C64X_CPU,
-       .flags = PSC_DSP,
        .usecount = 1,                  /* REVISIT how to disable? */
 };
 
index fa59c097223dc85ec965224e9ebfc0ac8dccf2c2..8bc3fc2561711801610461c46b803a7fbfbfde9a 100644 (file)
 #define PTCMD          0x120
 #define PTSTAT         0x128
 #define PDSTAT         0x200
-#define PDCTL1         0x304
+#define PDCTL          0x300
 #define MDSTAT         0x800
 #define MDCTL          0xA00
 
 #define PSC_STATE_ENABLE       3
 
 #define MDSTAT_STATE_MASK      0x3f
+#define PDSTAT_STATE_MASK      0x1f
 #define MDCTL_FORCE            BIT(31)
+#define PDCTL_NEXT             BIT(1)
+#define PDCTL_EPCGOOD          BIT(8)
 
 #ifndef __ASSEMBLER__
 
index 1fb6bdff38c1f5e9ff796e72c4c57be824919209..d7e210f4b55c85d7e2a2fc4831fb3f0a77f72990 100644 (file)
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                unsigned int id, bool enable, u32 flags)
 {
-       u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+       u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
        void __iomem *psc_base;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
        u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                mdctl |= MDCTL_FORCE;
        __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
 
-       pdstat = __raw_readl(psc_base + PDSTAT);
-       if ((pdstat & 0x00000001) == 0) {
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x1;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+       pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+       if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_NEXT;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
 
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                        epcpr = __raw_readl(psc_base + EPCPR);
                } while ((((epcpr >> domain) & 1) == 0));
 
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x100;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_EPCGOOD;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
        } else {
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
index 35f6502144ae14c9ffa989066f51add9ee084375..4ebb382c597918e1053a221b35bd52a9e9d1b39a 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/init.h>
 #include <linux/cpuidle.h>
 #include <linux/io.h>
+#include <linux/export.h>
+#include <linux/time.h>
 
 #include <asm/proc-fns.h>
 
index b82dcf08e747e1052ea13b4a11feef25b7f7775b..88660d500f5be259bde8bd94389805881cf8975e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
+#include <linux/smp.h>
 
 #include <asm/cacheflush.h>
 #include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
+#ifdef CONFIG_SMP
+       cpu = cpu_logical_map(cpu);
+#endif
        writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
index 5f7f9c2a34aec39cdd4f312c326cd0005d5ea027..c44aa974e79c473d123631236269bdb8c2daa8a3 100644 (file)
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
 config HAVE_IMX_SRC
        bool
 
-#
-# ARCH_MX31 and ARCH_MX35 are left for compatibility
-# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
-# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
-# more sensible) names are used: SOC_IMX31 and SOC_IMX35
 config ARCH_MX1
        bool
 
@@ -27,12 +22,6 @@ config ARCH_MX25
 config MACH_MX27
        bool
 
-config ARCH_MX31
-       bool
-
-config ARCH_MX35
-       bool
-
 config SOC_IMX1
        bool
        select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
        select CPU_V6
        select IMX_HAVE_PLATFORM_MXC_RNGA
        select ARCH_MXC_AUDMUX_V2
-       select ARCH_MX31
        select MXC_AVIC
        select SMP_ON_UP if SMP
 
@@ -82,7 +70,6 @@ config SOC_IMX35
        select ARCH_MXC_IOMUX_V3
        select ARCH_MXC_AUDMUX_V2
        select HAVE_EPIT
-       select ARCH_MX35
        select MXC_AVIC
        select SMP_ON_UP if SMP
 
index 613a1b993bff9f7fbf9f71595b5ffa8c74922d4d..039a7abb165a35f4afc10ae08dbb7cc62f1121f7 100644 (file)
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = {
        imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
 };
 
+void __init imx6q_clock_map_io(void)
+{
+       iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
+}
+
 int __init mx6q_clocks_init(void)
 {
        struct device_node *np;
        void __iomem *base;
        int i, irq;
 
-       iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-
        /* retrieve the freqency of fixed clocks from device tree */
        for_each_compatible_node(np, NULL, "fixed-clock") {
                u32 rate;
index 8bf5fa349484e2da42a4c6deea64bc042b1263e3..8deb012189b5a7e185f7299a8ad8c8503edad587 100644 (file)
@@ -34,16 +34,18 @@ static void __init imx6q_map_io(void)
 {
        imx_lluart_map_io();
        imx_scu_map_io();
+       imx6q_clock_map_io();
 }
 
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx6q gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx6q_irq_match[] __initconst = {
index 9f0e82ec3398dfcf7e83c8df246e54cdf53b9503..31807d2a8b7bf1a65d57c8f4cb748bb9e6697934 100644 (file)
 static void imx3_idle(void)
 {
        unsigned long reg = 0;
-       __asm__ __volatile__(
-               /* disable I and D cache */
-               "mrc p15, 0, %0, c1, c0, 0\n"
-               "bic %0, %0, #0x00001000\n"
-               "bic %0, %0, #0x00000004\n"
-               "mcr p15, 0, %0, c1, c0, 0\n"
-               /* invalidate I cache */
-               "mov %0, #0\n"
-               "mcr p15, 0, %0, c7, c5, 0\n"
-               /* clear and invalidate D cache */
-               "mov %0, #0\n"
-               "mcr p15, 0, %0, c7, c14, 0\n"
-               /* WFI */
-               "mov %0, #0\n"
-               "mcr p15, 0, %0, c7, c0, 4\n"
-               "nop\n" "nop\n" "nop\n" "nop\n"
-               "nop\n" "nop\n" "nop\n"
-               /* enable I and D cache */
-               "mrc p15, 0, %0, c1, c0, 0\n"
-               "orr %0, %0, #0x00001000\n"
-               "orr %0, %0, #0x00000004\n"
-               "mcr p15, 0, %0, c1, c0, 0\n"
-               : "=r" (reg));
+
+       if (!need_resched())
+               __asm__ __volatile__(
+                       /* disable I and D cache */
+                       "mrc p15, 0, %0, c1, c0, 0\n"
+                       "bic %0, %0, #0x00001000\n"
+                       "bic %0, %0, #0x00000004\n"
+                       "mcr p15, 0, %0, c1, c0, 0\n"
+                       /* invalidate I cache */
+                       "mov %0, #0\n"
+                       "mcr p15, 0, %0, c7, c5, 0\n"
+                       /* clear and invalidate D cache */
+                       "mov %0, #0\n"
+                       "mcr p15, 0, %0, c7, c14, 0\n"
+                       /* WFI */
+                       "mov %0, #0\n"
+                       "mcr p15, 0, %0, c7, c0, 4\n"
+                       "nop\n" "nop\n" "nop\n" "nop\n"
+                       "nop\n" "nop\n" "nop\n"
+                       /* enable I and D cache */
+                       "mrc p15, 0, %0, c1, c0, 0\n"
+                       "orr %0, %0, #0x00001000\n"
+                       "orr %0, %0, #0x00000004\n"
+                       "mcr p15, 0, %0, c1, c0, 0\n"
+                       : "=r" (reg));
+       local_irq_enable();
 }
 
 static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
        l2x0_init(l2x0_base, 0x00030024, 0x00000000);
 }
 
+#ifdef CONFIG_SOC_IMX31
 static struct map_desc mx31_io_desc[] __initdata = {
        imx_map_entry(MX31, X_MEMC, MT_DEVICE),
        imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
        iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
 }
 
-static struct map_desc mx35_io_desc[] __initdata = {
-       imx_map_entry(MX35, X_MEMC, MT_DEVICE),
-       imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
-       imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
-       imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
-       imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
-};
-
-void __init mx35_map_io(void)
-{
-       iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
-}
-
 void __init imx31_init_early(void)
 {
        mxc_set_cpu_type(MXC_CPU_MX31);
        mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
-       imx_idle = imx3_idle;
-       imx_ioremap = imx3_ioremap;
-}
-
-void __init imx35_init_early(void)
-{
-       mxc_set_cpu_type(MXC_CPU_MX35);
-       mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
-       mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
-       imx_idle = imx3_idle;
+       pm_idle = imx3_idle;
        imx_ioremap = imx3_ioremap;
 }
 
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
        mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
 }
 
-void __init mx35_init_irq(void)
-{
-       mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
-}
-
 static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
        .per_2_per_addr = 1677,
 };
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
 
        imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
 }
+#endif /* ifdef CONFIG_SOC_IMX31 */
+
+#ifdef CONFIG_SOC_IMX35
+static struct map_desc mx35_io_desc[] __initdata = {
+       imx_map_entry(MX35, X_MEMC, MT_DEVICE),
+       imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
+       imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
+       imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
+       imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
+};
+
+void __init mx35_map_io(void)
+{
+       iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
+}
+
+void __init imx35_init_early(void)
+{
+       mxc_set_cpu_type(MXC_CPU_MX35);
+       mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
+       mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
+       pm_idle = imx3_idle;
+       imx_ioremap = imx3_ioremap;
+}
+
+void __init mx35_init_irq(void)
+{
+       mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
+}
 
 static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
        .ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
 
        imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
 }
+#endif /* ifdef CONFIG_SOC_IMX35 */
index 36cacbd0dcc2fa8c5d98bd5cb1e141a9d30e3503..a8e33681b73251f498a7cfee463f20c3f6d0e1ff 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/smp.h>
 #include <asm/unified.h>
 
 #define SRC_SCR                                0x000
 
 static void __iomem *src_base;
 
+#ifndef CONFIG_SMP
+#define cpu_logical_map(cpu)           0
+#endif
+
 void imx_enable_cpu(int cpu, bool enable)
 {
        u32 mask, val;
 
+       cpu = cpu_logical_map(cpu);
        mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
        val = readl_relaxed(src_base + SRC_SCR);
        val = enable ? val | mask : val & ~mask;
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable)
 
 void imx_set_cpu_jump(int cpu, void *jump_addr)
 {
+       cpu = cpu_logical_map(cpu);
        writel_relaxed(BSYM(virt_to_phys(jump_addr)),
                       src_base + SRC_GPR1 + cpu * 8);
 }
index 69156568bc41f95891df3c17ca06836a9c3bf5bb..4665767a4f79ee918ec1fd24053d296ce13d0806 100644 (file)
@@ -182,7 +182,7 @@ static void __init gplugd_init(void)
 
        /* on-chip devices */
        pxa168_add_uart(3);
-       pxa168_add_ssp(0);
+       pxa168_add_ssp(1);
        pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
 
        pxa168_add_eth(&gplugd_eth_platform_data);
index d14eeaf163226d3b2ff53c394976be090b1e4d62..99b4ce1b6562cebf64651b9a0039599d624593d6 100644 (file)
@@ -7,7 +7,7 @@
 #define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
 
 #define BANK_OFF(n)    (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x)    (GPIO_REGS_VIRT + (x))
+#define GPIO_REG(x)    (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
 
 #define NR_BUILTIN_GPIO                IRQ_GPIO_NUM
 
index 24030d0da6e3c59ec9ddfa2a76995001f747bc62..0fb7a17df3987e259297d21de210971d2f65fb80 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
+#include <linux/module.h>
 #include <mach/irqs.h>
 #include <mach/iommu.h>
 
index 5c5328257dca2f5560ccaa33bf98a27b80582b27..5e2e7a8438606f43015d90115046e555eee1a3c5 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <mach/hardware.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 static int mx5_cpu_rev = -1;
 
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
        if (!cpu_is_mx51())
                return 0;
 
-       if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) {
+       if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
+                       (elf_hwcap & HWCAP_NEON)) {
                elf_hwcap &= ~HWCAP_NEON;
                pr_info("Turning off NEON support, detected broken NEON implementation\n");
        }
index ccc61585659bdb17d55bb91bfc1cbb2ebf6cca1c..596edd967dbfef9a21b009b327f42cdda3ad60b0 100644 (file)
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 4; /* imx51 gets 4 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx51_irq_match[] __initconst = {
index ccaa0b81b7683f86b7750ca852f6b8b67d80eabd..85bfd5ff21b0bb925583679260321fbc7e25731b 100644 (file)
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx53 gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx53_irq_match[] __initconst = {
index 26eacc9d0d90fbf88930bd6a009c58efe6914c70..df4a508f240a04a47ce8bb5abbf6074f843c8aae 100644 (file)
@@ -23,7 +23,9 @@
 
 static void imx5_idle(void)
 {
-       mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+       if (!need_resched())
+               mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+       local_irq_enable();
 }
 
 /*
@@ -89,7 +91,7 @@ void __init imx51_init_early(void)
        mxc_set_cpu_type(MXC_CPU_MX51);
        mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
        mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
-       imx_idle = imx5_idle;
+       pm_idle = imx5_idle;
 }
 
 void __init imx53_init_early(void)
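
For context on the imx5_idle()/pm_idle change above: the generic idle loop calls the hook with interrupts disabled, so the hook must honour a pending reschedule and re-enable interrupts before returning. A stand-alone sketch of that contract (every name here is a stand-in, not kernel code):

#include <stdbool.h>

/* Stand-ins for kernel primitives, for illustration only. */
static bool resched_pending;
static bool irqs_on;

static bool need_resched(void)     { return resched_pending; }
static void local_irq_enable(void) { irqs_on = true; }
static void enter_low_power(void)  { /* e.g. WAIT_UNCLOCKED_POWER_OFF */ }

/* Skip the low-power state when a reschedule is pending, and always
 * re-enable interrupts on the way out -- what the imx5_idle() hunk adds. */
static void idle_hook(void)
{
        if (!need_resched())
                enter_low_power();
        local_irq_enable();
}

int main(void)
{
        idle_hook();
        return (int)!irqs_on;   /* expect IRQs re-enabled -> exit status 0 */
}
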
index 229ae3494216da2920bddb5164d66ce523a6ddeb..da6e4aad177c2097b12515b2ddd2572e8b397ea2 100644 (file)
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)             \
        reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
        reg &= ~BM_CLKCTRL_##dr##_DIV;                                  \
        reg |= div << BP_CLKCTRL_##dr##_DIV;                            \
-       if (reg | (1 << clk->enable_shift)) {                           \
+       if (reg & (1 << clk->enable_shift)) {                           \
                pr_err("%s: clock is gated\n", __func__);               \
                return -EINVAL;                                         \
        }                                                               \
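
The one-character fix above replaces a bitwise OR, which is non-zero regardless of the register contents and so always reported the clock as gated, with an AND that actually tests the gate bit. A small sketch of the corrected test (the bit position is assumed for illustration):

#include <assert.h>

#define ENABLE_SHIFT 31   /* gate-bit position assumed for illustration */

/* reg | (1 << ENABLE_SHIFT) is non-zero for any reg, so the old test always
 * fired; '&' isolates the single clock-gate bit. */
static int is_gated(unsigned int reg)
{
        return (reg & (1u << ENABLE_SHIFT)) != 0;
}

int main(void)
{
        assert(!is_gated(0x00000007u));   /* gate bit clear: clock running */
        assert(is_gated(0x80000007u));    /* gate bit set: clock gated */
        return 0;
}
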
index 75d86118b76a2c524cea92c7320d25bb7d4225a5..30c7990f3c01d65c1f05ad9d425a090381661c9b 100644 (file)
 #define MX28_INT_CAN1                  9
 #define MX28_INT_LRADC_TOUCH           10
 #define MX28_INT_HSADC                 13
-#define MX28_INT_IRADC_THRESH0         14
-#define MX28_INT_IRADC_THRESH1         15
+#define MX28_INT_LRADC_THRESH0         14
+#define MX28_INT_LRADC_THRESH1         15
 #define MX28_INT_LRADC_CH0             16
 #define MX28_INT_LRADC_CH1             17
 #define MX28_INT_LRADC_CH2             18
index 0d2d2b470998a9d2cfac229654a9ee2135272a2f..bde5f6634747c639af514ecf9d699b1176891802 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define cpu_is_mx23()          (                                       \
                machine_is_mx23evk() ||                                 \
+               machine_is_stmp378x() ||                                \
                0)
 #define cpu_is_mx28()          (                                       \
                machine_is_mx28evk() ||                                 \
index 3b1681e4f49a1ae633e5ac4b3d31137f2f92b014..6b00577b70256254e29951bdf8828a2ecd613fdb 100644 (file)
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
 MACHINE_START(M28EVK, "DENX M28 EVK")
        .map_io         = mx28_map_io,
        .init_irq       = mx28_init_irq,
-       .init_machine   = m28evk_init,
        .timer          = &m28evk_timer,
+       .init_machine   = m28evk_init,
 MACHINE_END
index 177e53123a02e5b67f617e5031c047354703103c..6834dea38c04cce77e20bffc2baa3019327d9610 100644 (file)
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
 MACHINE_START(STMP378X, "STMP378X")
        .map_io         = mx23_map_io,
        .init_irq       = mx23_init_irq,
-       .init_machine   = stmp378x_dvb_init,
        .timer          = &stmp378x_dvb_timer,
+       .init_machine   = stmp378x_dvb_init,
 MACHINE_END
index 0fcff47009cf13ff53db9757e58c9dc0cbf5b927..9a7b08b2a92559caf1028df076040c5e5dc798ae 100644 (file)
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
        MX28_PAD_ENET0_CRS__ENET1_RX_EN,
 };
 
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index e0a028161ddee89a4119014ed83420bd0a21ad4e..73f287d6429b629d57f7c093645962ce48cee318 100644 (file)
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC
 comment "OMAP CPU Speed"
        depends on ARCH_OMAP1
 
-config OMAP_CLOCKS_SET_BY_BOOTLOADER
-       bool "OMAP clocks set by bootloader"
-       depends on ARCH_OMAP1
-       help
-         Enable this option to prevent the kernel from overriding the clock
-         frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
-         internal LCD controller and MPU peripherals.
-
 config OMAP_ARM_216MHZ
        bool "OMAP ARM 216 MHz CPU (1710 only)"
         depends on ARCH_OMAP1 && ARCH_OMAP16XX
index 51bae31cf361289e5f2711ad46a41afd4b01b09c..b0f15d234a12b4ad9dd1beb34da7df187e3f86b4 100644 (file)
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
        omap_cfg_reg(J19_1610_CAM_D6);
        omap_cfg_reg(J18_1610_CAM_D7);
 
-       iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
-
        omap_board_config = ams_delta_config;
        omap_board_config_size = ARRAY_SIZE(ams_delta_config);
        omap_serial_init();
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void)
 }
 arch_initcall(ams_delta_modem_init);
 
+static void __init ams_delta_map_io(void)
+{
+       omap15xx_map_io();
+       iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
+}
+
 MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
        /* Maintainer: Jonathan McDowell <noodles@earth.li> */
        .atag_offset    = 0x100,
-       .map_io         = omap15xx_map_io,
+       .map_io         = ams_delta_map_io,
        .init_early     = omap1_init_early,
        .reserve        = omap_reserve,
        .init_irq       = omap1_init_irq,
index eaf09efb91caec613eeb73e961e7fbd77a93cf21..16b1423b454a32dea7197460a393b12a4bd4e2b0 100644 (file)
@@ -17,7 +17,8 @@
 
 #include <plat/clock.h>
 
-extern int __init omap1_clk_init(void);
+int omap1_clk_init(void);
+void omap1_clk_late_init(void);
 extern int omap1_clk_enable(struct clk *clk);
 extern void omap1_clk_disable(struct clk *clk);
 extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
index 92400b9eb69f10c419c7e1295e238d56d5ef6110..9ff90a744a2140a0bf5168403301ac9cec68b927 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/mach-types.h>  /* for machine_is_* */
@@ -767,6 +769,15 @@ static struct clk_functions omap1_clk_functions = {
        .clk_disable_unused     = omap1_clk_disable_unused,
 };
 
+static void __init omap1_show_rates(void)
+{
+       pr_notice("Clocking rate (xtal/DPLL1/MPU): "
+                       "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+               ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+               ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+               arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+}
+
 int __init omap1_clk_init(void)
 {
        struct omap_clk *c;
@@ -835,9 +846,12 @@ int __init omap1_clk_init(void)
        /* We want to be in synchronous scalable mode */
        omap_writew(0x1000, ARM_SYSST);
 
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
-       /* Use values set by bootloader. Determine PLL rate and recalculate
-        * dependent clocks as if kernel had changed PLL or divisors.
+
+       /*
+        * Initially use the values set by bootloader. Determine PLL rate and
+        * recalculate dependent clocks as if kernel had changed PLL or
+        * divisors. See also omap1_clk_late_init() that can reprogram dpll1
+        * after the SRAM is initialized.
         */
        {
                unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +876,10 @@ int __init omap1_clk_init(void)
                        }
                }
        }
-#else
-       /* Find the highest supported frequency and enable it */
-       if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
-               printk(KERN_ERR "System frequencies not set. Check your config.\n");
-               /* Guess sane values (60MHz) */
-               omap_writew(0x2290, DPLL_CTL);
-               omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
-               ck_dpll1.rate = 60000000;
-       }
-#endif
        propagate_rate(&ck_dpll1);
        /* Cache rates for clocks connected to ck_ref (not dpll1) */
        propagate_rate(&ck_ref);
-       printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
-               "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
-              ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
-              ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
-              arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
+       omap1_show_rates();
        if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
                /* Select slicer output as OMAP input clock */
                omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +924,27 @@ int __init omap1_clk_init(void)
 
        return 0;
 }
+
+#define OMAP1_DPLL1_SANE_VALUE 60000000
+
+void __init omap1_clk_late_init(void)
+{
+       unsigned long rate = ck_dpll1.rate;
+
+       if (rate >= OMAP1_DPLL1_SANE_VALUE)
+               return;
+
+       /* System booting at unusable rate, force reprogramming of DPLL1 */
+       ck_dpll1_p->rate = 0;
+
+       /* Find the highest supported frequency and enable it */
+       if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+               pr_err("System frequencies not set, using default. Check your config.\n");
+               omap_writew(0x2290, DPLL_CTL);
+               omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
+               ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
+       }
+       propagate_rate(&ck_dpll1);
+       omap1_show_rates();
+       loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
+}
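
omap1_clk_late_init() above rescales loops_per_jiffy after reprogramming DPLL1, because the delay loop was calibrated at the slower boot-time rate. The scaling is proportional to the frequency change; a simplified sketch (example numbers only, rounding details of the kernel helper omitted):

#include <stdio.h>

/* Proportional rescaling of a calibrated delay-loop constant after the CPU
 * clock changes, in the spirit of the cpufreq_scale() call above. */
static unsigned long scale_lpj(unsigned long lpj, unsigned long old_hz,
                               unsigned long new_hz)
{
        return (unsigned long)(((unsigned long long)lpj * new_hz) / old_hz);
}

int main(void)
{
        /* Calibrated at a 30 MHz boot rate, DPLL1 reprogrammed to 60 MHz. */
        printf("%lu\n", scale_lpj(149504UL, 30000000UL, 60000000UL));
        return 0;
}
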
index 48ef9888e820e5d95ecbd61274d2904a712ce63f..475cb2f50d872f326325991b76d0c5348aa5cfd2 100644 (file)
@@ -30,6 +30,8 @@
 #include <plat/omap7xx.h>
 #include <plat/mcbsp.h>
 
+#include "clock.h"
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
                return -ENODEV;
 
        omap_sram_init();
+       omap1_clk_late_init();
 
        /* please keep these calls, and their implementations above,
         * in alphabetical order so they're easier to sort through.
index 503414718905862d12e104b7940bfc7dc1524971..e1293aa513d338fe19ffd7990b189109b7ca0787 100644 (file)
@@ -334,6 +334,7 @@ config MACH_OMAP4_PANDA
 config OMAP3_EMU
        bool "OMAP3 debugging peripherals"
        depends on ARCH_OMAP3
+       select ARM_AMBA
        select OC_ETM
        help
          Say Y here to enable debugging hardware of omap3
index 69ab1c069134ccee42da01c015aea9d219408f4a..b009f17dee5606de2acbb1b7ba25c8938d0c5adb 100644 (file)
@@ -4,7 +4,7 @@
 
 # Common support
 obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
-        common.o gpio.o dma.o wd_timer.o
+        common.o gpio.o dma.o wd_timer.o display.o
 
 omap-2-3-common                                = irq.o sdrc.o
 hwmod-common                           = omap_hwmod.o \
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X)         := gpmc-smsc911x.o
 obj-y                                  += $(smsc911x-m) $(smsc911x-y)
 obj-$(CONFIG_ARCH_OMAP4)               += hwspinlock.o
 
-disp-$(CONFIG_OMAP2_DSS)               := display.o
-obj-y                                  += $(disp-m) $(disp-y)
-
 obj-y                                  += common-board-devices.o twl-common.o
index 1fe35c24fba278ee57614be628ce2c16babb36a4..942bb4f19f9fd6df5af92b1b551cb1a3f3bb2313 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
+#include <linux/export.h>
 
 #include <plat/prcm.h>
 #include <plat/irqs.h>
index adb2756e242f104bc8dd35e969ba4957d3407b7a..dce9905d64bb6e1af6c0b80d012142ffdfca49ee 100644 (file)
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
 #include <plat/omap-pm.h>
+#include <plat/common.h>
 
 #include "control.h"
+#include "display.h"
+
+#define DISPC_CONTROL          0x0040
+#define DISPC_CONTROL2         0x0238
+#define DISPC_IRQSTATUS                0x0018
+
+#define DSS_SYSCONFIG          0x10
+#define DSS_SYSSTATUS          0x14
+#define DSS_CONTROL            0x40
+#define DSS_SDI_CONTROL                0x44
+#define DSS_PLL_CONTROL                0x48
+
+#define LCD_EN_MASK            (0x1 << 0)
+#define DIGIT_EN_MASK          (0x1 << 1)
+
+#define FRAMEDONE_IRQ_SHIFT    0
+#define EVSYNC_EVEN_IRQ_SHIFT  2
+#define EVSYNC_ODD_IRQ_SHIFT   3
+#define FRAMEDONE2_IRQ_SHIFT   22
+#define FRAMEDONETV_IRQ_SHIFT  24
+
+/*
+ * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
+ *     reset before deciding that something has gone wrong
+ */
+#define FRAMEDONE_IRQ_TIMEOUT          100
 
 static struct platform_device omap_display_device = {
        .name          = "omapdss",
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
 
        return r;
 }
+
+static void dispc_disable_outputs(void)
+{
+       u32 v, irq_mask = 0;
+       bool lcd_en, digit_en, lcd2_en = false;
+       int i;
+       struct omap_dss_dispc_dev_attr *da;
+       struct omap_hwmod *oh;
+
+       oh = omap_hwmod_lookup("dss_dispc");
+       if (!oh) {
+               WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
+               return;
+       }
+
+       if (!oh->dev_attr) {
+               pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
+               return;
+       }
+
+       da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
+
+       /* store value of LCDENABLE and DIGITENABLE bits */
+       v = omap_hwmod_read(oh, DISPC_CONTROL);
+       lcd_en = v & LCD_EN_MASK;
+       digit_en = v & DIGIT_EN_MASK;
+
+       /* store value of LCDENABLE for LCD2 */
+       if (da->manager_count > 2) {
+               v = omap_hwmod_read(oh, DISPC_CONTROL2);
+               lcd2_en = v & LCD_EN_MASK;
+       }
+
+       if (!(lcd_en | digit_en | lcd2_en))
+               return; /* no managers currently enabled */
+
+       /*
+        * If any manager was enabled, we need to disable it before
+        * DSS clocks are disabled or DISPC module is reset
+        */
+       if (lcd_en)
+               irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
+
+       if (digit_en) {
+               if (da->has_framedonetv_irq) {
+                       irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
+               } else {
+                       irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
+                               1 << EVSYNC_ODD_IRQ_SHIFT;
+               }
+       }
+
+       if (lcd2_en)
+               irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
+
+       /*
+        * clear any previous FRAMEDONE, FRAMEDONETV,
+        * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
+        */
+       omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
+
+       /* disable LCD and TV managers */
+       v = omap_hwmod_read(oh, DISPC_CONTROL);
+       v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
+       omap_hwmod_write(v, oh, DISPC_CONTROL);
+
+       /* disable LCD2 manager */
+       if (da->manager_count > 2) {
+               v = omap_hwmod_read(oh, DISPC_CONTROL2);
+               v &= ~LCD_EN_MASK;
+               omap_hwmod_write(v, oh, DISPC_CONTROL2);
+       }
+
+       i = 0;
+       while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
+              irq_mask) {
+               i++;
+               if (i > FRAMEDONE_IRQ_TIMEOUT) {
+                       pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
+                       break;
+               }
+               mdelay(1);
+       }
+}
+
+#define MAX_MODULE_SOFTRESET_WAIT      10000
+int omap_dss_reset(struct omap_hwmod *oh)
+{
+       struct omap_hwmod_opt_clk *oc;
+       int c = 0;
+       int i, r;
+
+       if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
+               pr_err("dss_core: hwmod data doesn't contain reset data\n");
+               return -EINVAL;
+       }
+
+       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+               if (oc->_clk)
+                       clk_enable(oc->_clk);
+
+       dispc_disable_outputs();
+
+       /* clear SDI registers */
+       if (cpu_is_omap3430()) {
+               omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
+               omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
+       }
+
+       /*
+        * clear DSS_CONTROL register to switch DSS clock sources to
+        * PRCM clock, if any
+        */
+       omap_hwmod_write(0x0, oh, DSS_CONTROL);
+
+       omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
+                               & SYSS_RESETDONE_MASK),
+                       MAX_MODULE_SOFTRESET_WAIT, c);
+
+       if (c == MAX_MODULE_SOFTRESET_WAIT)
+               pr_warning("dss_core: waiting for reset to finish failed\n");
+       else
+               pr_debug("dss_core: softreset done\n");
+
+       for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+               if (oc->_clk)
+                       clk_disable(oc->_clk);
+
+       r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
+
+       return r;
+}
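
Both waits in the new code above, the FRAMEDONE wait in dispc_disable_outputs() and the SYSS_RESETDONE wait in omap_dss_reset(), follow the same bounded-poll idiom: re-read a status register until the expected bits appear or a fixed budget runs out. A self-contained sketch of that idiom (the status source here is simulated):

#include <stdio.h>

#define POLL_LIMIT 100   /* same spirit as FRAMEDONE_IRQ_TIMEOUT above */

/* Stand-in for a hardware status read; "completes" on the fifth poll. */
static unsigned int read_status(void)
{
        static int calls;
        return ++calls >= 5 ? 0x3u : 0x0u;
}

/* Bounded poll: return 0 once every bit in 'mask' is set, -1 on timeout.
 * A real driver would sleep or mdelay(1) between iterations. */
static int poll_for_bits(unsigned int mask)
{
        int i;

        for (i = 0; i < POLL_LIMIT; i++) {
                if ((read_status() & mask) == mask)
                        return 0;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", poll_for_bits(0x3u));   /* expect 0 */
        return 0;
}
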
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644 (file)
index 0000000..b871b01
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * display.h - OMAP2+ integration-specific DSS header
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+
+#include <linux/kernel.h>
+
+struct omap_dss_dispc_dev_attr {
+       u8      manager_count;
+       bool    has_framedonetv_irq;
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644 (file)
index e69de29..0000000
index 6b3088db83b7e916314615b0f19023ab22805e07..207a2ff9a8c4e1473c6c497ab8c969f64e0ac0c7 100644 (file)
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
                ohii = &oh->mpu_irqs[i++];
        } while (ohii->irq != -1);
 
-       return i;
+       return i-1;
 }
 
 /**
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
                ohdi = &oh->sdma_reqs[i++];
        } while (ohdi->dma_req != -1);
 
-       return i;
+       return i-1;
 }
 
 /**
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
                mem = &os->addr[i++];
        } while (mem->pa_start != mem->pa_end);
 
-       return i;
+       return i-1;
 }
 
 /**
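
The three counting fixes above share one cause: each helper walks a sentinel-terminated array with a post-increment, so the counter finishes one past the last real entry and the sentinel was being counted. A minimal sketch of the corrected count (struct and values are illustrative):

#include <assert.h>

struct irq_info { int irq; };

/* Count real entries in an array terminated by .irq == -1; the sentinel
 * itself is not counted, mirroring the "return i-1" fix above. */
static int count_irqs(const struct irq_info *arr)
{
        int i = 0;

        do {
                i++;
        } while (arr[i - 1].irq != -1);

        return i - 1;
}

int main(void)
{
        const struct irq_info irqs[] = { { 10 }, { 11 }, { -1 } };
        assert(count_irqs(irqs) == 2);
        return 0;
}
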
index 6d7206213525d03ac830d8ca6092ce4317863f37..a5409ce3f3233eaac531ed070fc1f9cf74e8398f 100644 (file)
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
 };
 
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+       /*
+        * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+        * driver does not use these clocks.
+        */
        { .role = "tv_clk", .clk = "dss_54m_fck" },
        { .role = "sys_clk", .clk = "dss2_fck" },
 };
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
        .slaves_cnt     = ARRAY_SIZE(omap2420_dss_slaves),
        .masters        = omap2420_dss_masters,
        .masters_cnt    = ARRAY_SIZE(omap2420_dss_masters),
-       .flags          = HWMOD_NO_IDLEST,
+       .flags          = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
 };
 
 /* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
        .slaves         = omap2420_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_dss_dispc_slaves),
        .flags          = HWMOD_NO_IDLEST,
+       .dev_attr       = &omap2_3_dss_dispc_dev_attr
 };
 
 /* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
        &omap2420_l4_core__dss_rfbi,
 };
 
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+       { .role = "ick", .clk = "dss_ick" },
+};
+
 static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
                        .module_offs = CORE_MOD,
                },
        },
+       .opt_clks       = dss_rfbi_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_rfbi_opt_clks),
        .slaves         = omap2420_dss_rfbi_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
 static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
        .master         = &omap2420_l4_core_hwmod,
        .slave          = &omap2420_dss_venc_hwmod,
-       .clk            = "dss_54m_fck",
+       .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
        .fw = {
                .omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
 static struct omap_hwmod omap2420_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap2_venc_hwmod_class,
-       .main_clk       = "dss1_fck",
+       .main_clk       = "dss_54m_fck",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
index a2580d01c3ff98b25a57b6508bd959d0e58c700f..c4f56cb60d7d676ddda36e232c8b385a58fef6ba 100644 (file)
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
 };
 
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+       /*
+        * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+        * driver does not use these clocks.
+        */
        { .role = "tv_clk", .clk = "dss_54m_fck" },
        { .role = "sys_clk", .clk = "dss2_fck" },
 };
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
        .slaves_cnt     = ARRAY_SIZE(omap2430_dss_slaves),
        .masters        = omap2430_dss_masters,
        .masters_cnt    = ARRAY_SIZE(omap2430_dss_masters),
-       .flags          = HWMOD_NO_IDLEST,
+       .flags          = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
 };
 
 /* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
        .slaves         = omap2430_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_dss_dispc_slaves),
        .flags          = HWMOD_NO_IDLEST,
+       .dev_attr       = &omap2_3_dss_dispc_dev_attr
 };
 
 /* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
        &omap2430_l4_core__dss_rfbi,
 };
 
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+       { .role = "ick", .clk = "dss_ick" },
+};
+
 static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
                        .module_offs = CORE_MOD,
                },
        },
+       .opt_clks       = dss_rfbi_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_rfbi_opt_clks),
        .slaves         = omap2430_dss_rfbi_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
 static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
        .master         = &omap2430_l4_core_hwmod,
        .slave          = &omap2430_dss_venc_hwmod,
-       .clk            = "dss_54m_fck",
+       .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
        .flags          = OCPIF_SWSUP_IDLE,
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
 static struct omap_hwmod omap2430_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap2_venc_hwmod_class,
-       .main_clk       = "dss1_fck",
+       .main_clk       = "dss_54m_fck",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
index c451729d289adfd17090c3be58c53937626835fe..c11273da5dcc33f046e94babfdb6f34c1d6f2778 100644 (file)
@@ -11,6 +11,7 @@
 #include <plat/omap_hwmod.h>
 #include <plat/serial.h>
 #include <plat/dma.h>
+#include <plat/common.h>
 
 #include <mach/irqs.h>
 
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0010,
        .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .sysc_flags     = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+                          SYSS_HAS_RESET_STATUS),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
 struct omap_hwmod_class omap2_dss_hwmod_class = {
        .name   = "dss",
        .sysc   = &omap2_dss_sysc,
+       .reset  = omap_dss_reset,
 };
 
 /*
index bc9035ec87fc59aa08410cdbc24156619c87c7a1..7f8915ad50990b1af2e877ed169c32cd0d15cd02 100644 (file)
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
 };
 
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
-       { .role = "tv_clk", .clk = "dss_tv_fck" },
-       { .role = "video_clk", .clk = "dss_96m_fck" },
+       /*
+        * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+        * driver does not use these clocks.
+        */
        { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+       { .role = "tv_clk", .clk = "dss_tv_fck" },
+       /* required only on OMAP3430 */
+       { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
 };
 
 static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
        .slaves_cnt     = ARRAY_SIZE(omap3430es1_dss_slaves),
        .masters        = omap3xxx_dss_masters,
        .masters_cnt    = ARRAY_SIZE(omap3xxx_dss_masters),
-       .flags          = HWMOD_NO_IDLEST,
+       .flags          = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
 };
 
 static struct omap_hwmod omap3xxx_dss_core_hwmod = {
        .name           = "dss_core",
+       .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .class          = &omap2_dss_hwmod_class,
        .main_clk       = "dss1_alwon_fck", /* instead of dss_fck */
        .sdma_reqs      = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
        .slaves         = omap3xxx_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
        .flags          = HWMOD_NO_IDLEST,
+       .dev_attr       = &omap2_3_dss_dispc_dev_attr
 };
 
 /*
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_dsi1_hwmod,
+       .clk            = "dss_ick",
        .addr           = omap3xxx_dss_dsi1_addrs,
        .fw = {
                .omap2 = {
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
        &omap3xxx_l4_core__dss_dsi1,
 };
 
+static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
+       { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
 static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
        .name           = "dss_dsi1",
        .class          = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
                        .module_offs = OMAP3430_DSS_MOD,
                },
        },
+       .opt_clks       = dss_dsi1_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_dsi1_opt_clks),
        .slaves         = omap3xxx_dss_dsi1_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
        &omap3xxx_l4_core__dss_rfbi,
 };
 
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+       { .role = "ick", .clk = "dss_ick" },
+};
+
 static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
        .name           = "dss_rfbi",
        .class          = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
                        .module_offs = OMAP3430_DSS_MOD,
                },
        },
+       .opt_clks       = dss_rfbi_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_rfbi_opt_clks),
        .slaves         = omap3xxx_dss_rfbi_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
        .flags          = HWMOD_NO_IDLEST,
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap3xxx_dss_venc_hwmod,
-       .clk            = "dss_tv_fck",
+       .clk            = "dss_ick",
        .addr           = omap2_dss_venc_addrs,
        .fw = {
                .omap2 = {
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
        &omap3xxx_l4_core__dss_venc,
 };
 
+static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
+       /* required only on OMAP3430 */
+       { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
+};
+
 static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap2_venc_hwmod_class,
-       .main_clk       = "dss1_alwon_fck",
+       .main_clk       = "dss_tv_fck",
        .prcm           = {
                .omap2 = {
                        .prcm_reg_id = 1,
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
                        .module_offs = OMAP3430_DSS_MOD,
                },
        },
+       .opt_clks       = dss_venc_opt_clks,
+       .opt_clks_cnt   = ARRAY_SIZE(dss_venc_opt_clks),
        .slaves         = omap3xxx_dss_venc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
        .flags          = HWMOD_NO_IDLEST,
index 7695e5d43316686429158ec56aafacbc7d25a6a7..daaf165af696f212c6dcadb4db7ac3539d54dcd6 100644 (file)
@@ -30,6 +30,7 @@
 #include <plat/mmc.h>
 #include <plat/i2c.h>
 #include <plat/dmtimer.h>
+#include <plat/common.h>
 
 #include "omap_hwmod_common_data.h"
 
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
 static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
        .name   = "dss",
        .sysc   = &omap44xx_dss_sysc,
+       .reset  = omap_dss_reset,
 };
 
 /* dss */
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
 static struct omap_hwmod_opt_clk dss_opt_clks[] = {
        { .role = "sys_clk", .clk = "dss_sys_clk" },
        { .role = "tv_clk", .clk = "dss_tv_clk" },
-       { .role = "dss_clk", .clk = "dss_dss_clk" },
-       { .role = "video_clk", .clk = "dss_48mhz_clk" },
+       { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
 };
 
 static struct omap_hwmod omap44xx_dss_hwmod = {
        .name           = "dss_core",
+       .flags          = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
        .class          = &omap44xx_dss_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .main_clk       = "dss_dss_clk",
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
        { }
 };
 
+static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
+       .manager_count          = 3,
+       .has_framedonetv_irq    = 1
+};
+
 /* l4_per -> dss_dispc */
 static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
        .master         = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
        &omap44xx_l4_per__dss_dispc,
 };
 
-static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
-       { .role = "sys_clk", .clk = "dss_sys_clk" },
-       { .role = "tv_clk", .clk = "dss_tv_clk" },
-       { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
 static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
        .name           = "dss_dispc",
        .class          = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
                        .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
                },
        },
-       .opt_clks       = dss_dispc_opt_clks,
-       .opt_clks_cnt   = ARRAY_SIZE(dss_dispc_opt_clks),
        .slaves         = omap44xx_dss_dispc_slaves,
        .slaves_cnt     = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
+       .dev_attr       = &omap44xx_dss_dispc_dev_attr
 };
 
 /*
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
-       .main_clk       = "dss_dss_clk",
+       .main_clk       = "dss_48mhz_clk",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
        .name           = "dss_venc",
        .class          = &omap44xx_venc_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
-       .main_clk       = "dss_dss_clk",
+       .main_clk       = "dss_tv_clk",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
index de832ebc93a98c8d2556675de6ae3bfd7a5b2342..51e5418899fb446cd2317efe2622e3683860eec4 100644 (file)
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
        .srst_shift     = SYSC_TYPE2_SOFTRESET_SHIFT,
 };
 
+struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
+       .manager_count          = 2,
+       .has_framedonetv_irq    = 0
+};
index 39a7c37f45870446a9f61d682eb5cc333cbac94c..ad5d8f04c0b8803edeac09dff53686c156365b91 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <plat/omap_hwmod.h>
 
+#include "display.h"
+
 /* Common address space across OMAP2xxx */
 extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
 extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
 extern struct omap_hwmod_class omap2xxx_mcspi_class;
 
+extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
+
 #endif
index 6a66aa5e2a5b368f702f646366cb7175e6c2ad52..d15225ff5c4969b3ddde9cc79ece7caf725001eb 100644 (file)
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
 static const struct of_device_id l3_noc_match[] = {
        {.compatible = "ti,omap4-l3-noc", },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, l3_noc_match);
 #else
 #define l3_noc_match NULL
index 1e79bdf313e311fc945fb56bb2dffa3817d3b86e..00bff46ca48beb606557f8fba7a1974f571ebb91 100644 (file)
@@ -24,6 +24,7 @@
 #include "powerdomain.h"
 #include "clockdomain.h"
 #include "pm.h"
+#include "twl-common.h"
 
 static struct omap_device_pm_latency *pm_lats;
 
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
 
 static int __init omap2_common_pm_late_init(void)
 {
-       /* Init the OMAP TWL parameters */
-       omap3_twl_init();
-       omap4_twl_init();
-
        /* Init the voltage layer */
+       omap_pmic_late_init();
        omap_voltage_late_init();
 
        /* Initialize the voltages */
index 6a4f6839a7d93fecc95259506c3ea38962a3ba5a..cf246b39bac745dc315576ecc161c35666643381 100644 (file)
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
                sr_write_reg(sr_info, ERRCONFIG_V1, status);
        } else if (sr_info->ip_type == SR_TYPE_V2) {
                /* Read the status bits */
-               sr_read_reg(sr_info, IRQSTATUS);
+               status = sr_read_reg(sr_info, IRQSTATUS);
 
                /* Clear them by writing back */
                sr_write_reg(sr_info, IRQSTATUS, status);
index 5224357721686dc41d5a703a3ba02ae1e1e7bf6a..10b20c652e5dc390026bc1eec5042ff351c387f4 100644 (file)
@@ -30,6 +30,7 @@
 #include <plat/usb.h>
 
 #include "twl-common.h"
+#include "pm.h"
 
 static struct i2c_board_info __initdata pmic_i2c_board_info = {
        .addr           = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
        omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
 }
 
+void __init omap_pmic_late_init(void)
+{
+       /* Init the OMAP TWL parameters (if PMIC has been registered) */
+       if (!pmic_i2c_board_info.irq)
+               return;
+
+       omap3_twl_init();
+       omap4_twl_init();
+}
+
 #if defined(CONFIG_ARCH_OMAP3)
 static struct twl4030_usb_data omap3_usb_pdata = {
        .usb_mode       = T2_USB_MODE_ULPI,
index 5e83a5bd37fb719dd06ea8deac01e0c3c2922e80..275dde8cb27aa789ce4d9bfa89c8ed92fb122711 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __OMAP_PMIC_COMMON__
 #define __OMAP_PMIC_COMMON__
 
+#include <plat/irqs.h>
+
 #define TWL_COMMON_PDATA_USB           (1 << 0)
 #define TWL_COMMON_PDATA_BCI           (1 << 1)
 #define TWL_COMMON_PDATA_MADC          (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
 
 void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
                    struct twl4030_platform_data *pmic_data);
+void omap_pmic_late_init(void);
 
 static inline void omap2_pmic_init(const char *pmic_type,
                                   struct twl4030_platform_data *pmic_data)
index cb53160f6c5d3127b961edbf102e70ab34733712..26ebb57719df5d3fbf389e46cb723b92029b8191 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
index ef555c041962983814be50c03d606b91bcd15b0d..a12b689a87026c9e06af50351b2b20d8c7e01d7d 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <linux/of.h>
index fc0b8544e17455f27fb020cee143dbaee16123b2..4b81f59a4cbaf5aebbac8c1d55980d9b4dd45618 100644 (file)
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static void balloon3_udc_command(int cmd)
 {
        if (cmd == PXA2XX_UDC_CMD_CONNECT)
index 692e1ffc558628526105f3b378f6ce745650480e..d23b92b80488257db2ef2bf4af458fc4060feaf8 100644 (file)
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
 static inline void __init colibri_pxa320_init_eth(void) {}
 #endif /* CONFIG_AX88796 */
 
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
        .gpio_vbus              = mfp_to_gpio(MFP_PIN_GPIO96),
        .gpio_pullup            = -1,
index 9c8208ca04150e4a3bbb2d41cccb28287a0efe99..ffdd70dad327dd135f1a2d5fc36a3cde45f2f86b 100644 (file)
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
 }
 #endif
 
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
 static struct gpio_vbus_mach_info gumstix_udc_info = {
        .gpio_vbus              = GPIO_GUMSTIX_USB_GPIOn,
        .gpio_pullup            = GPIO_GUMSTIX_USB_GPIOx,
index f80bbe246afe5812b17db200d709b13f6160f72c..d4eac3d6ffb5ecd04df4abeb0ed45b32a2882705 100644 (file)
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
 #define palm27x_lcd_init(power, mode)  do {} while (0)
 #endif
 
-#if    defined(CONFIG_USB_GADGET_PXA27X) || \
-       defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if    defined(CONFIG_USB_PXA27X) || \
+       defined(CONFIG_USB_PXA27X_MODULE)
 extern void __init palm27x_udc_init(int vbus, int pullup,
                                        int vbus_inverted);
 #else
index 325c245c0a0dd3916129b2875caaec931758137e..fbc10d7b95d1e8ef7ceec5dd03d512c7c2d314bf 100644 (file)
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if    defined(CONFIG_USB_GADGET_PXA27X) || \
-       defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if    defined(CONFIG_USB_PXA27X) || \
+       defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info palm27x_udc_info = {
        .gpio_vbus_inverted     = 1,
 };
index 6ec7caefb37c8219d02f45fc0088249295006fa5..2c24c67fd92b6d863fc2e534b6ad9f15b7daaaf7 100644 (file)
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
 /******************************************************************************
  * UDC
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
 static struct gpio_vbus_mach_info palmtc_udc_info = {
        .gpio_vbus              = GPIO_NR_PALMTC_USB_DETECT_N,
        .gpio_vbus_inverted     = 1,
index a7539a6ed1ff2a5d1ce4a460183a17fe28900e45..ca0c6615028c42aa0414cc2e76de4a0a29c83918 100644 (file)
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
        .gpio_vbus              = GPIO41_VPAC270_UDC_DETECT,
        .gpio_pullup            = -1,
index 5e6b42089eb44d7048b39cc2855403cd44cc0ddc..3341fd118723ff11cc7a21df95241c1252c2f6a0 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/gpio.h>
index 66668565ee75e6694c086030504bf5d01fc59bef..f208154b1382d0492898ebba6b6d7c14079ad80d 100644 (file)
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 
index 7a3bc32df425a3426016ec5448e884ad0e0443ea..51c00f2453c6c62ef9537143c0f31ad8ea416424 100644 (file)
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
        s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
 }
 
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
        .name   = "s3c6400-core",
 };
 
index 83d2afb79e9f88370fbced458f6ffbf9a5b1b1c8..2cf80026c58d470c5f328da8b6df696fa4514bae 100644 (file)
@@ -20,7 +20,7 @@
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
 {
        s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
        s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
index 5a616f6e56120c850c9afa93af19513a4b80ef1c..f7951aa0456287eadb4356ef5d1a3ecd3266abba 100644 (file)
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
-   zreladdr-$(CONFIG_SA1111)           += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+   zreladdr-y  += 0xc0208000
 else
    zreladdr-y  += 0xc0008000
 endif
index 8ac9e9f84790bb946716016607478ad97711376d..b1e192ba8c2450cb75a8118f86f8b650be284c82 100644 (file)
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
 {
        void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_ARM_ERRATA_753970
+#ifdef CONFIG_PL310_ERRATA_753970
 /* write to an unmapped register */
        writel_relaxed(0, base + L2X0_DUMMY_REG);
 #else
index e4e7f6cba1ab4823fdbf0fe7ab2109bf6df89ed7..1aa664a1999fce45c2548726b50b6fa924608ec2 100644 (file)
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
        pte_t *pte;
        int i = 0;
        unsigned long base = consistent_base;
-       unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+       unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        struct page *page;
        void *addr;
 
+       /*
+        * Following is a work-around (a.k.a. hack) to prevent pages
+        * with __GFP_COMP being passed to split_page() which cannot
+        * handle them.  The real problem is that this flag probably
+        * should be 0 on ARM as it is not supported on this
+        * platform; see CONFIG_HUGETLBFS.
+        */
+       gfp &= ~(__GFP_COMP);
+
        *handle = ~0;
        size = PAGE_ALIGN(size);
 
index 74be05f3e03ac58be921aff208c6f6aa460ab683..44b628e4d6ea9c0121acf892ffbcfb30d0fc40ad 100644 (file)
@@ -9,8 +9,7 @@
 #include <linux/io.h>
 #include <linux/personality.h>
 #include <linux/random.h>
-#include <asm/cputype.h>
-#include <asm/system.h>
+#include <asm/cachetype.h>
 
 #define COLOUR_ALIGN(addr,pgoff)               \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-       unsigned int cache_type;
-       int do_align = 0, aliasing = 0;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
 
        /*
         * We only need to do colour alignment if either the I or D
-        * caches alias.  This is indicated by bits 9 and 21 of the
-        * cache type register.
+        * caches alias.
         */
-       cache_type = read_cpuid_cachetype();
-       if (cache_type != read_cpuid_id()) {
-               aliasing = (cache_type | cache_type >> 12) & (1 << 11);
-               if (aliasing)
-                       do_align = filp || flags & MAP_SHARED;
-       }
-#else
-#define do_align 0
-#define aliasing 0
-#endif
+       if (aliasing)
+               do_align = filp || (flags & MAP_SHARED);
 
        /*
         * We enforce the MAP_FIXED case.
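
The mmap.c change above drops the manual cache-type probing and asks cache_is_vipt_aliasing() directly; the colouring rule itself is unchanged. A sketch of the alignment arithmetic used by the COLOUR_ALIGN() macro visible in the context (the SHMLBA value is assumed here):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4UL << PAGE_SHIFT)   /* assumed 4-page colour period */

/* Round 'addr' up to a colour boundary, then add the file offset's colour so
 * shared mappings of the same offset end up on the same cache colour. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
        unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);

        return base + ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
        printf("%#lx\n", colour_align(0x40001234UL, 3));
        return 0;
}
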
index 83b745a5e1b724e92ee87be633dc15b00edab968..c75f254abd857c07e6f8133716b51d5925f17581 100644 (file)
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode {
 };
 
 extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
-extern void (*imx_idle)(void);
 extern void imx_print_silicon_rev(const char *cpu, int srev);
 
 void avic_handle_irq(struct pt_regs *);
@@ -133,4 +132,5 @@ extern void imx53_qsb_common_init(void);
 extern void imx53_smd_common_init(void);
 extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
 extern void imx6q_pm_init(void);
+extern void imx6q_clock_map_io(void);
 #endif
index 00a78193c681ae16f5ed61f1b587f3ecf9e777f8..a4d36d601d55a5ed493f9191929850c6530f804f 100644 (file)
 #define IMX_CHIP_REVISION_3_3          0x33
 #define IMX_CHIP_REVISION_UNKNOWN      0xff
 
-#define IMX_CHIP_REVISION_1_0_STRING           "1.0"
-#define IMX_CHIP_REVISION_1_1_STRING           "1.1"
-#define IMX_CHIP_REVISION_1_2_STRING           "1.2"
-#define IMX_CHIP_REVISION_1_3_STRING           "1.3"
-#define IMX_CHIP_REVISION_2_0_STRING           "2.0"
-#define IMX_CHIP_REVISION_2_1_STRING           "2.1"
-#define IMX_CHIP_REVISION_2_2_STRING           "2.2"
-#define IMX_CHIP_REVISION_2_3_STRING           "2.3"
-#define IMX_CHIP_REVISION_3_0_STRING           "3.0"
-#define IMX_CHIP_REVISION_3_1_STRING           "3.1"
-#define IMX_CHIP_REVISION_3_2_STRING           "3.2"
-#define IMX_CHIP_REVISION_3_3_STRING           "3.3"
-#define IMX_CHIP_REVISION_UNKNOWN_STRING       "unknown"
-
 #ifndef __ASSEMBLY__
 extern unsigned int __mxc_cpu_type;
 #endif
index cf88b3593fba794d1a2de0c871f278ec03988da5..b9895d250167cf6b384d5cb1320e00e855537019 100644 (file)
 #ifndef __ASM_ARCH_MXC_SYSTEM_H__
 #define __ASM_ARCH_MXC_SYSTEM_H__
 
-extern void (*imx_idle)(void);
-
 static inline void arch_idle(void)
 {
-       if (imx_idle != NULL)
-               (imx_idle)();
-       else
-               cpu_do_idle();
+       cpu_do_idle();
 }
 
 void arch_reset(char mode, const char *cmd);
index 9dad8dcc2ea9dde822720e14c0c55ead52e8a420..d65fb31a55ca47ef350e38da864b6d34cf5295f9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 
 #include <mach/hardware.h>
 #include <mach/common.h>
@@ -28,8 +29,8 @@
 #include <asm/system.h>
 #include <asm/mach-types.h>
 
-void (*imx_idle)(void) = NULL;
 void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
+EXPORT_SYMBOL_GPL(imx_ioremap);
 
 static void __iomem *wdog_base;
 
index 197ca03c3f7d8490a109a9b576b2e2f9c9c420e6..eb73ab40e9556ee03f36e63fa7c06b9e77b4b1d3 100644 (file)
@@ -165,8 +165,8 @@ struct dpll_data {
        u8                      auto_recal_bit;
        u8                      recal_en_bit;
        u8                      recal_st_bit;
-       u8                      flags;
 #  endif
+       u8                      flags;
 };
 
 #endif
index c50df4814f6f43935c7b021064c610f200ff0dc0..3ff3e36580f267df6890712efef55e8ee22bdbc7 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 
 #include <plat/i2c.h>
+#include <plat/omap_hwmod.h>
 
 struct sys_timer;
 
@@ -55,6 +56,8 @@ void am35xx_init_early(void);
 void ti816x_init_early(void);
 void omap4430_init_early(void);
 
+extern int omap_dss_reset(struct omap_hwmod *);
+
 void omap_sram_init(void);
 
 /*
index a9276667c2fb0e59c21e643aab28aef2debdee84..c7adad0e8de091ad42d021958bb1fc3b3c8cff03 100644 (file)
@@ -12,7 +12,7 @@
 */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/cpufreq.h>
index e1cbc728c7759b1e1f54923f11120b3ea4647cb2..c8bec9c7655d417ebbcb78096f16a7cbeddcf54d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/export.h>
 
 #include <asm/pgtable.h>
 
index d48245bb02b3bbf6178a45e7e6ae48ad3ec8d7f6..df8155b9d4d19e550b881abf22df4dbbeab3315c 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef __PLAT_GPIO_CFG_H
 #define __PLAT_GPIO_CFG_H __FILE__
 
+#include <linux/types.h>
+
 typedef unsigned int __bitwise__ samsung_gpio_pull_t;
 typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
 
index efe1d564473e02c6195774fbc47c51a23a97fad4..312b510d86b76bf0b96feba92de8aae8163380c3 100644 (file)
@@ -11,7 +11,7 @@
 */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
index dc1185dcf80d559757d289f4490723888786c2fa..c559d8438c70ee2d873afc43a7d14ee1fc2002c2 100644 (file)
@@ -11,7 +11,7 @@
  * the Free Software Foundation; either version 2 of the License.
 */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 5bdeef9698470b354dc3f144caadabda08dae00d..ccbe16f47227e2ebb171032b2a5de66289014cd0 100644 (file)
@@ -1123,5 +1123,6 @@ blissc                    MACH_BLISSC             BLISSC                  3491
 thales_adc             MACH_THALES_ADC         THALES_ADC              3492
 ubisys_p9d_evp         MACH_UBISYS_P9D_EVP     UBISYS_P9D_EVP          3493
 atdgp318               MACH_ATDGP318           ATDGP318                3494
+m28evk                 MACH_M28EVK             M28EVK                  3613
 smdk4212               MACH_SMDK4212           SMDK4212                3638
 smdk4412               MACH_SMDK4412           SMDK4412                3765
index 43f984e93970b8acbabbfd6554920a4b4fb265f4..303192fc9260d50f44cdf6e7ef0371a2fc41d9da 100644 (file)
 #define __NR_clock_adjtime     342
 #define __NR_syncfs            343
 #define __NR_setns             344
+#define __NR_process_vm_readv  345
+#define __NR_process_vm_writev 346
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            345
+#define NR_syscalls            347
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index c468f2edaa85ec0cd2356e392a0f0e7efddb8218..ce827b376110a6a815b4ccece0a066e5ad66cc59 100644 (file)
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
        .long sys_clock_adjtime
        .long sys_syncfs
        .long sys_setns
+       .long sys_process_vm_readv      /* 345 */
+       .long sys_process_vm_writev
 
index 4f2971bcf8e5464577885b5ecc64db11c388d295..315fc0b250f8fe3373684f12f5c0437fd3d0b7b1 100644 (file)
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
                        atomic_dec(&active_events);
-                       return -ENOSPC;
+                       return -EINVAL;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
        memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
        if (!validate_event(&fake_cpuc, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_cpuc, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_cpuc, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
index d9b776740a6739707d26a9ad836a39533bc8718c..d3b478242ea9586b793f9def431a6d9a4ea00c80 100644 (file)
                        interrupt-parent = <&mpic>;
                        interrupts = <16 2>;
                        interrupt-map-mask = <0xf800 0 0 7>;
+                       /* IRQ[0:3] are pulled up on board, set to active-low */
                        interrupt-map = <
                                /* IDSEL 0x0 */
                                0000 0 0 1 &mpic 0 1
                        interrupt-parent = <&mpic>;
                        interrupts = <16 2>;
                        interrupt-map-mask = <0xf800 0 0 7>;
+                       /*
+                        * IRQ[4:6] only for PCIe, set to active-high,
+                        * IRQ[7] is pulled up on board, set to active-low
+                        */
                        interrupt-map = <
                                /* IDSEL 0x0 */
-                               0000 0 0 1 &mpic 4 1
-                               0000 0 0 2 &mpic 5 1
-                               0000 0 0 3 &mpic 6 1
+                               0000 0 0 1 &mpic 4 2
+                               0000 0 0 2 &mpic 5 2
+                               0000 0 0 3 &mpic 6 2
                                0000 0 0 4 &mpic 7 1
                                >;
                        ranges = <0x2000000 0x0 0xa0000000
                        interrupt-parent = <&mpic>;
                        interrupts = <16 2>;
                        interrupt-map-mask = <0xf800 0 0 7>;
+                       /*
+                        * IRQ[8:10] are pulled up on board, set to active-low
+                        * IRQ[11] only for PCIe, set to active-high,
+                        */
                        interrupt-map = <
                                /* IDSEL 0x0 */
                                0000 0 0 1 &mpic 8 1
                                0000 0 0 2 &mpic 9 1
                                0000 0 0 3 &mpic 10 1
-                               0000 0 0 4 &mpic 11 1
+                               0000 0 0 4 &mpic 11 2
                                >;
                        ranges = <0x2000000 0x0 0x80000000
                                  0x2000000 0x0 0x80000000
index 6cdf1c0d2c8a10acc43796c20af80d52f7b78d64..3b98d735434142bb9240a6a2d4906d0b882a3ab2 100644 (file)
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_NDFC=m
 CONFIG_MTD_UBI=m
 CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_PROC_DEVICETREE=y
index 5964371303ac4a5941eb2a0ccceca27cf6330354..8558b572e55d3e6ba2ee7b5656b9eb9f9e39b5c3 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/of_fdt.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
+#include <linux/moduleparam.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
index 45023e26aea369685d88c752aa52814ac07dd27a..d7946be298b6fc1f842eaff3e14b4ae502924f3c 100644 (file)
@@ -203,7 +203,7 @@ config P3060_QDS
        select PPC_E500MC
        select PHYS_64BIT
        select SWIOTLB
-       select MPC8xxx_GPIO
+       select GPIO_MPC8XXX
        select HAS_RAPIDIO
        select PPC_EPAPR_HV_PIC
        help
index 01dcf44871e9a4c79273d14efd1fcde0b60b6ba9..081cf4ac188161b4f4bb535d0d57209bc14fd03c 100644 (file)
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
        .power_save             = e500_idle,
 };
 
-machine_device_initcall(p3060_qds, declare_of_platform_devices);
+machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
 
 #ifdef CONFIG_SWIOTLB
 machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
index af1a5df46b3e54ff75bb392b97ff26e8f1e72b4f..b6731e4a6646e16e81c62de273d8a6cd579b1ca7 100644 (file)
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
 
        if (!ehv_pic->irqhost) {
                of_node_put(np);
+               kfree(ehv_pic);
                return;
        }
 
index c4d96fa32ba557a68512b133400fef585e799eb4..d5c3c90ee6981a14d40da0175f3d3ddc0c0f77f4 100644 (file)
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
 err:
        iounmap(fsl_lbc_ctrl_dev->regs);
        kfree(fsl_lbc_ctrl_dev);
+       fsl_lbc_ctrl_dev = NULL;
        return ret;
 }
 
index 3363fbc964f86c1d42676116f813a2ac71b9e0e2..ceb09cbd2329e782fe00a5c3e1326d217ab3e92b 100644 (file)
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
        /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
           that the BRG divisor must be even if you're not using divide-by-16
           mode. */
-       if (!div16 && (divisor & 1))
+       if (!div16 && (divisor & 1) && (divisor > 3))
                divisor++;
 
        tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
index 524d23b8610ceb65c42661a97e79226125dbadf3..4f289ff0b7fe27b7de54d8886b6aee0b8890066f 100644 (file)
@@ -599,10 +599,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
        skey = page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Clear page changed & referenced bit in the storage key */
-       if (bits) {
-               skey ^= bits;
-               page_set_storage_key(address, skey, 1);
-       }
+       if (bits & _PAGE_CHANGED)
+               page_set_storage_key(address, skey ^ bits, 1);
+       else if (bits)
+               page_reset_referenced(address);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* RCP_GR_BIT & RCP_GC_BIT */
        /* Get host changed & referenced bits from pgste */
index 450931a45b684b2044106921a66a4702e1eb93f1..573bc29551ef471fee58b89d02df0ea0ff956503 100644 (file)
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
                        /* Invalid psw mask. */
                        return -EINVAL;
-               if (addr == (addr_t) &dummy->regs.psw.addr)
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
-
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_last_break_set(struct task_struct *target,
+                              const struct user_regset *regset,
+                              unsigned int pos, unsigned int count,
+                              const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 #endif
 
 static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
+               .set = s390_last_break_set,
        },
 #endif
        [REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_compat_last_break_set(struct task_struct *target,
+                                     const struct user_regset *regset,
+                                     unsigned int pos, unsigned int count,
+                                     const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 static const struct user_regset s390_compat_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
+               .set = s390_compat_last_break_set,
        },
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_S390_SYSTEM_CALL,
index e58a462949b164ea90c5697696b9ccec47bb7ff4..e54c4ff8abaaa3d1a34efd34decd0d12c713e4ef 100644 (file)
@@ -579,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
                *msg = "first memory chunk must be at least crashkernel size";
                return 0;
        }
-       if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+       if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
                return OLDMEM_BASE;
 
        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
index 05a85bc14c98a2556e86bf40cec7e76de2dc2423..7f6f9f35454518f091e4fb86e3d39e46c8aaf391 100644 (file)
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs)
                                                     regs->svc_code >> 16);
                                break;
                        }
-                       /* No longer in a system call */
-                       clear_thread_flag(TIF_SYSCALL);
                }
+               /* No longer in a system call */
+               clear_thread_flag(TIF_SYSCALL);
 
                if ((is_compat_task() ?
                     handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs)
        }
 
        /* No handlers present - check for system call restart */
+       clear_thread_flag(TIF_SYSCALL);
        if (current_thread_info()->system_call) {
                regs->svc_code = current_thread_info()->system_call;
                switch (regs->gprs[2]) {
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs)
                        regs->gprs[2] = regs->orig_gpr2;
                        set_thread_flag(TIF_SYSCALL);
                        break;
-               default:
-                       clear_thread_flag(TIF_SYSCALL);
-                       break;
                }
        }
 
index 94e9a511de849c925bcf28841368ce278feade0c..f80f8ceabc67abd6ef57fce76cb1a224f53204c8 100644 (file)
@@ -74,16 +74,6 @@ enum {
  */
 void tile_irq_activate(unsigned int irq, int tile_irq_type);
 
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core.  We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
 void setup_irq_regs(void);
 
 #endif /* _ASM_TILE_IRQ_H */
index aa0134db2dd683e4f2bde38823bcabc931f7d8ef..02e62806501256ad63f322d26461bd34f8bcc4e9 100644 (file)
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
  * Remove an irq from the disabled mask.  If we're in an interrupt
  * context, defer enabling the HW interrupt until we leave.
  */
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+       get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
-               unmask_irqs(1UL << irq);
+               unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(enable_percpu_irq);
 
 /*
  * Add an irq to the disabled mask.  We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
  * in an interrupt context, the return path is careful to avoid
  * unmasking a newly disabled interrupt.
  */
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) |= (1UL << irq);
-       mask_irqs(1UL << irq);
+       get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+       mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
 static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
 
 static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
+       .irq_enable = tile_irq_chip_enable,
+       .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
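With .irq_enable/.irq_disable wired into tile_irq_chip above, the tile-specific enable_percpu_irq()/disable_percpu_irq() exports removed by this hunk are no longer needed: callers are expected to use the generic per-CPU IRQ helpers of the same names provided by the 3.2 genirq core. A minimal sketch of what an on-chip driver would now do, assuming the IRQ has already been marked per-CPU (e.g. via irq_set_percpu_devid()); the function and IRQ names here are illustrative only:

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical per-CPU device IRQ; on tile this would come from the hypervisor config. */
#define EXAMPLE_PERCPU_IRQ      12

static void example_enable_on_this_cpu(void)
{
        /* Generic helper: per-CPU reference counting, unlike enable_irq(). */
        enable_percpu_irq(EXAMPLE_PERCPU_IRQ, IRQ_TYPE_NONE);
}

static void example_disable_on_this_cpu(void)
{
        disable_percpu_irq(EXAMPLE_PERCPU_IRQ);
}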
index 658f2ce426a44ef5fefab5852cfbe05dc796953b..b3ed19f8779c4a9058ea818bc8fb31a6a75ef34b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
index 2a8014cb1ff52f0ef3e24a58a5e20ddcfeb3c8cc..9d610d3fb11e9ac93c6e17875cff26257fa89e46 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index b671a86f45152155bf0170504bdf2511112cdfb0..602908268093cf53397df895e68e240cc601d552 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/stat.h>
 #include <hv/hypervisor.h>
 
 /* Return a string queried from the hypervisor, truncated to page size. */
index a87d2a859ba97de91db6e8769cb8ecb1bcd78e49..2a81d32de0da518989e5a5118ab91b77774e3e7f 100644 (file)
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
 EXPORT_SYMBOL(current_text_addr);
 EXPORT_SYMBOL(dump_stack);
 
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
index cbe6f4f9eca3c93c43039be31238fdeb56ee36dc..1cc6ae477c98b59711c29deb5674e50c63c96be3 100644 (file)
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
        VM_BUG_ON(!virt_addr_valid((void *)addr));
        page = virt_to_page((void *)addr);
        if (put_page_testzero(page)) {
-               int pages = (1 << order);
                homecache_change_page_home(page, order, initial_page_home());
-               while (pages--)
-                       __free_page(page++);
+               if (order == 0) {
+                       free_hot_cold_page(page, 0);
+               } else {
+                       init_page_count(page);
+                       __free_pages(page, order);
+               }
        }
 }
index cb9a1044a771be75563305f8909097a67cd21778..efb42949cc09349e37246baa07f018648186386f 100644 (file)
@@ -390,7 +390,7 @@ config X86_INTEL_CE
          This option compiles in support for the CE4100 SOC for settop
          boxes and media devices.
 
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
        bool "Intel MID platform support"
        depends on X86_32
        depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
          systems which do not have the PCI legacy interfaces (Moorestown,
          Medfield). If you are building for a PC class system say N here.
 
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+       bool
 
 config X86_MRST
        bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
        select SPI
        select INTEL_SCU_IPC
        select X86_PLATFORM_DEVICES
+       select X86_INTEL_MID
        ---help---
          Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
          Internet Device(MID) platform. Moorestown consists of two chips:
index 908b96957d88adf11694f6e83652b47d06d3b8f0..c9547033e38e8fee78964ffda671d9e3402a4455 100644 (file)
  */
 #define E820_RESERVED_KERN        128
 
+/*
+ * Address ranges that need to be mapped by the kernel direct
+ * mapping. This is used to make sure regions such as
+ * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
+ */
+#define E820_RESERVED_EFI         129
+
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 struct e820entry {
@@ -115,6 +122,7 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
+extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
index 7093e4a6a0bc6dd5644b2b14cbef9ef5f4085eef..b8d8bfcd44a95f48b15537b555636bbbb32c57b9 100644 (file)
@@ -33,8 +33,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)      \
        efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size, type)          ioremap_cache(addr, size)
-
 #else /* !CONFIG_X86_32 */
 
 extern u64 efi_call0(void *fp);
@@ -84,9 +82,6 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
        efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
-                                u32 type);
-
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
index 4420993acc4734c962922d58ee63e0b700cf330e..925b605eb5c601fa9a6f6c24cf41e596b92d86f8 100644 (file)
@@ -3,11 +3,15 @@
 
 #include <linux/notifier.h>
 
-#define IPCMSG_VRTC    0xFA     /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_WARM_RESET      0xF0
+#define IPCMSG_COLD_RESET      0xF1
+#define IPCMSG_SOFT_RESET      0xF2
+#define IPCMSG_COLD_BOOT       0xF3
+
+#define IPCMSG_VRTC            0xFA     /* Set vRTC device */
+       /* Command id associated with message IPCMSG_VRTC */
+       #define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+       #define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
 
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
index e6283129c821014eba1afcbbb1bdd042b6b73e04..93f79094c2243211eede22db91acdf33098a059a 100644 (file)
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
 static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
        return __mrst_cpu_chip;
 }
 
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu()    (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
 enum mrst_timer_options {
        MRST_TIMER_DEFAULT,
        MRST_TIMER_APBT_ONLY,
index 084ef95274cd78ceb51b1ea7a208a7a5e486199a..95203d40ffdde69d014c986453905280b49ee9e5 100644 (file)
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
        return native_write_msr_safe(msr, low, high);
 }
 
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
 #define rdmsr_safe(msr, p1, p2)                                        \
 ({                                                             \
        int __err;                                              \
index c2ff2a1d845e402249e44a70e41459805c3faaa8..2d2f01ce6dcbf1a9c8b72ebef77a159e1b3a5b1a 100644 (file)
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
+bool set_pm_idle_to_default(void);
 
 void stop_this_cpu(void *dummy);
 
index fa7b9176b76cb33820034403fd8f4a50dc49709c..431793e5d4846f23bf5947f933fdaf9f5ae1987a 100644 (file)
@@ -32,6 +32,22 @@ extern int no_timer_check;
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC.  Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ *                     - sqazi@google.com
  */
 
 DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
 
 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 {
+       unsigned long long quot;
+       unsigned long long rem;
        int cpu = smp_processor_id();
        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
-       ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+       quot = (cyc >> CYC2NS_SCALE_FACTOR);
+       rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+       ns += quot * per_cpu(cyc2ns, cpu) +
+               ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
        return ns;
 }
 
index 10474fb1185df7e30f52101cea4e358a18e39458..cf1d73643f60723dc514b52e66969ed46a9c01a4 100644 (file)
@@ -57,6 +57,7 @@
 
 #define UV1_HUB_PART_NUMBER    0x88a5
 #define UV2_HUB_PART_NUMBER    0x8eb8
+#define UV2_HUB_PART_NUMBER_X  0x1111
 
 /* Compat: if this #define is present, UV headers support UV2 */
 #define UV2_HUB_IS_SUPPORTED   1
index 62ae3001ae02c4348d640dc01f67fd055718deb0..9d59bbacd4e3cb7a76474a59e2965c739140d475 100644 (file)
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
 
        if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+       if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+               uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
 
        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
index c7e46cb353279080f2b1f67b34e283f0086a5e83..0bab2b18bb2099c4290f046bb210035b05083869 100644 (file)
@@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
 
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
-       u32 dummy;
-
        early_init_amd_mc(c);
 
        /*
@@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
        }
 #endif
-
-       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
+       u32 dummy;
+
 #ifdef CONFIG_SMP
        unsigned long long value;
 
@@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
+
+       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 #ifdef CONFIG_X86_32
index a71efcdbb0925ffe7c2f97d58d0625e4179662fc..97b26356e9ee8b022b45ae1adc5ef7628a5e2539 100644 (file)
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 
                if (tmp != mask_lo) {
                        printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+                       add_taint(TAINT_FIRMWARE_WORKAROUND);
                        mask_lo = tmp;
                }
        }
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+       wbinvd();
 }
 
 static void post_set(void) __releases(set_atomicity_lock)
index 640891014b2ae3dccdef498db11f05a0942145e5..2bda212a0010ca561e8f34a9d56c2502b47ff70b 100644 (file)
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
                        return -EOPNOTSUPP;
        }
 
-       /*
-        * Do not allow config1 (extended registers) to propagate,
-        * there's no sane user-space generalization yet:
-        */
        if (attr->type == PERF_TYPE_RAW)
-               return 0;
+               return x86_pmu_extra_regs(event->attr.config, event);
 
        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);
@@ -588,7 +584,7 @@ done:
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 /*
@@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
        if (is_x86_event(leader)) {
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
@@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
                        continue;
 
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
 
                cpuc->event_list[n] = event;
                n++;
@@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event)
        c = x86_pmu.get_event_constraints(fake_cpuc, event);
 
        if (!c || !c->weight)
-               ret = -ENOSPC;
+               ret = -EINVAL;
 
        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event)
 {
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
-       int ret = -ENOSPC, n;
+       int ret = -EINVAL, n;
 
        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
index ab6343d21825d7d9328fae81fd6faef3bf092a13..3b8a2d30d14e8ebeb2c58406212fbbd3c79ba935 100644 (file)
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
                goto out;
        }
 
-       pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
-       pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+       pr_info("IBS: LVT offset %d assigned\n", offset);
 
        return 0;
 out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
 static __init int amd_ibs_init(void)
 {
        u32 caps;
-       int ret;
+       int ret = -EINVAL;
 
        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV; /* ibs not supported by the cpu */
 
-       if (!ibs_eilvt_valid()) {
-               ret = force_ibs_eilvt_setup();
-               if (ret) {
-                       pr_err("Failed to setup IBS, %d\n", ret);
-                       return ret;
-               }
-       }
+       /*
+        * Force LVT offset assignment for family 10h: The offsets are
+        * not assigned by the BIOS for this family, so the OS is
+        * responsible for doing it. If the OS assignment fails, fall
+        * back to BIOS settings and try to setup this.
+        */
+       if (boot_cpu_data.x86 == 0x10)
+               force_ibs_eilvt_setup();
+
+       if (!ibs_eilvt_valid())
+               goto out;
 
        get_online_cpus();
        ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
        smp_call_function(setup_APIC_ibs, NULL, 1);
        put_online_cpus();
 
-       return perf_event_ibs_init();
+       ret = perf_event_ibs_init();
+out:
+       if (ret)
+               pr_err("Failed to setup IBS, %d\n", ret);
+       return ret;
 }
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
index 2be5ebe9987209d41e76aede9e28ee9271070a34..8d601b18bf9f43688f2a6c5cb90994e00a4fa8d8 100644 (file)
@@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
+static void intel_sandybridge_quirks(void)
+{
+       printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+       x86_pmu.pebs = 0;
+       x86_pmu.pebs_constraints = NULL;
+}
+
 __init int intel_pmu_init(void)
 {
        union cpuid10_edx edx;
@@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 42: /* SandyBridge */
+               x86_pmu.quirks = intel_sandybridge_quirks;
        case 45: /* SandyBridge, "Romley-EP" */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
index c0d238f49db843cbd98bea7a7bc68b700fc40d7f..73da6b64f5b788ccbb83eef3317c32161b755fca 100644 (file)
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
+       int is_64bit = 0;
 
        /*
         * We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
                } else
                        kaddr = (void *)to;
 
-               kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+               is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+               insn_init(&insn, kaddr, is_64bit);
                insn_get_length(&insn);
                to += insn.length;
        } while (to < ip);
index 492bf1358a7c388a9e252e8f031d6c6122e540d8..ef484d9d0a251b0128a164486096ce43c4ea8f4c 100644 (file)
@@ -1268,7 +1268,7 @@ reserve:
        }
 
 done:
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 static __initconst const struct x86_pmu p4_pmu = {
index 303a0e48f076feb3feb522d4052ac4b958995d42..65ffd110a81bc95491fb13ae7c87cd7be1738f93 100644 (file)
@@ -135,6 +135,7 @@ static void __init e820_print_type(u32 type)
                printk(KERN_CONT "(usable)");
                break;
        case E820_RESERVED:
+       case E820_RESERVED_EFI:
                printk(KERN_CONT "(reserved)");
                break;
        case E820_ACPI:
@@ -783,7 +784,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 /*
  * Find the highest page frame number we have available
  */
-static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
 {
        int i;
        unsigned long last_pfn = 0;
index b946a9eac7d9f29fb6d955e3541bf03295612367..1bb0bf4d92cd8edf9d639ea5aabd08bc7efad892 100644 (file)
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
 }
 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
+static void hpet_disable_rtc_channel(void)
+{
+       unsigned long cfg;
+       cfg = hpet_readl(HPET_T1_CFG);
+       cfg &= ~HPET_TN_ENABLE;
+       hpet_writel(cfg, HPET_T1_CFG);
+}
+
 /*
  * The functions below are called from rtc driver.
  * Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
                return 0;
 
        hpet_rtc_flags &= ~bit_mask;
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
 
 static void hpet_rtc_timer_reinit(void)
 {
-       unsigned int cfg, delta;
+       unsigned int delta;
        int lost_ints = -1;
 
-       if (unlikely(!hpet_rtc_flags)) {
-               cfg = hpet_readl(HPET_T1_CFG);
-               cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T1_CFG);
-               return;
-       }
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
 
        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
index acf8fbf8fbda1960de6cbd18eaf47a0a049eed8b..69bca468c47a8ffc22ea5811cb3bdf63caf0f44b 100644 (file)
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);
 
+       if (user_mode_vm(regs))
+               return;
+
        WARN_ONCE(regs->sp >= curbase &&
                  regs->sp <= curbase + THREAD_SIZE &&
                  regs->sp <  curbase + sizeof(struct thread_info) +
index f2d2a664e7975acace35742dc7bf3fe3446c2aeb..9d46f5e43b51f0dd2b02ad0575c1e90470df862a 100644 (file)
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
        return 0;
 }
 
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
 {
        misc_deregister(&microcode_dev);
 }
@@ -519,10 +519,8 @@ static int __init microcode_init(void)
 
        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
-       if (IS_ERR(microcode_pdev)) {
-               microcode_dev_exit();
+       if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);
-       }
 
        get_online_cpus();
        mutex_lock(&microcode_mutex);
@@ -532,14 +530,12 @@ static int __init microcode_init(void)
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
-       if (error) {
-               platform_device_unregister(microcode_pdev);
-               return error;
-       }
+       if (error)
+               goto out_pdev;
 
        error = microcode_dev_init();
        if (error)
-               return error;
+               goto out_sysdev_driver;
 
        register_syscore_ops(&mc_syscore_ops);
        register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,6 +544,20 @@ static int __init microcode_init(void)
                " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
 
        return 0;
+
+out_sysdev_driver:
+       get_online_cpus();
+       mutex_lock(&microcode_mutex);
+
+       sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
+
+out_pdev:
+       platform_device_unregister(microcode_pdev);
+       return error;
+
 }
 module_init(microcode_init);
 
index 9103b89c145a534215824a9b2a7d80aa9e112527..0741b062a3048a6e2b1b5bd0eb4edbbf3d5bb9cf 100644 (file)
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
        }
 #endif
 
+       set_bit(m->busid, mp_bus_not_pci);
        if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
-               set_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
index b9b3b1a51643931a0405a1c0f9cc5b62eec43b1f..ee5d4fbd53b4bac72d19e3aa3000e77467f0c25e 100644 (file)
@@ -403,6 +403,14 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
+bool set_pm_idle_to_default(void)
+{
+       bool ret = !!pm_idle;
+
+       pm_idle = default_idle;
+
+       return ret;
+}
 void stop_this_cpu(void *dummy)
 {
        local_irq_disable();
index b78643d0f9a53d8b2050a14f7e8185b540ed8595..03920a15a632289605c6f0ba0b563d64065214a7 100644 (file)
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        quirk_amd_nb_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
                        quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+                       quirk_amd_nb_node);
+
 #endif
index e334be1182b9f0b3666464f8ff9f13b22d3ef1dc..37a458b521a6020598b69305c782a0f28bd75b27 100644 (file)
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
  */
 
 /*
- * Some machines require the "reboot=b"  commandline option,
+ * Some machines require the "reboot=b" or "reboot=k" command line options,
  * this quirk makes that automatic.
  */
 static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_KBD) {
+               reboot_type = BOOT_KBD;
+               printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 static struct dmi_system_id __initdata reboot_dmi_table[] = {
        {       /* Handle problems with rebooting on Dell E520's */
                .callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
        { /* Handle reboot issue on Acer Aspire one */
-               .callback = set_bios_reboot,
+               .callback = set_kbd_reboot,
                .ident = "Acer Aspire One A110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
                },
        },
+       {       /* Handle problems with rebooting on the OptiPlex 990. */
+               .callback = set_pci_reboot,
+               .ident = "Dell OptiPlex 990",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+               },
+       },
        { }
 };
 
index 348ce016a835c291deeae528fb327ea8f93ffbdb..af6db6ec5b2a20db3d13861bdb97b878f8371f1e 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/vsyscall.h>
 #include <asm/x86_init.h>
 #include <asm/time.h>
+#include <asm/mrst.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
        if (of_have_populated_dt())
                return 0;
 
+       /* Intel MID platforms don't have ioport rtc */
+       if (mrst_identify_cpu())
+               return -ENODEV;
+
        platform_device_register(&rtc_device);
        dev_info(&rtc_device.dev,
                 "registered platform RTC device (no PNP device found)\n");
index cf0ef986cb6dff51348c17c691491f6f48c61a60..9a9e40fb091ccbf0c79885dbbfd7d2e64b125d45 100644 (file)
@@ -691,6 +691,8 @@ early_param("reservelow", parse_reservelow);
 
 void __init setup_arch(char **cmdline_p)
 {
+       unsigned long end_pfn;
+
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        visws_early_detect();
@@ -932,7 +934,24 @@ void __init setup_arch(char **cmdline_p)
        init_gbpages();
 
        /* max_pfn_mapped is updated here */
-       max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+       end_pfn = max_low_pfn;
+
+#ifdef CONFIG_X86_64
+       /*
+        * There may be regions after the last E820_RAM region that we
+        * want to include in the kernel direct mapping, such as
+        * EFI_RUNTIME_SERVICES_DATA.
+        */
+       if (efi_enabled) {
+               unsigned long efi_end;
+
+               efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
+               if (efi_end > max_low_pfn)
+                       end_pfn = efi_end;
+       }
+#endif
+
+       max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
        max_pfn_mapped = max_low_pfn_mapped;
 
 #ifdef CONFIG_X86_64
index ea305856151cefc62fccd7f216519bc9d5f2945c..dd74e46828c0fc243740b61a18c2dea654fafb5e 100644 (file)
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
+               if (PageTail(page))
+                       get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
index b49962662101a0cf7361f0035e1b017333efc22a..f4f29b19fac5f2cc7c46023ef86c02a66b137e8e 100644 (file)
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
+       arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
 }
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
+               arch_flush_lazy_mmu_mode();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index cdfe4c54decac05e4943a00e27803f78898b6419..f148cf65267836d66e1fa666d612dca5669950c3 100644 (file)
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
+static int nmi_timer;
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 #ifdef CONFIG_X86_LOCAL_APIC
        ret = op_nmi_init(ops);
 #endif
+       nmi_timer = (ret != 0);
 #ifdef CONFIG_X86_IO_APIC
-       if (ret < 0)
+       if (nmi_timer)
                ret = op_nmi_timer_init(ops);
 #endif
        ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 void oprofile_arch_exit(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-       op_nmi_exit();
+       if (!nmi_timer)
+               op_nmi_exit();
 #endif
 }
index 37718f0f053d53346566c80958885f159b61dcde..c9718a16be158bfa994e774b1b1ad2dd079c68cb 100644 (file)
@@ -323,10 +323,13 @@ static void __init do_add_efi_memmap(void)
                case EFI_UNUSABLE_MEMORY:
                        e820_type = E820_UNUSABLE;
                        break;
+               case EFI_RUNTIME_SERVICES_DATA:
+                       e820_type = E820_RESERVED_EFI;
+                       break;
                default:
                        /*
                         * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
-                        * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
+                        * EFI_MEMORY_MAPPED_IO
                         * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
                         */
                        e820_type = E820_RESERVED;
@@ -671,10 +674,21 @@ void __init efi_enter_virtual_mode(void)
                end_pfn = PFN_UP(end);
                if (end_pfn <= max_low_pfn_mapped
                    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-                       && end_pfn <= max_pfn_mapped))
+                       && end_pfn <= max_pfn_mapped)) {
                        va = __va(md->phys_addr);
-               else
-                       va = efi_ioremap(md->phys_addr, size, md->type);
+
+                       if (!(md->attribute & EFI_MEMORY_WB)) {
+                               addr = (u64) (unsigned long)va;
+                               npages = md->num_pages;
+                               memrange_efi_to_native(&addr, &npages);
+                               set_memory_uc(addr, npages);
+                       }
+               } else {
+                       if (!(md->attribute & EFI_MEMORY_WB))
+                               va = ioremap_nocache(md->phys_addr, size);
+                       else
+                               va = ioremap_cache(md->phys_addr, size);
+               }
 
                md->virt_addr = (u64) (unsigned long) va;
 
@@ -684,13 +698,6 @@ void __init efi_enter_virtual_mode(void)
                        continue;
                }
 
-               if (!(md->attribute & EFI_MEMORY_WB)) {
-                       addr = md->virt_addr;
-                       npages = md->num_pages;
-                       memrange_efi_to_native(&addr, &npages);
-                       set_memory_uc(addr, npages);
-               }
-
                systab = (u64) (unsigned long) efi_phys.systab;
                if (md->phys_addr <= systab && systab < end) {
                        systab += md->virt_addr - md->phys_addr;
index ac3aa54e26546ba5cb4121eba0c58ca00f06ea82..312250c6b2de3078bc47562c6b3034e53e74e4f3 100644 (file)
@@ -80,20 +80,3 @@ void __init efi_call_phys_epilog(void)
        local_irq_restore(efi_flags);
        early_code_mapping_set_exec(0);
 }
-
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-                                u32 type)
-{
-       unsigned long last_map_pfn;
-
-       if (type == EFI_MEMORY_MAPPED_IO)
-               return ioremap(phys_addr, size);
-
-       last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-       if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
-               unsigned long top = last_map_pfn << PAGE_SHIFT;
-               efi_ioremap(top, size - (top - phys_addr), type);
-       }
-
-       return (void __iomem *)__va(phys_addr);
-}
index b1489a06a49dbc5ac0f4acd5eeaddc6136f05c1a..ad4ec1cb097ecfae17e4a99aed37cf985b03cb78 100644 (file)
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
 int sfi_mrtc_num;
 
+static void mrst_power_off(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+       else
+               intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
 /* parse all the mtimer info to a static mtimer array */
 static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 {
@@ -265,17 +279,6 @@ static int mrst_i8042_detect(void)
        return 0;
 }
 
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 0);
-}
-
 /*
  * Moorestown does not have external NMI source nor port 0x61 to report
  * NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
        return max7315;
 }
 
+static void *tca6416_platform_data(void *info)
+{
+       static struct pca953x_platform_data tca6416;
+       struct i2c_board_info *i2c_info = info;
+       int gpio_base, intr;
+       char base_pin_name[SFI_NAME_LEN + 1];
+       char intr_pin_name[SFI_NAME_LEN + 1];
+
+       strcpy(i2c_info->type, "tca6416");
+       strcpy(base_pin_name, "tca6416_base");
+       strcpy(intr_pin_name, "tca6416_int");
+
+       gpio_base = get_gpio_by_name(base_pin_name);
+       intr = get_gpio_by_name(intr_pin_name);
+
+       if (gpio_base == -1)
+               return NULL;
+       tca6416.gpio_base = gpio_base;
+       if (intr != -1) {
+               i2c_info->irq = intr + MRST_IRQ_OFFSET;
+               tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+       } else {
+               i2c_info->irq = -1;
+               tca6416.irq_base = -1;
+       }
+       return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+       struct i2c_board_info *i2c_info = info;
+       int intr = get_gpio_by_name("mpu3050_int");
+
+       if (intr == -1)
+               return NULL;
+
+       i2c_info->irq = intr + MRST_IRQ_OFFSET;
+       return NULL;
+}
+
 static void __init *emc1403_platform_data(void *info)
 {
        static short intr2nd_pdata;
@@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info)
 static const struct devs_id __initconst device_ids[] = {
        {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
        {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+       {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
        {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
        {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
        {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+       {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
        {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
        {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
        {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+       {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
 
        /* MSIC subdevices */
        {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
index 38d0af4fefec19f52d5e724c8f08102d391dc2e6..1093f80c162d24e5e455d760a5b77e1b16f800c5 100644 (file)
@@ -410,6 +410,6 @@ void __init xen_arch_setup(void)
 #endif
        disable_cpuidle();
        boot_option_idle_override = IDLE_HALT;
-
+       WARN_ON(set_pm_idle_to_default());
        fiddle_vdso();
 }
index 127408069ca7fa745953fd5a8e164cd74779c680..631b9477b99c02f827103aa00a8d3a83c380d359 100644 (file)
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
 static int erst_open_pstore(struct pstore_info *psi);
 static int erst_close_pstore(struct pstore_info *psi);
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-                          struct timespec *time, struct pstore_info *psi);
+                          struct timespec *time, char **buf,
+                          struct pstore_info *psi);
 static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
                       size_t size, struct pstore_info *psi);
 static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
 }
 
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-                          struct timespec *time, struct pstore_info *psi)
+                          struct timespec *time, char **buf,
+                          struct pstore_info *psi)
 {
        int rc;
        ssize_t len = 0;
        u64 record_id;
-       struct cper_pstore_record *rcd = (struct cper_pstore_record *)
-                                       (erst_info.buf - sizeof(*rcd));
+       struct cper_pstore_record *rcd;
+       size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
 
        if (erst_disable)
                return -ENODEV;
 
+       rcd = kmalloc(rcd_len, GFP_KERNEL);
+       if (!rcd) {
+               rc = -ENOMEM;
+               goto out;
+       }
 skip:
        rc = erst_get_record_id_next(&reader_pos, &record_id);
        if (rc)
@@ -1004,22 +1011,27 @@ skip:
 
        /* no more record */
        if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-               rc = -1;
+               rc = -EINVAL;
                goto out;
        }
 
-       len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
-                       erst_info.bufsize);
+       len = erst_read(record_id, &rcd->hdr, rcd_len);
        /* The record may be cleared by others, try read next record */
        if (len == -ENOENT)
                goto skip;
-       else if (len < 0) {
-               rc = -1;
+       else if (len < sizeof(*rcd)) {
+               rc = -EIO;
                goto out;
        }
        if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
                goto skip;
 
+       *buf = kmalloc(len, GFP_KERNEL);
+       if (*buf == NULL) {
+               rc = -ENOMEM;
+               goto out;
+       }
+       memcpy(*buf, rcd->data, len - sizeof(*rcd));
        *id = record_id;
        if (uuid_le_cmp(rcd->sec_hdr.section_type,
                        CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
        time->tv_nsec = 0;
 
 out:
+       kfree(rcd);
        return (rc < 0) ? rc : (len - sizeof(*rcd));
 }
 
index d8b3d89db043e7e9ead44cb8b4a00946f4980fa6..919daa7cd5b1db3443c65863cc07c20c830f680b 100644 (file)
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
                 */
                list_del_init(&dev->kobj.entry);
                spin_unlock(&devices_kset->list_lock);
-               /* Disable all device's runtime power management */
-               pm_runtime_disable(dev);
+
+               /* Don't allow any more runtime suspends */
+               pm_runtime_get_noresume(dev);
+               pm_runtime_barrier(dev);
 
                if (dev->bus && dev->bus->shutdown) {
                        dev_dbg(dev, "shutdown\n");
index 5c6f56f21443ae7e01a1b9fc90b3202674fbaa37..dcd8babae9eb36fe864bae433558e5e1b4c77d83 100644 (file)
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
                else
                        op.config |= CFG_MID_FRAG;
 
-               writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-               writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-               writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-               writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-               writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+               if (first_block) {
+                       writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+                       writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+                       writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+                       writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+                       writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+               }
        }
 
        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
index 8af8e864a9cffbc13c91c625a837e11bb8de24fe..73464a62adf74ae1483a16ad71847c03fc03169f 100644 (file)
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
        { .compatible = "fsl,p1020-memory-controller", },
        { .compatible = "fsl,p1021-memory-controller", },
        { .compatible = "fsl,p2020-memory-controller", },
-       { .compatible = "fsl,p4080-memory-controller", },
+       { .compatible = "fsl,qoriq-memory-controller", },
        {},
 };
 MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
index 8370f72d87ff5ed955789973845629f406f6734e..b0a81173a268175f71606e1aad2ab6067b67f698 100644 (file)
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
 }
 
 static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
-                              struct timespec *timespec, struct pstore_info *psi)
+                              struct timespec *timespec,
+                              char **buf, struct pstore_info *psi)
 {
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
        struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
                                timespec->tv_nsec = 0;
                                get_var_data_locked(efivars, &efivars->walk_entry->var);
                                size = efivars->walk_entry->var.DataSize;
-                               memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+                               *buf = kmalloc(size, GFP_KERNEL);
+                               if (*buf == NULL)
+                                       return -ENOMEM;
+                               memcpy(*buf, efivars->walk_entry->var.Data,
+                                      size);
                                efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
                                                   struct efivar_entry, list);
                                return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
 }
 
 static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
-                              struct timespec *time, struct pstore_info *psi)
+                              struct timespec *timespec,
+                              char **buf, struct pstore_info *psi)
 {
        return -1;
 }
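The two pstore read callbacks changed above stop copying into a preallocated psi->buf and instead allocate the record buffer themselves, handing ownership back through the new char **buf argument while freeing the raw record locally. A minimal userspace sketch of that ownership pattern; struct raw_record and read_record() are hypothetical stand-ins, not kernel APIs.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct raw_record {
	size_t len;              /* total record length, header included */
	size_t hdr_len;          /* header bytes to strip */
	unsigned char data[];    /* payload follows the header fields */
};

/* Copy the payload into a freshly allocated buffer owned by the caller. */
static ssize_t read_record(const struct raw_record *rcd, char **buf)
{
	size_t payload = rcd->len - rcd->hdr_len;

	*buf = malloc(payload);
	if (!*buf)
		return -ENOMEM;

	memcpy(*buf, rcd->data, payload);
	return (ssize_t)payload;   /* caller frees *buf; the raw record stays local */
}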
index f10fc521951b17491348f0cd0fbb1f3013e31eae..1eedb6f7fdabe46efa082039818bef6f31fe1591 100644 (file)
 #include <linux/module.h>
 #include <linux/sigma.h>
 
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+       size_t payload = 0;
+
+       switch (sa->instr) {
+       case SIGMA_ACTION_WRITEXBYTES:
+       case SIGMA_ACTION_WRITESINGLE:
+       case SIGMA_ACTION_WRITESAFELOAD:
+               payload = sigma_action_len(sa);
+               break;
+       default:
+               break;
+       }
+
+       payload = ALIGN(payload, 2);
+
+       return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
 static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
 {
-       struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
        size_t len = sigma_action_len(sa);
-       int ret = 0;
+       int ret;
 
        pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
                sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
        case SIGMA_ACTION_WRITEXBYTES:
        case SIGMA_ACTION_WRITESINGLE:
        case SIGMA_ACTION_WRITESAFELOAD:
-               if (ssfw->fw->size < ssfw->pos + len)
-                       return -EINVAL;
                ret = i2c_master_send(client, (void *)&sa->addr, len);
                if (ret < 0)
                        return -EINVAL;
                break;
-
        case SIGMA_ACTION_DELAY:
-               ret = 0;
                udelay(len);
                len = 0;
                break;
-
        case SIGMA_ACTION_END:
-               return 1;
-
+               return 0;
        default:
                return -EINVAL;
        }
 
-       /* when arrive here ret=0 or sent data */
-       ssfw->pos += sigma_action_size(sa, len);
-       return ssfw->pos == ssfw->fw->size;
+       return 1;
 }
 
 static int
 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
 {
-       pr_debug("%s: processing %p\n", __func__, ssfw);
+       struct sigma_action *sa;
+       size_t size;
+       int ret;
+
+       while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+               sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+               size = sigma_action_size(sa);
+               ssfw->pos += size;
+               if (ssfw->pos > ssfw->fw->size || size == 0)
+                       break;
+
+               ret = process_sigma_action(client, sa);
 
-       while (1) {
-               int ret = process_sigma_action(client, ssfw);
                pr_debug("%s: action returned %i\n", __func__, ret);
-               if (ret == 1)
-                       return 0;
-               else if (ret)
+
+               if (ret <= 0)
                        return ret;
        }
+
+       if (ssfw->pos != ssfw->fw->size)
+               return -EINVAL;
+
+       return 0;
 }
 
 int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
 
        /* then verify the header */
        ret = -EINVAL;
-       if (fw->size < sizeof(*ssfw_head))
+
+       /*
+        * Reject too small or unreasonably large files. The upper limit has been

+        * chosen a bit arbitrarily, but it should be enough for all practical
+        * purposes and having the limit makes it easier to avoid integer
+        * overflows later in the loading process.
+        */
+       if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
                goto done;
 
        ssfw_head = (void *)fw->data;
        if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
                goto done;
 
-       crc = crc32(0, fw->data, fw->size);
+       crc = crc32(0, fw->data + sizeof(*ssfw_head),
+                       fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
-       if (crc != ssfw_head->crc)
+       if (crc != le32_to_cpu(ssfw_head->crc))
                goto done;
 
        ssfw.pos = sizeof(*ssfw_head);
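The sigma firmware rework above adds an overall size sanity check, derives each action's size from its header, and makes sure the walk never advances past the end of the image before the action is consumed. A rough userspace sketch of that kind of bounds-checked walk; the record layout and size limit here are illustrative only, not the SigmaDSP format.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define IMG_MAX_SIZE 0x4000000          /* arbitrary sanity limit, as above */
#define ALIGN2(x)    (((x) + 1) & ~(size_t)1)

struct record_hdr {                     /* 4-byte header, payload follows */
	uint8_t instr;
	uint8_t reserved;
	uint8_t len_le[2];              /* payload length, little-endian */
};

static int process_image(const uint8_t *img, size_t size)
{
	size_t pos = 0;

	if (size < sizeof(struct record_hdr) || size >= IMG_MAX_SIZE)
		return -EINVAL;

	while (pos + sizeof(struct record_hdr) <= size) {
		struct record_hdr hdr;
		size_t rec_size;

		memcpy(&hdr, img + pos, sizeof(hdr));
		rec_size = sizeof(hdr) +
			   ALIGN2((size_t)hdr.len_le[0] | (size_t)hdr.len_le[1] << 8);

		pos += rec_size;
		if (pos > size)         /* truncated record: stop before using it */
			return -EINVAL;

		/* ... hand img + pos - rec_size to the consumer here ... */
	}

	return pos == size ? 0 : -EINVAL;
}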
index dbcb0bcfd8dadf49ed156311594e704f81a9b9df..4e018d6a763996127cd370a6a3908273024f37a8 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI)    += gpio-davinci.o
 obj-$(CONFIG_GPIO_EP93XX)      += gpio-ep93xx.o
 obj-$(CONFIG_GPIO_IT8761E)     += gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)    += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695)      += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695)      += gpio-ks8695.o
 obj-$(CONFIG_GPIO_LANGWELL)    += gpio-langwell.o
 obj-$(CONFIG_ARCH_LPC32XX)     += gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_MAX730X)     += gpio-max730x.o
index 147df8ae79dbd42a3105a38dd67a067fca7d36f6..d3f3e8f5456129e7036fc3737181d37b40b8c642 100644 (file)
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
  * Translate OpenFirmware node properties into platform_data
  * WARNING: This is DEPRECATED and will be removed eventually!
  */
-void
+static void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
        struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
                *invert = *val;
 }
 #else
-void
+static void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
        *gpio_base = -1;
index 3969f7553fe75bf5fa877020e3ccea53f150dfd8..d2619d72ceceb16d7b88a33b09708219483019b4 100644 (file)
@@ -456,6 +456,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       /* Decouple all encoders and their attached connectors from this crtc */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       if (connector->encoder != encoder)
+                               continue;
+
+                       connector->encoder = NULL;
+               }
+       }
+
+       drm_helper_disable_unused_functions(dev);
+       return 0;
+}
+
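The new drm_crtc_helper_disable() above walks every encoder still driving the CRTC and clears the encoder pointer of each attached connector before disabling unused functions. A tiny sketch of that detach step using plain arrays instead of the DRM lists; types and names are illustrative only.

#include <stddef.h>

struct crtc { int id; };
struct encoder { struct crtc *crtc; };
struct connector { struct encoder *encoder; };

static void disable_crtc(struct crtc *crtc,
			 struct encoder *encoders, size_t nenc,
			 struct connector *connectors, size_t nconn)
{
	for (size_t e = 0; e < nenc; e++) {
		if (encoders[e].crtc != crtc)
			continue;
		/* decouple every connector attached to this encoder */
		for (size_t c = 0; c < nconn; c++)
			if (connectors[c].encoder == &encoders[e])
				connectors[c].encoder = NULL;
	}
}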
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                (int)set->num_connectors, set->x, set->y);
        } else {
                DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-               set->mode = NULL;
-               set->num_connectors = 0;
+               return drm_crtc_helper_disable(set->crtc);
        }
 
        dev = set->crtc->dev;
index 6f8afea94fc979b74476746aca1ae32341846357..2bb07bca511a12b6949892186565cfde960db8ca 100644 (file)
 #include "drm.h"
 
 #include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
 static int lowlevel_buffer_allocate(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry)
+               struct exynos_drm_gem_buf *buffer)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
-                       (dma_addr_t *)&entry->paddr, GFP_KERNEL);
-       if (!entry->paddr) {
+       buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+                       &buffer->dma_addr, GFP_KERNEL);
+       if (!buffer->kvaddr) {
                DRM_ERROR("failed to allocate buffer.\n");
                return -ENOMEM;
        }
 
-       DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
-                       (unsigned int)entry->vaddr, entry->paddr, entry->size);
+       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+                       (unsigned long)buffer->kvaddr,
+                       (unsigned long)buffer->dma_addr,
+                       buffer->size);
 
        return 0;
 }
 
 static void lowlevel_buffer_deallocate(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry)
+               struct exynos_drm_gem_buf *buffer)
 {
        DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-       if (entry->paddr && entry->vaddr && entry->size)
-               dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
-                               entry->paddr);
+       if (buffer->dma_addr && buffer->size)
+               dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+                               (dma_addr_t)buffer->dma_addr);
        else
-               DRM_DEBUG_KMS("entry data is null.\n");
+               DRM_DEBUG_KMS("buffer data are invalid.\n");
 }
 
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
                unsigned int size)
 {
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
 
        DRM_DEBUG_KMS("%s.\n", __FILE__);
+       DRM_DEBUG_KMS("desired size = 0x%x\n", size);
 
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry) {
-               DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+       buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+       if (!buffer) {
+               DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       entry->size = size;
+       buffer->size = size;
 
        /*
         * allocate memory region with size and set the memory information
-        * to vaddr and paddr of a entry object.
+        * to vaddr and dma_addr of a buffer object.
         */
-       if (lowlevel_buffer_allocate(dev, entry) < 0) {
-               kfree(entry);
-               entry = NULL;
+       if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+               kfree(buffer);
+               buffer = NULL;
                return ERR_PTR(-ENOMEM);
        }
 
-       return entry;
+       return buffer;
 }
 
 void exynos_drm_buf_destroy(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry)
+               struct exynos_drm_gem_buf *buffer)
 {
        DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-       if (!entry) {
-               DRM_DEBUG_KMS("entry is null.\n");
+       if (!buffer) {
+               DRM_DEBUG_KMS("buffer is null.\n");
                return;
        }
 
-       lowlevel_buffer_deallocate(dev, entry);
+       lowlevel_buffer_deallocate(dev, buffer);
 
-       kfree(entry);
-       entry = NULL;
+       kfree(buffer);
+       buffer = NULL;
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
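exynos_drm_buf_create()/exynos_drm_buf_destroy() above follow the usual two-step allocation pattern: allocate the bookkeeping struct, then the backing memory, and free the struct again if the second step fails. A userspace sketch of the same pattern, with malloc() standing in for dma_alloc_writecombine() and exynos_buf standing in for struct exynos_drm_gem_buf.

#include <stdlib.h>
#include <stddef.h>

struct exynos_buf {
	void   *kvaddr;
	size_t  size;
};

static struct exynos_buf *buf_create(size_t size)
{
	struct exynos_buf *buffer = calloc(1, sizeof(*buffer));

	if (!buffer)
		return NULL;

	buffer->size = size;
	buffer->kvaddr = malloc(size);   /* stands in for dma_alloc_writecombine() */
	if (!buffer->kvaddr) {
		free(buffer);            /* unwind the bookkeeping struct */
		return NULL;
	}
	return buffer;
}

static void buf_destroy(struct exynos_buf *buffer)
{
	if (!buffer)
		return;
	free(buffer->kvaddr);
	free(buffer);
}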
index 045d59eab01a29b5270410d661968d408dcfa663..6e91f9caa5dbdfe12aade5461c3ad253888f20bf 100644 (file)
 #ifndef _EXYNOS_DRM_BUF_H_
 #define _EXYNOS_DRM_BUF_H_
 
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
-       dma_addr_t paddr;
-       void __iomem *vaddr;
-       unsigned int size;
-};
-
 /* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
                unsigned int size);
 
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
 
 /* remove allocated physical memory. */
 void exynos_drm_buf_destroy(struct drm_device *dev,
-               struct exynos_drm_buf_entry *entry);
+               struct exynos_drm_gem_buf *buffer);
 
 #endif
index 985d9e7687287d915c6a76e2b7219f9e7840e7f8..d620b0784257f05cab7c52b121a460c99f6f7daf 100644 (file)
@@ -37,6 +37,8 @@
 
 struct exynos_drm_connector {
        struct drm_connector    drm_connector;
+       uint32_t                encoder_id;
+       struct exynos_drm_manager *manager;
 };
 
 /* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        mode->clock = timing->pixclock / 1000;
+       mode->vrefresh = timing->refresh;
 
        mode->hdisplay = timing->xres;
        mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
        mode->vsync_start = mode->vdisplay + timing->upper_margin;
        mode->vsync_end = mode->vsync_start + timing->vsync_len;
        mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+       if (timing->vmode & FB_VMODE_INTERLACED)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+       if (timing->vmode & FB_VMODE_DOUBLE)
+               mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 }
 
 /* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
        memset(timing, 0, sizeof(*timing));
 
        timing->pixclock = mode->clock * 1000;
-       timing->refresh = mode->vrefresh;
+       timing->refresh = drm_mode_vrefresh(mode);
 
        timing->xres = mode->hdisplay;
        timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
 
 static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 {
-       struct exynos_drm_manager *manager =
-                               exynos_drm_get_manager(connector->encoder);
-       struct exynos_drm_display *display = manager->display;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct exynos_drm_manager *manager = exynos_connector->manager;
+       struct exynos_drm_display_ops *display_ops = manager->display_ops;
        unsigned int count;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (!display) {
-               DRM_DEBUG_KMS("display is null.\n");
+       if (!display_ops) {
+               DRM_DEBUG_KMS("display_ops is null.\n");
                return 0;
        }
 
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
         * P.S. in case of lcd panel, count is always 1 if success
         * because lcd panel has only one mode.
         */
-       if (display->get_edid) {
+       if (display_ops->get_edid) {
                int ret;
                void *edid;
 
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                        return 0;
                }
 
-               ret = display->get_edid(manager->dev, connector,
+               ret = display_ops->get_edid(manager->dev, connector,
                                                edid, MAX_EDID);
                if (ret < 0) {
                        DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
                struct fb_videomode *timing;
 
-               if (display->get_timing)
-                       timing = display->get_timing(manager->dev);
+               if (display_ops->get_timing)
+                       timing = display_ops->get_timing(manager->dev);
                else {
                        drm_mode_destroy(connector->dev, mode);
                        return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
                                            struct drm_display_mode *mode)
 {
-       struct exynos_drm_manager *manager =
-                               exynos_drm_get_manager(connector->encoder);
-       struct exynos_drm_display *display = manager->display;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct exynos_drm_manager *manager = exynos_connector->manager;
+       struct exynos_drm_display_ops *display_ops = manager->display_ops;
        struct fb_videomode timing;
        int ret = MODE_BAD;
 
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
 
        convert_to_video_timing(&timing, mode);
 
-       if (display && display->check_timing)
-               if (!display->check_timing(manager->dev, (void *)&timing))
+       if (display_ops && display_ops->check_timing)
+               if (!display_ops->check_timing(manager->dev, (void *)&timing))
                        ret = MODE_OK;
 
        return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
 
 struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
 {
+       struct drm_device *dev = connector->dev;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       return connector->encoder;
+       obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+                                  DRM_MODE_OBJECT_ENCODER);
+       if (!obj) {
+               DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+                               exynos_connector->encoder_id);
+               return NULL;
+       }
+
+       encoder = obj_to_encoder(obj);
+
+       return encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
 static enum drm_connector_status
 exynos_drm_connector_detect(struct drm_connector *connector, bool force)
 {
-       struct exynos_drm_manager *manager =
-                               exynos_drm_get_manager(connector->encoder);
-       struct exynos_drm_display *display = manager->display;
+       struct exynos_drm_connector *exynos_connector =
+                                       to_exynos_connector(connector);
+       struct exynos_drm_manager *manager = exynos_connector->manager;
+       struct exynos_drm_display_ops *display_ops =
+                                       manager->display_ops;
        enum drm_connector_status status = connector_status_disconnected;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (display && display->is_connected) {
-               if (display->is_connected(manager->dev))
+       if (display_ops && display_ops->is_connected) {
+               if (display_ops->is_connected(manager->dev))
                        status = connector_status_connected;
                else
                        status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 
        connector = &exynos_connector->drm_connector;
 
-       switch (manager->display->type) {
+       switch (manager->display_ops->type) {
        case EXYNOS_DISPLAY_TYPE_HDMI:
                type = DRM_MODE_CONNECTOR_HDMIA;
+               connector->interlace_allowed = true;
+               connector->polled = DRM_CONNECTOR_POLL_HPD;
                break;
        default:
                type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
        if (err)
                goto err_connector;
 
+       exynos_connector->encoder_id = encoder->base.id;
+       exynos_connector->manager = manager;
        connector->encoder = encoder;
+
        err = drm_mode_connector_attach_encoder(connector, encoder);
        if (err) {
                DRM_ERROR("failed to attach a connector to a encoder\n");
index 9337e5e2dbb6204428178c8c3a962ebb3d61fab0..ee43cc22085304f7f07f267cd63a7d2be05d249a 100644 (file)
 #include "drmP.h"
 #include "drm_crtc_helper.h"
 
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
 #define to_exynos_crtc(x)      container_of(x, struct exynos_drm_crtc,\
                                drm_crtc)
 
-/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- *     - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- *     - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
-       unsigned int fb_x;
-       unsigned int fb_y;
-       unsigned int crtc_x;
-       unsigned int crtc_y;
-       unsigned int crtc_w;
-       unsigned int crtc_h;
-};
-
 /*
  * Exynos specific crtc structure.
  *
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
 
        exynos_drm_fn_encoder(crtc, overlay,
                        exynos_drm_encoder_crtc_mode_set);
-       exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+       exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+                       exynos_drm_encoder_crtc_commit);
 }
 
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
-                                      struct drm_framebuffer *fb,
-                                      struct drm_display_mode *mode,
-                                      struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+                             struct drm_framebuffer *fb,
+                             struct drm_display_mode *mode,
+                             struct exynos_drm_crtc_pos *pos)
 {
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
        unsigned int actual_w;
        unsigned int actual_h;
 
-       entry = exynos_drm_fb_get_buf(fb);
-       if (!entry) {
-               DRM_LOG_KMS("entry is null.\n");
+       buffer = exynos_drm_fb_get_buf(fb);
+       if (!buffer) {
+               DRM_LOG_KMS("buffer is null.\n");
                return -EFAULT;
        }
 
-       overlay->paddr = entry->paddr;
-       overlay->vaddr = entry->vaddr;
+       overlay->dma_addr = buffer->dma_addr;
+       overlay->vaddr = buffer->kvaddr;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
                        (unsigned long)overlay->vaddr,
-                       (unsigned long)overlay->paddr);
+                       (unsigned long)overlay->dma_addr);
 
        actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
        actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
-       DRM_DEBUG_KMS("%s\n", __FILE__);
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-       /* TODO */
+       DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+                               exynos_drm_encoder_crtc_commit);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               /* TODO */
+               exynos_drm_fn_encoder(crtc, NULL,
+                               exynos_drm_encoder_crtc_disable);
+               break;
+       default:
+               DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+               break;
+       }
 }
 
 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
 
 static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 {
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       /* drm framework doesn't check NULL. */
+       exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+                       exynos_drm_encoder_crtc_commit);
 }
 
 static bool
index c584042d6d2cb21c8282975f9edf28898b4e56e0..25f72a62cb880b757b54e0514693eedd624ce27b 100644 (file)
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
 
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ *     - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ *     - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+       unsigned int fb_x;
+       unsigned int fb_y;
+       unsigned int crtc_x;
+       unsigned int crtc_y;
+       unsigned int crtc_w;
+       unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+                             struct drm_framebuffer *fb,
+                             struct drm_display_mode *mode,
+                             struct exynos_drm_crtc_pos *pos);
 #endif
index 83810cbe3c1770aeea62814dfdd98c2e1b33df2e..53e2216de61dd5c42c35eaa7f0c23ca1e991a621 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_crtc_helper.h"
 
 #include <drm/exynos_drm.h>
 
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
 
+       /* init kms poll for handling hpd */
+       drm_kms_helper_poll_init(dev);
+
        exynos_drm_mode_config_init(dev);
 
        /*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
        exynos_drm_fbdev_fini(dev);
        exynos_drm_device_unregister(dev);
        drm_vblank_cleanup(dev);
+       drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        kfree(dev->dev_private);
 
index c03683f2ae72dffb084aaede5dba582a68c835f9..5e02e6ecc2e026955c9bf88bd5461bf893c1dbec 100644 (file)
@@ -29,6 +29,7 @@
 #ifndef _EXYNOS_DRM_DRV_H_
 #define _EXYNOS_DRM_DRV_H_
 
+#include <linux/module.h>
 #include "drm.h"
 
 #define MAX_CRTC       2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
  * @scan_flag: interlace or progressive way.
  *     (it could be DRM_MODE_FLAG_*)
  * @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- *             and this is physically continuous.
+ * @dma_addr: bus (accessed by DMA) address of the memory region allocated
+ *     for an overlay.
  * @vaddr: virtual memory addresss to this overlay.
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
        unsigned int scan_flag;
        unsigned int bpp;
        unsigned int pitch;
-       dma_addr_t paddr;
+       dma_addr_t dma_addr;
        void __iomem *vaddr;
 
        bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
  * @check_timing: check if timing is valid or not.
  * @power_on: display device on or off.
  */
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
        enum exynos_drm_output_type type;
        bool (*is_connected)(struct device *dev);
        int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
  * @mode_set: convert drm_display_mode to hw specific display mode and
  *           would be called by encoder->mode_set().
  * @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  */
 struct exynos_drm_manager_ops {
        void (*mode_set)(struct device *subdrv_dev, void *mode);
        void (*commit)(struct device *subdrv_dev);
+       void (*disable)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
 };
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
        int pipe;
        struct exynos_drm_manager_ops *ops;
        struct exynos_drm_overlay_ops *overlay_ops;
-       struct exynos_drm_display *display;
+       struct exynos_drm_display_ops *display_ops;
 };
 
 /*
index 7cf6fa86a67efb57e10407213639e8084d2c2138..153061415bafba2d3c83d155a7e86a4901db0b52 100644 (file)
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
        struct drm_device *dev = encoder->dev;
        struct drm_connector *connector;
        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+       struct exynos_drm_manager_ops *manager_ops = manager->ops;
 
        DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
 
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (manager_ops && manager_ops->commit)
+                       manager_ops->commit(manager->dev);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               /* TODO */
+               if (manager_ops && manager_ops->disable)
+                       manager_ops->disable(manager->dev);
+               break;
+       default:
+               DRM_ERROR("unspecified mode %d\n", mode);
+               break;
+       }
+
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
-                       struct exynos_drm_display *display = manager->display;
+                       struct exynos_drm_display_ops *display_ops =
+                                                       manager->display_ops;
 
-                       if (display && display->power_on)
-                               display->power_on(manager->dev, mode);
+                       DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+                                       connector->base.id, mode);
+                       if (display_ops && display_ops->power_on)
+                               display_ops->power_on(manager->dev, mode);
                }
        }
 }
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 {
        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
        struct exynos_drm_manager_ops *manager_ops = manager->ops;
-       struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        if (manager_ops && manager_ops->commit)
                manager_ops->commit(manager->dev);
-
-       if (overlay_ops && overlay_ops->commit)
-               overlay_ops->commit(manager->dev);
 }
 
 static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;
+       struct exynos_drm_private *private = dev->dev_private;
+       struct exynos_drm_manager *manager;
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               if (encoder->crtc != crtc)
-                       continue;
+               /*
+                * if crtc is detached from encoder, check pipe,
+                * otherwise check crtc attached to encoder
+                */
+               if (!encoder->crtc) {
+                       manager = to_exynos_encoder(encoder)->manager;
+                       if (manager->pipe < 0 ||
+                                       private->crtc[manager->pipe] != crtc)
+                               continue;
+               } else {
+                       if (encoder->crtc != crtc)
+                               continue;
+               }
 
                fn(encoder, data);
        }
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
        struct exynos_drm_manager *manager =
                to_exynos_encoder(encoder)->manager;
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+       int crtc = *(int *)data;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       /*
+        * when crtc is detached from encoder, this pipe is used
+        * to select manager operation
+        */
+       manager->pipe = crtc;
 
-       overlay_ops->commit(manager->dev);
+       if (overlay_ops && overlay_ops->commit)
+               overlay_ops->commit(manager->dev);
 }
 
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
        struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
        struct exynos_drm_overlay *overlay = data;
 
-       overlay_ops->mode_set(manager->dev, overlay);
+       if (overlay_ops && overlay_ops->mode_set)
+               overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+       struct exynos_drm_manager *manager =
+               to_exynos_encoder(encoder)->manager;
+       struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (overlay_ops && overlay_ops->disable)
+               overlay_ops->disable(manager->dev);
+
+       /*
+        * the crtc has already been detached from the encoder and the
+        * final detach step has completed, so clear the manager's pipe
+        * to prevent repeated calls
+        */
+       if (!encoder->crtc)
+               manager->pipe = -1;
 }
 
 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
index 5ecd645d06a97e1959599ecfd94d2b7ba2d50847..a22acfbf0e4ed6dc31894d03f7a5369158ded8a2 100644 (file)
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
 
 #endif
index 48d29cfd5240197f645c6732a9360bb255656f97..5bf4a1ac7f828cd26509857e5ed9b024f094f831 100644 (file)
@@ -29,7 +29,9 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 
+#include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_buf.h"
 #include "exynos_drm_gem.h"
  *
  * @fb: drm framebuffer obejct.
  * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- *     - containing only the information to physically continuous memory
- *     region allocated at default framebuffer creation.
+ * @buffer: pointer to an exynos_drm_gem_buf object.
+ *     - contains the memory information for the memory region allocated
+ *     at default framebuffer creation.
  */
 struct exynos_drm_fb {
        struct drm_framebuffer          fb;
        struct exynos_drm_gem_obj       *exynos_gem_obj;
-       struct exynos_drm_buf_entry     *entry;
+       struct exynos_drm_gem_buf       *buffer;
 };
 
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
         * default framebuffer has no gem object so
         * a buffer of the default framebuffer should be released at here.
         */
-       if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
-               exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+       if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+               exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
 
        kfree(exynos_fb);
        exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
         */
        if (!mode_cmd->handle) {
                if (!file_priv) {
-                       struct exynos_drm_buf_entry *entry;
+                       struct exynos_drm_gem_buf *buffer;
 
                        /*
                         * in case that file_priv is NULL, it allocates
                         * only buffer and this buffer would be used
                         * for default framebuffer.
                         */
-                       entry = exynos_drm_buf_create(dev, size);
-                       if (IS_ERR(entry)) {
-                               ret = PTR_ERR(entry);
+                       buffer = exynos_drm_buf_create(dev, size);
+                       if (IS_ERR(buffer)) {
+                               ret = PTR_ERR(buffer);
                                goto err_buffer;
                        }
 
-                       exynos_fb->entry = entry;
+                       exynos_fb->buffer = buffer;
 
-                       DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
-                                       (unsigned long)entry->paddr, size);
+                       DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+                                       (unsigned long)buffer->dma_addr, size);
 
                        goto out;
                } else {
-                       exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
-                                                       size,
-                                                       &mode_cmd->handle);
+                       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+                                                       &mode_cmd->handle,
+                                                       size);
                        if (IS_ERR(exynos_gem_obj)) {
                                ret = PTR_ERR(exynos_gem_obj);
                                goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
         * so that default framebuffer has no its own gem object,
         * only its own buffer object.
         */
-       exynos_fb->entry = exynos_gem_obj->entry;
+       exynos_fb->buffer = exynos_gem_obj->buffer;
 
-       DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
-                       (unsigned long)exynos_fb->entry->paddr, size,
+       DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+                       (unsigned long)exynos_fb->buffer->dma_addr, size,
                        (unsigned int)&exynos_gem_obj->base);
 
 out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
        return exynos_drm_fb_init(file_priv, dev, mode_cmd);
 }
 
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       entry = exynos_fb->entry;
-       if (!entry)
+       buffer = exynos_fb->buffer;
+       if (!buffer)
                return NULL;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
-                       (unsigned long)entry->vaddr,
-                       (unsigned long)entry->paddr);
+       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+                       (unsigned long)buffer->kvaddr,
+                       (unsigned long)buffer->dma_addr);
 
-       return entry;
+       return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+       struct exynos_drm_private *private = dev->dev_private;
+       struct drm_fb_helper *fb_helper = private->fb_helper;
+
+       if (fb_helper)
+               drm_fb_helper_hotplug_event(fb_helper);
 }
 
 static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_drm_fb_create,
+       .output_poll_changed = exynos_drm_output_poll_changed,
 };
 
 void exynos_drm_mode_config_init(struct drm_device *dev)
index 1f4b3d1a77134d46ae193446a2720020dfd745cf..836f4100818710e830229b985591bfc85a047f03 100644 (file)
@@ -33,6 +33,7 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
 #define MAX_CONNECTOR          4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
-                                    struct drm_framebuffer *fb,
-                                    unsigned int fb_width,
-                                    unsigned int fb_height)
+                                    struct drm_framebuffer *fb)
 {
        struct fb_info *fbi = helper->fbdev;
        struct drm_device *dev = helper->dev;
        struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
-       struct exynos_drm_buf_entry *entry;
-       unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+       struct exynos_drm_gem_buf *buffer;
+       unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
        unsigned long offset;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
        exynos_fb->fb = fb;
 
        drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
-       drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+       drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
 
-       entry = exynos_drm_fb_get_buf(fb);
-       if (!entry) {
-               DRM_LOG_KMS("entry is null.\n");
+       buffer = exynos_drm_fb_get_buf(fb);
+       if (!buffer) {
+               DRM_LOG_KMS("buffer is null.\n");
                return -EFAULT;
        }
 
        offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
        offset += fbi->var.yoffset * fb->pitch;
 
-       dev->mode_config.fb_base = entry->paddr;
-       fbi->screen_base = entry->vaddr + offset;
-       fbi->fix.smem_start = entry->paddr + offset;
+       dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+       fbi->screen_base = buffer->kvaddr + offset;
+       fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
 
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
                goto out;
        }
 
-       ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
-                       sizes->fb_height);
+       ret = exynos_drm_fbdev_update(helper, helper->fb);
        if (ret < 0)
                fb_dealloc_cmap(&fbi->cmap);
 
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
        }
 
        helper->fb = exynos_fbdev->fb;
-       return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
-                       sizes->fb_height);
+       return exynos_drm_fbdev_update(helper, helper->fb);
 }
 
 static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
        fb_helper = private->fb_helper;
 
        if (fb_helper) {
+               struct list_head temp_list;
+
+               INIT_LIST_HEAD(&temp_list);
+
+               /*
+                * fb_helper is reinitialized but the kernel fb is reused,
+                * so kernel_fb_list needs to be backed up and restored
+                */
+               if (!list_empty(&fb_helper->kernel_fb_list))
+                       list_replace_init(&fb_helper->kernel_fb_list,
+                                       &temp_list);
+
                drm_fb_helper_fini(fb_helper);
 
                ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
                        return ret;
                }
 
+               if (!list_empty(&temp_list))
+                       list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
                ret = drm_fb_helper_single_add_all_connectors(fb_helper);
                if (ret < 0) {
                        DRM_ERROR("failed to add fb helper to connectors\n");
index 4659c88cdd9bbaec81e6d6d1d9fe1d7b03964dec..db3b3d9e731d86475d734884e2ec113a8be5f914 100644 (file)
@@ -64,7 +64,7 @@ struct fimd_win_data {
        unsigned int            fb_width;
        unsigned int            fb_height;
        unsigned int            bpp;
-       dma_addr_t              paddr;
+       dma_addr_t              dma_addr;
        void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
        return 0;
 }
 
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
        .type = EXYNOS_DISPLAY_TYPE_LCD,
        .is_connected = fimd_display_is_connected,
        .get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
        writel(val, ctx->regs + VIDCON0);
 }
 
+static void fimd_disable(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+       struct drm_device *drm_dev = subdrv->drm_dev;
+       struct exynos_drm_manager *manager = &subdrv->manager;
+       u32 val;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       /* fimd dma off */
+       val = readl(ctx->regs + VIDCON0);
+       val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+       writel(val, ctx->regs + VIDCON0);
+
+       /*
+        * if vblank is still enabled when the dma is turned off,
+        * disable the vsync interrupt.
+        */
+       if (drm_dev->vblank_enabled[manager->pipe] &&
+               atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+               drm_vblank_put(drm_dev, manager->pipe);
+
+               /*
+                * if vblank_disable_allowed is 0, disable the vsync
+                * interrupt right now; otherwise the vsync interrupt
+                * will be disabled by the drm timer once the current
+                * process gives up ownership of the vblank event.
+                */
+               if (!drm_dev->vblank_disable_allowed)
+                       drm_vblank_off(drm_dev, manager->pipe);
+       }
+}
+
 static int fimd_enable_vblank(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
 
 static struct exynos_drm_manager_ops fimd_manager_ops = {
        .commit = fimd_commit,
+       .disable = fimd_disable,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
 };
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->ovl_height = overlay->crtc_height;
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
-       win_data->paddr = overlay->paddr + offset;
+       win_data->dma_addr = overlay->dma_addr + offset;
        win_data->vaddr = overlay->vaddr + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
        DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->paddr,
+                       (unsigned long)win_data->dma_addr,
                        (unsigned long)win_data->vaddr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
        writel(val, ctx->regs + SHADOWCON);
 
        /* buffer start address */
-       val = win_data->paddr;
+       val = (unsigned long)win_data->dma_addr;
        writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
        /* buffer end address */
        size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
-       val = win_data->paddr + size;
+       val = (unsigned long)(win_data->dma_addr + size);
        writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
        DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
-                       (unsigned long)win_data->paddr, val, size);
+                       (unsigned long)win_data->dma_addr, val, size);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
 
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
 static void fimd_win_disable(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
-       struct fimd_win_data *win_data;
        int win = ctx->default_win;
        u32 val;
 
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
        if (win < 0 || win > WINDOWS_NR)
                return;
 
-       win_data = &ctx->win_data[win];
-
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
                /* VSYNC interrupt */
                writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
 
+       /*
+        * when vblank_disable_allowed is 1, manager->pipe may already be
+        * -1 here: the disable callback does not disable the vsync
+        * interrupt immediately, so at this moment one more vsync
+        * interrupt can still occur. it will be disabled by the
+        * timer handler later.
+        */
+       if (manager->pipe == -1)
+               return IRQ_HANDLED;
+
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         */
        drm_dev->irq_enabled = 1;
 
-       /*
-        * with vblank_disable_allowed = 1, vblank interrupt will be disabled
-        * by drm timer once a current process gives up ownership of
-        * vblank event.(drm_vblank_put function was called)
-        */
-       drm_dev->vblank_disable_allowed = 1;
-
        return 0;
 }
 
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        subdrv->manager.pipe = -1;
        subdrv->manager.ops = &fimd_manager_ops;
        subdrv->manager.overlay_ops = &fimd_overlay_ops;
-       subdrv->manager.display = &fimd_display;
+       subdrv->manager.display_ops = &fimd_display_ops;
        subdrv->manager.dev = dev;
 
        platform_set_drvdata(pdev, ctx);
index a8e7a88906ed22e43fb23d23a859dc825a335451..aba0fe47f7eaae47c10ac4b0839eba8c1edf7f54 100644 (file)
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
        return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
 }
 
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int size,
-               unsigned int *handle)
+static struct exynos_drm_gem_obj
+               *exynos_drm_gem_init(struct drm_device *drm_dev,
+                       struct drm_file *file_priv, unsigned int *handle,
+                       unsigned int size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
-       struct exynos_drm_buf_entry *entry;
        struct drm_gem_object *obj;
        int ret;
 
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       size = roundup(size, PAGE_SIZE);
-
        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object.\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       /* allocate the new buffer object and memory region. */
-       entry = exynos_drm_buf_create(dev, size);
-       if (!entry) {
-               kfree(exynos_gem_obj);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       exynos_gem_obj->entry = entry;
-
        obj = &exynos_gem_obj->base;
 
-       ret = drm_gem_object_init(dev, obj, size);
+       ret = drm_gem_object_init(drm_dev, obj, size);
        if (ret < 0) {
-               DRM_ERROR("failed to initailize gem object.\n");
-               goto err_obj_init;
+               DRM_ERROR("failed to initialize gem object.\n");
+               ret = -EINVAL;
+               goto err_object_init;
        }
 
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
 err_create_mmap_offset:
        drm_gem_object_release(obj);
 
-err_obj_init:
-       exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
-
+err_object_init:
        kfree(exynos_gem_obj);
 
        return ERR_PTR(ret);
 }
 
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+                               struct drm_file *file_priv,
+                               unsigned int *handle, unsigned long size)
+{
+
+       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+       struct exynos_drm_gem_buf *buffer;
+
+       size = roundup(size, PAGE_SIZE);
+
+       DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+
+       buffer = exynos_drm_buf_create(dev, size);
+       if (IS_ERR(buffer)) {
+               return ERR_CAST(buffer);
+       }
+
+       exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+       if (IS_ERR(exynos_gem_obj)) {
+               exynos_drm_buf_destroy(dev, buffer);
+               return exynos_gem_obj;
+       }
+
+       exynos_gem_obj->buffer = buffer;
+
+       return exynos_gem_obj;
+}
+
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+                                       struct drm_file *file_priv)
 {
        struct drm_exynos_gem_create *args = data;
-       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
 
-       DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+       DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
-                       &args->handle);
+       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+                                               &args->handle, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
        unsigned long pfn, vm_size;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 
        vm_size = vma->vm_end - vma->vm_start;
        /*
-        * a entry contains information to physically continuous memory
+        * a buffer contains information about the physically contiguous memory
         * allocated by user request or at framebuffer creation.
         */
-       entry = exynos_gem_obj->entry;
+       buffer = exynos_gem_obj->buffer;
 
        /* check if user-requested size is valid. */
-       if (vm_size > entry->size)
+       if (vm_size > buffer->size)
                return -EINVAL;
 
        /*
         * get page frame number to physical memory to be mapped
         * to user space.
         */
-       pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+       pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
 
        DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
 
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
 
        exynos_gem_obj = to_exynos_gem_obj(gem_obj);
 
-       exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+       exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
 
        kfree(exynos_gem_obj);
 }
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * args->bpp >> 3;
        args->size = args->pitch * args->height;
 
-       exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
-                                                       &args->handle);
+       exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+                                                       args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);
 
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        mutex_lock(&dev->struct_mutex);
 
-       pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+       pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+                       PAGE_SHIFT) + page_offset;
 
        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
index e5fc0148277bf84b5fe8c9a31365256c940da802..ef8797334e6da746c54eed761521aacdc7afd7a8 100644 (file)
 #define to_exynos_gem_obj(x)   container_of(x,\
                        struct exynos_drm_gem_obj, base)
 
+/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by dma) of the allocated memory region.
+ *     - this could be a physical address without an IOMMU or
+ *     a device address with an IOMMU.
+ * @size: size of the allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+       void __iomem            *kvaddr;
+       dma_addr_t              dma_addr;
+       unsigned long           size;
+};
+
 /*
  * exynos drm buffer structure.
  *
  * @base: a gem object.
  *     - a new handle to this gem object would be created
  *     by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- *     - containing the information to physically
+ * @buffer: a pointer to the exynos_drm_gem_buf object.
+ *     - contains the information about the physically
  *     continuous memory region allocated by user request
  *     or at framebuffer creation.
  *
  */
 struct exynos_drm_gem_obj {
        struct drm_gem_object base;
-       struct exynos_drm_buf_entry *entry;
+       struct exynos_drm_gem_buf *buffer;
 };
 
 /* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
-               struct drm_device *dev, unsigned int size,
-               unsigned int *handle);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+               struct drm_file *file_priv,
+               unsigned int *handle, unsigned long size);
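
The exynos_drm_gem_buf structure above carries a kernel virtual address, a DMA (bus) address and the size of one contiguous allocation. A minimal sketch of how such a buffer is typically filled in, assuming the backing store comes from dma_alloc_writecombine(); buf_create() here is a hypothetical stand-in, not the driver's actual helper:

static struct exynos_drm_gem_buf *buf_create(struct drm_device *dev,
					     unsigned long size)
{
	struct exynos_drm_gem_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	/* one contiguous region: kvaddr for the CPU, dma_addr for the device */
	buf->kvaddr = dma_alloc_writecombine(dev->dev, size,
					     &buf->dma_addr, GFP_KERNEL);
	if (!buf->kvaddr) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	return buf;
}
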
 
 /*
  * request gem object creation and buffer allocation as the size
index 8359dc777041be9265e53907025564196b1d0fbc..60ff1b63b568c464248fb5651d16fb550d54c69b 100644 (file)
@@ -2026,8 +2026,13 @@ i915_wait_request(struct intel_ring_buffer *ring,
         * to handle this, the waiter on a request often wants an associated
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
+        *
+        * To avoid a recursion with the ilk VT-d workaround (that calls
+        * gpu_idle when unbinding objects with interruptible==false) don't
+        * retire requests in that case (because it might call unbind if the
+        * active list holds the last reference to the object).
         */
-       if (ret == 0)
+       if (ret == 0 && dev_priv->mm.interruptible)
                i915_gem_retire_requests_ring(ring);
 
        return ret;
index ddbabefb4273ffa0fe071d49422dd448d21875d9..b12fd2c80812d002e0840787e8ff47481a3691a2 100644 (file)
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        spin_unlock_irqrestore(&dev->event_lock, flags);
        return 0;
 }
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+                           struct drm_mode_create_dumb *args)
+{
+       struct nouveau_bo *bo;
+       int ret;
+
+       args->pitch = roundup(args->width * (args->bpp / 8), 256);
+       args->size = args->pitch * args->height;
+       args->size = roundup(args->size, PAGE_SIZE);
+
+       ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+       if (ret)
+               return ret;
+
+       ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+       drm_gem_object_unreference_unlocked(bo->gem);
+       return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+                            uint32_t handle)
+{
+       return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+                               struct drm_device *dev,
+                               uint32_t handle, uint64_t *poffset)
+{
+       struct drm_gem_object *gem;
+
+       gem = drm_gem_object_lookup(dev, file_priv, handle);
+       if (gem) {
+               struct nouveau_bo *bo = gem->driver_private;
+               *poffset = bo->bo.addr_space_offset;
+               drm_gem_object_unreference_unlocked(gem);
+               return 0;
+       }
+
+       return -ENOENT;
+}
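
nouveau_display_dumb_create() above derives the scanout pitch and the allocation size from the requested geometry before carving the buffer out of VRAM. A quick worked example, assuming a hypothetical 1024x768 request at 32 bpp and 4 KiB pages:

	args->pitch = roundup(1024 * (32 / 8), 256);   /* roundup(4096, 256) = 4096 bytes per scanline */
	args->size  = args->pitch * 768;               /* 4096 * 768 = 3145728 bytes */
	args->size  = roundup(args->size, PAGE_SIZE);  /* already a multiple of 4096, so unchanged */
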
index 9f7bb12952623b51bed52e21148ec04dcca902af..9791d13c9e3b8d9d223bd5bb1cae182de4bc89ee 100644 (file)
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
        .gem_open_object = nouveau_gem_object_open,
        .gem_close_object = nouveau_gem_object_close,
 
+       .dumb_create = nouveau_display_dumb_create,
+       .dumb_map_offset = nouveau_display_dumb_map_offset,
+       .dumb_destroy = nouveau_display_dumb_destroy,
+
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
 #ifdef GIT_REVISION
index 29837da1098b3a85a50bbc9ee4ccc2b0a4901dab..4c0be3a4ed882f5430ea628ba4d80fef1f1d3690 100644 (file)
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                           struct drm_pending_vblank_event *event);
 int nouveau_finish_page_flip(struct nouveau_channel *,
                             struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+                               struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+                                   uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+                                uint32_t handle);
 
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
index 02222c540aee1a38f963dd13eeda7f0f7c7e8844..960c0ae0c0c3de650dac30beaec208b9bfd3558d 100644 (file)
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
                return ret;
        }
 
-       ret = drm_mm_init(&chan->ramin_heap, base, size);
+       ret = drm_mm_init(&chan->ramin_heap, base, size - base);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
index b75258a9fe44d544521431133ecd64ec0136637d..c8a463b76c89f03f96f68e48107d9993d2a2bda1 100644 (file)
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
+               nvbe->unmap_pages = false;
        }
+
+       nvbe->pages = NULL;
 }
 
 static void
index d23ca00e7d627c65e3814891c0a8aa0136f58395..06de250fe617df89ad4e05a34d3e05be8907f126 100644 (file)
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), mc;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
        struct dcb_entry *dcb;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        dcb = disp->irq.dcb;
index a74e501afd25b44b7315d497f8ca30908dcb3995..ecfafd70cf0ed2b6f9ee0b51c74031caa337de27 100644 (file)
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
        u8  tpnr[GPC_MAX];
        int i, gpc, tpc;
 
+       nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
        /*
         *      TP      ROP UNKVAL(magic_not_rop_nr)
         * 450: 4/0/0/0 2        3
index 23d63b4b3d77078ce4a351f82b8d7cba16d280b7..cb006a718e700f2c72cf06dfdac0c7f22e8cbdac 100644 (file)
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
                        continue;
 
                if (nv_partner != nv_encoder &&
-                   nv_partner->dcb->or == nv_encoder->or) {
+                   nv_partner->dcb->or == nv_encoder->dcb->or) {
                        if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
                                return;
                        break;
index 87631fede1f8ed2a750419200688c151f552e7e6..2b97262e3ab14af5af32b4d85711828563450b09 100644 (file)
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       if (tiling_flags & RADEON_TILING_MACRO)
+       if (tiling_flags & RADEON_TILING_MACRO) {
+               if (rdev->family >= CHIP_CAYMAN)
+                       tmp = rdev->config.cayman.tile_config;
+               else
+                       tmp = rdev->config.evergreen.tile_config;
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0: /* 4 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+                       break;
+               case 1: /* 8 banks */
+               default:
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+                       break;
+               case 2: /* 16 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0: /* 1KB rows */
+               default:
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+                       break;
+               case 1: /* 2KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+                       break;
+               case 2: /* 4KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+                       break;
+               }
+
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-       else if (tiling_flags & RADEON_TILING_MICRO)
+       } else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
        switch (radeon_crtc->crtc_id) {
index 1d603a3335db65b4bf425228a8f98221d01fcd48..5e00d1670aa9964b8d49d0e289e9da5211e501d2 100644 (file)
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
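
The removed while() loop here spins forever if the surface-update-pending bit never latches; the replacement bounds the wait at rdev->usec_timeout microseconds. The same bounded-poll shape is applied to the r100, rs600 and rv770 page-flip paths further down in this diff. As a generic sketch of the pattern (STATUS_REG and READY are placeholder names, not real registers):

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(STATUS_REG) & READY)
			break;			/* condition met */
		udelay(1);			/* poll roughly once per microsecond */
	}
	if (i == rdev->usec_timeout)
		DRM_DEBUG("timed out waiting for READY\n");	/* give up instead of hanging */
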
index 38e1bda73d33be343058b1f63f9349ca7f80e3f1..cd4590aae154154d62906d901a63b8b368fea861 100644 (file)
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
+       u32                     row_size;
        /* value we track */
        u32                     nsamples;
        u32                     cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
        struct radeon_bo        *db_s_write_bo;
 };
 
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+       if (tiling_flags & RADEON_TILING_MACRO)
+               return ARRAY_2D_TILED_THIN1;
+       else if (tiling_flags & RADEON_TILING_MICRO)
+               return ARRAY_1D_TILED_THIN1;
+       else
+               return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+       switch (nbanks) {
+       case 2:
+               return ADDR_SURF_2_BANK;
+       case 4:
+               return ADDR_SURF_4_BANK;
+       case 8:
+       default:
+               return ADDR_SURF_8_BANK;
+       case 16:
+               return ADDR_SURF_16_BANK;
+       }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+       switch (row_size) {
+       case 1:
+       default:
+               return ADDR_SURF_TILE_SPLIT_1KB;
+       case 2:
+               return ADDR_SURF_TILE_SPLIT_2KB;
+       case 4:
+               return ADDR_SURF_TILE_SPLIT_4KB;
+       }
+}
+
 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
 {
        int i;
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        ib[idx] &= ~Z_ARRAY_MODE(0xf);
                        track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+                       ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else {
-                               ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                               ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                               ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
                        }
                }
                break;
@@ -618,13 +656,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR8_INFO:
@@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR9_ATTRIB:
        case CB_COLOR10_ATTRIB:
        case CB_COLOR11_ATTRIB:
+               r = evergreen_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                       "0x%04X\n", reg);
+                       return -EINVAL;
+               }
+               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                       ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+               }
                break;
        case CB_COLOR0_DIM:
        case CB_COLOR1_DIM:
@@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                }
                                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                                if (!p->keep_tiling_flags) {
-                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                                               ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                                               ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                                       ib[idx+1+(i*8)+1] |=
+                                               TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                                               ib[idx+1+(i*8)+6] |=
+                                                       TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+                                               ib[idx+1+(i*8)+7] |=
+                                                       TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                                       }
                                }
                                texture = reloc->robj;
                                /* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 {
        struct radeon_cs_packet pkt;
        struct evergreen_cs_track *track;
+       u32 tmp;
        int r;
 
        if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                if (track == NULL)
                        return -ENOMEM;
                evergreen_cs_track_init(track);
-               track->npipes = p->rdev->config.evergreen.tiling_npipes;
-               track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
-               track->group_size = p->rdev->config.evergreen.tiling_group_size;
+               if (p->rdev->family >= CHIP_CAYMAN)
+                       tmp = p->rdev->config.cayman.tile_config;
+               else
+                       tmp = p->rdev->config.evergreen.tile_config;
+
+               switch (tmp & 0xf) {
+               case 0:
+                       track->npipes = 1;
+                       break;
+               case 1:
+               default:
+                       track->npipes = 2;
+                       break;
+               case 2:
+                       track->npipes = 4;
+                       break;
+               case 3:
+                       track->npipes = 8;
+                       break;
+               }
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0:
+                       track->nbanks = 4;
+                       break;
+               case 1:
+               default:
+                       track->nbanks = 8;
+                       break;
+               case 2:
+                       track->nbanks = 16;
+                       break;
+               }
+
+               switch ((tmp & 0xf00) >> 8) {
+               case 0:
+                       track->group_size = 256;
+                       break;
+               case 1:
+               default:
+                       track->group_size = 512;
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0:
+                       track->row_size = 1;
+                       break;
+               case 1:
+               default:
+                       track->row_size = 2;
+                       break;
+               case 2:
+                       track->row_size = 4;
+                       break;
+               }
+
                p->track = track;
        }
        do {
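
evergreen_cs_parse() now decodes the packed tile_config word itself instead of relying on pre-decoded fields. As a worked example of the switches above, a hypothetical tile_config of 0x1121 would decode as:

	tmp = 0x1121;
	/* bits  3:0  = 0x1  ->  track->npipes     = 2        */
	/* bits  7:4  = 0x2  ->  track->nbanks     = 16       */
	/* bits 11:8  = 0x1  ->  track->group_size = 512      */
	/* bits 15:12 = 0x1  ->  track->row_size   = 2 (2 KB) */
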
index c781c92c3451285a14360f57bf18a53802dca1ab..7d7f2155e34c305729f8487c55fc8c2c741ccc44 100644 (file)
 #       define EVERGREEN_GRPH_DEPTH_8BPP                0
 #       define EVERGREEN_GRPH_DEPTH_16BPP               1
 #       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
 #       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
 /* 8 BPP */
 #       define EVERGREEN_GRPH_FORMAT_INDEXED            0
 #       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
 #       define EVERGREEN_GRPH_FORMAT_RGB111110          6
 #       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
 #       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
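
The EVERGREEN_GRPH_* macros added above pack small enumerated codes into fixed bit fields of the graphics surface control value. A short illustration of the shifts, using a hypothetical combination of the codes defined here:

	u32 fb_format = 0;

	/* (EVERGREEN_ADDR_SURF_8_BANK & 0x3) << 2 = 0x00000008 */
	fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);

	/* (EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB & 0x7) << 13 = 0x00008000 */
	fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
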
index b937c49054d9df9d63fea1279d6a1e123fd899e0..e00039e59a75b2bceb45ff69f63b740fd3193f40 100644 (file)
 #define DB_HTILE_DATA_BASE                             0x28014
 #define DB_Z_INFO                                      0x28040
 #       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
 #define DB_STENCIL_INFO                                        0x28044
 #define DB_Z_READ_BASE                                 0x28048
 #define DB_STENCIL_READ_BASE                           0x2804c
 #      define CB_SF_EXPORT_FULL                        0
 #      define CB_SF_EXPORT_NORM                        1
 #define        CB_COLOR0_ATTRIB                                0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
 #define        CB_COLOR0_DIM                                   0x28c78
 /* only CB0-7 blocks have these regs */
 #define        CB_COLOR0_CMASK                                 0x28c7c
 #      define SQ_SEL_1                                 5
 #define SQ_TEX_RESOURCE_WORD5_0                         0x30014
 #define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
 #define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
 
 #define SQ_VTX_CONSTANT_WORD0_0                                0x30000
 #define SQ_VTX_CONSTANT_WORD1_0                                0x30004
index ad158ea499015e1ceb8ca956c611d9f8944a133d..bfc08f6320f83b83569bec08a2968014c9e90a4f 100644 (file)
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+       int i;
 
        /* Lock the graphics update lock */
        /* update the scanout addresses */
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 3f6636bb2d7f874abf0208cff67010f82c2f4e75..3516a6081dcfcc3acb35d4d64286ea0f321da01e 100644 (file)
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+               DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+                                acpi_format_exception(status));
                kfree(buffer.pointer);
                return 1;
        }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
        acpi_handle handle;
        int ret;
 
-       /* No need to proceed if we're sure that ATIF is not supported */
-       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
-               return 0;
-
        /* Get the device handle */
        handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
 
+       /* No need to proceed if we're sure that ATIF is not supported */
+       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+               return 0;
+
        /* Call the ATIF method */
        ret = radeon_atif_call(handle);
        if (ret)
index 06e413e6a920207850734185f1c459989616f625..4b27efa4405b94b63011b2e8948d678c35ccfd62 100644 (file)
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_TRAVIS:
                case ENCODER_OBJECT_ID_NUTMEG:
-                       return true;
+                       return radeon_encoder->encoder_id;
                default:
-                       return false;
+                       return ENCODER_OBJECT_ID_NONE;
                }
        }
-
-       return false;
+       return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
index 481b99e89f6542d661c4f0697d413a6eff40f821..b1053d64042313df931b9cde933e121bfa814517 100644 (file)
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index a983f410ab89d7d549530d7b7c2f42287a6a7969..23ae1c60ab3d97b8a1576c567054b736fd70eb93 100644 (file)
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 3f6343502d1f96aea110dccdeb59a39cb9fe0065..5ff561d4e0b496727d0e1e8416da14ce871ffd90 100644 (file)
@@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
index 880e285d7578afa3ebeae75de4dad77443d754ce..37d40545ed77347007d1051689895d40b116cd1d 100644 (file)
@@ -1809,7 +1809,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
-       rects = kzalloc(rects_size, GFP_KERNEL);
+       rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+                       GFP_KERNEL);
        if (unlikely(!rects)) {
                ret = -ENOMEM;
                goto out_unlock;
@@ -1824,10 +1825,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        for (i = 0; i < arg->num_outputs; ++i) {
-               if (rects->x < 0 ||
-                   rects->y < 0 ||
-                   rects->x + rects->w > mode_config->max_width ||
-                   rects->y + rects->h > mode_config->max_height) {
+               if (rects[i].x < 0 ||
+                   rects[i].y < 0 ||
+                   rects[i].x + rects[i].w > mode_config->max_width ||
+                   rects[i].y + rects[i].h > mode_config->max_height) {
                        DRM_ERROR("Invalid GUI layout.\n");
                        ret = -EINVAL;
                        goto out_free;
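
Both vmwgfx hunks switch from kzalloc(num * sizeof(elem)) to kcalloc(), and the second one also indexes the rectangle array per element instead of repeatedly checking the first entry. kcalloc() verifies that the count-times-size multiplication cannot overflow before allocating, which the open-coded product does not. The safer idiom in isolation (num and check_rect() are placeholders):

	struct drm_vmw_rect *rects;

	rects = kcalloc(num, sizeof(*rects), GFP_KERNEL);	/* NULL if num * sizeof(*rects) would overflow */
	if (!rects)
		return -ENOMEM;

	for (i = 0; i < num; ++i)
		check_rect(&rects[i]);		/* hypothetical per-element validation */
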
index 848a56c0279c8ac61687340c732521094428e541..af353842f75feaceadeedcc547eeb880519f86df 100644 (file)
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
index 06ce996b8b6504f65c9216c3173a38cf5a51cb34..4a441a6f996748a923204fbcb53c4850eb3af061 100644 (file)
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
index 318e38e853764eeeea220bfb519079cf20f687bc..5d760f3d21c2d61ee85227ab7107bf4c38e011f9 100644 (file)
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
 static struct spi_driver ad7314_driver = {
        .driver = {
                .name = "ad7314",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
        .probe = ad7314_probe,
index 52319340e182da8f189eae518d80bd3674d26425..04450f8bf5da28e4dff56e70ab86e294920c2cf2 100644 (file)
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
 static struct spi_driver ads7871_driver = {
        .driver = {
                .name = DEVICE_NAME,
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
 
index faa0884f61f67bc49e72e93c075160d46078cba4..f2359a0093bd65e52c6985e6a06eef3a0cec8d37 100644 (file)
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
        .resume = exynos4_tmu_resume,
 };
 
-static int __init exynos4_tmu_driver_init(void)
-{
-       return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
-       platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
 
 MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
index 89aa9fb743af07260c8deadbc8956eb4c1ada2de..9ba38f318ffb458247dd66b58b77c3c03e203727 100644 (file)
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
        },
 };
 
-static int __init gpio_fan_init(void)
-{
-       return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
-       platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
 
 MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
 MODULE_DESCRIPTION("GPIO FAN driver");
index fea292d43407cecba6e75b70021617f871d6b1b8..7a48b1eb423334780116f1cb6e16edcec8f19a49 100644 (file)
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
        },
 };
 
-static int __init jz4740_hwmon_init(void)
-{
-       return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
-       platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
 
 MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
index eab11615dced6b54e71996bfd4df60c159bbe570..9b382ec2c3bd4de978d98a6d9f24f797684fe534 100644 (file)
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
        .id_table = ntc_thermistor_id,
 };
 
-static int __init ntc_thermistor_init(void)
-{
-       return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
-       platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
 
 MODULE_DESCRIPTION("NTC Thermistor Driver");
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
index b39f52e2752a7bca54a1bb25c7083382b01aa52e..f6c26d19f521aaa98dd2b0596d67ab27028b8d15 100644 (file)
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
        .remove         = __devexit_p(s3c_hwmon_remove),
 };
 
-static int __init s3c_hwmon_init(void)
-{
-       return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
-       platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
 
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("S3C ADC HWMon driver");
index e3b5c6039c2541e67e63066e4311f2dc6c1021b6..79b6dabe3161461a3f1ccdf277588860f7863df9 100644 (file)
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
        .remove         = sch5627_remove,
 };
 
-static int __init sch5627_init(void)
-{
-       return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
-       platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
 
 MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
 MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
index 244407aa79fcbe05dc05b10ae7d88c0327db6d29..9d5236fb09b421e6a1e78cd5b190eaa9d19f6a2d 100644 (file)
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
        .remove         = sch5636_remove,
 };
 
-static int __init sch5636_init(void)
-{
-       return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
-       platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
 
 MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
 MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
index 57240740b161d190329b5ca3b7dfaf0d049ef02e..0018c7dd0097de5045f646d98e715713ea7edba4 100644 (file)
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
                   },
 };
 
-static int __init twl4030_madc_hwmon_init(void)
-{
-       return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
-       platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
 
 MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
 MODULE_LICENSE("GPL");
index 3cd07bf42dca4203c93712823fdf3dd2fb3335fa..b9a87e89bab4471ce29aba1509f14bd908603b3e 100644 (file)
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
        .remove         = __devexit_p(env_remove),
 };
 
-static int __init env_init(void)
-{
-       return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
-       platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
index 97b1f834a4714b540fd95352271bb253ef130c9c..9b598ed26020563978f1142840c4b6a5f0ecad77 100644 (file)
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
        },
 };
 
-static int __init wm831x_hwmon_init(void)
-{
-       return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
-       platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("WM831x Hardware Monitoring");
index 13290595ca8660f25ebbd0e0c350795dc3d6ccc8..3ff67edbdc44af02a7d0553dd2d74530cc00ca6f 100644 (file)
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
        },
 };
 
-static int __init wm8350_hwmon_init(void)
-{
-       return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
-       platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
 
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
index 835e47b39bc264535c567669d12a81784a8c90c7..03b61577888748a4d9a61cfe46b413eaaa219737 100644 (file)
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
        i2c->adap.algo_data = i2c;
        i2c->adap.dev.parent = &pdev->dev;
 
-       mfp_set_groupg(&pdev->dev);
+       mfp_set_groupg(&pdev->dev, NULL);
 
        clk_get_rate(i2c->clk);
 
index 691276bafd7812c168c4dadbe1aa315bf43bad80..e9cf51b1343ba520d8928367a1eca80f9f531657 100644 (file)
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
        neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+               rcu_read_lock();
                neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+               rcu_read_unlock();
                ret = -ENODATA;
                if (neigh)
                        goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                goto put;
        }
 
+       rcu_read_lock();
        neigh = dst_get_neighbour(dst);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
                if (neigh)
                        neigh_event_send(neigh, NULL);
                ret = -ENODATA;
-               goto put;
+       } else {
+               ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
        }
-
-       ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+       rcu_read_unlock();
 put:
        dst_release(dst);
        return ret;
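
This change, and the cxgb3/cxgb4/nes/ipoib hunks below, apply the same locking rule: the pointer returned by dst_get_neighbour() is RCU-protected, so it must only be dereferenced inside an rcu_read_lock()/rcu_read_unlock() section. A minimal sketch of the shape these fixes enforce (copy_ha() is a placeholder for whatever is done with the neighbour data):

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (neigh && (neigh->nud_state & NUD_VALID))
		ret = copy_ha(addr, neigh->ha);		/* use neigh only under the lock */
	else if (neigh)
		neigh_event_send(neigh, NULL);		/* kick resolution and retry later */
	rcu_read_unlock();				/* neigh may be freed after this point */
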
index de6d0774e60990f644e39ca815159f68b5a065d1..c88b12beef25be72383f3423894121882d76d009 100644 (file)
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                goto reject;
        }
        dst = &rt->dst;
+       rcu_read_lock();
        neigh = dst_get_neighbour(dst);
        l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+       rcu_read_unlock();
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
+       rcu_read_lock();
        neigh = dst_get_neighbour(ep->dst);
 
        /* get a l2t entry */
        ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+       rcu_read_unlock();
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
index b36cdac9c558a35aa78f2061e9f7435dc1504754..0747004313ad5cabdc34a6e206b0fb5d49e25a32 100644 (file)
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
                     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
        mpa->private_data_size = htons(ep->plen);
        mpa->revision = mpa_rev_to_use;
-       if (mpa_rev_to_use == 1)
+       if (mpa_rev_to_use == 1) {
                ep->tried_with_mpa_v1 = 1;
+               ep->retry_with_mpa_v1 = 0;
+       }
 
        if (mpa_rev_to_use == 2) {
                mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
        dst = &rt->dst;
+       rcu_read_lock();
        neigh = dst_get_neighbour(dst);
        if (neigh->dev->flags & IFF_LOOPBACK) {
                pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                rss_qid = dev->rdev.lldi.rxq_ids[
                          cxgb4_port_idx(neigh->dev) * step];
        }
+       rcu_read_unlock();
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
        }
        ep->dst = &rt->dst;
 
+       rcu_read_lock();
        neigh = dst_get_neighbour(ep->dst);
 
        /* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
                ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
                        cxgb4_port_idx(neigh->dev) * step];
        }
+       rcu_read_unlock();
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
+       rcu_read_lock();
        neigh = dst_get_neighbour(ep->dst);
 
        /* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                ep->retry_with_mpa_v1 = 0;
                ep->tried_with_mpa_v1 = 0;
        }
+       rcu_read_unlock();
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
index f35a935267e77e7a58c3be85437cbcb1a1f35cb7..0f1607c8325a5bc8a03e3a5d5471d39ed5741e8b 100644 (file)
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
-                   (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+                   (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
index dfce9ea98a39b3f0f68e6d152d438554ebe8ee84..0a52d72371ee5f6cf2f938ba81fa0c887fd00070 100644 (file)
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                neigh_release(neigh);
        }
 
-       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
+       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
+               rcu_read_lock();
                neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+               rcu_read_unlock();
+       }
        ip_rt_put(rt);
        return rc;
 }
index 5bd2162b95dcb8c93051ce7e496b628222764184..1d5895941e193e35e5ff88804fb00a9c29c8a5fa 100644 (file)
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
                SYM_LSB(IBCCtrlA_0, MaxPktLen);
        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
 
-       /* initially come up waiting for TS1, without sending anything. */
-       val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
-               QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
-       ppd->cpspec->ibcctrl_a = val;
        /*
         * Reset the PCS interface to the serdes (and also ibc, which is still
         * in reset from above).  Writes new value of ibcctrl_a as last step.
         */
        qib_7322_mini_pcs_reset(ppd);
-       qib_write_kreg(dd, kr_scratch, 0ULL);
-       /* clear the linkinit cmds */
-       ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
 
        if (!ppd->cpspec->ibcctrl_b) {
                unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
        set_vls(ppd);
 
+       /* initially come up DISABLED, without sending anything. */
+       val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+                                       QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+       qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+       qib_write_kreg(dd, kr_scratch, 0ULL);
+       /* clear the linkinit cmds */
+       ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
        /* be paranoid against later code motion, etc. */
        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
                           off */
                        if (ppd->dd->flags & QIB_HAS_QSFP) {
                                qd->t_insert = get_jiffies_64();
-                               schedule_work(&qd->work);
+                               queue_work(ib_wq, &qd->work);
                        }
                        spin_lock_irqsave(&ppd->sdma_lock, flags);
                        if (__qib_sdma_running(ppd))
index e06c4ed383f14598674ce8a0847c9780a6231c46..fa71b1e666c5414fbba2357fe531e75cabc7986c 100644 (file)
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
        udelay(20); /* Generous RST dwell */
 
        dd->f_gpio_mod(dd, mask, mask, mask);
-       /* Spec says module can take up to two seconds! */
-       mask = QSFP_GPIO_MOD_PRS_N;
-       if (qd->ppd->hw_pidx)
-               mask <<= QSFP_GPIO_PORT2_SHIFT;
-
-       /* Do not try to wait here. Better to let event handle it */
-       if (!qib_qsfp_mod_present(qd->ppd))
-               goto bail;
-       /* We see a module, but it may be unwise to look yet. Just schedule */
-       qd->t_insert = get_jiffies_64();
-       queue_work(ib_wq, &qd->work);
-bail:
        return;
 }
 
index 0ef9af94997dcd5737922aa37a1483491b0c63f1..4115be54ba3b32626dc75c4529a3aa186b6f02c1 100644 (file)
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
 {
        struct ipoib_ah *ah;
+       struct ib_ah *vah;
 
        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);
 
-       ah->ah = ib_create_ah(pd, attr);
-       if (IS_ERR(ah->ah)) {
+       vah = ib_create_ah(pd, attr);
+       if (IS_ERR(vah)) {
                kfree(ah);
-               ah = NULL;
-       } else
+               ah = (struct ipoib_ah *)vah;
+       } else {
+               ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+       }
 
        return ah;
 }
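
ipoib_create_ah() now reports failure through the kernel's ERR_PTR convention rather than returning NULL, so callers (see the ipoib_main.c and ipoib_multicast.c hunks below) can recover the error code with IS_ERR()/PTR_ERR(). The convention in miniature, using a hypothetical foo_create():

static struct foo *foo_create(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode -errno inside the pointer */
	return f;
}

	/* caller side */
	f = foo_create();
	if (IS_ERR(f))
		return PTR_ERR(f);		/* extract the -errno again */
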
index 7567b60002309a19a2d0a4cc13da855a46042be4..83695b48b010a643a9b16d9240fa9a8538fa5d06 100644 (file)
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       if (ah) {
+       if (!IS_ERR_OR_NULL(ah)) {
                path->pathrec = *pathrec;
 
                old_ah   = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
        return 0;
 }
 
+/* called with rcu_read_lock */
 static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+/* called with rcu_read_lock */
 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct neighbour *n = NULL;
        unsigned long flags;
 
+       rcu_read_lock();
        if (likely(skb_dst(skb)))
                n = dst_get_neighbour(skb_dst(skb));
 
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
                        ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
 
                neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
 
                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
                } else if (neigh->ah) {
                        ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
 
                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                           phdr->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
 
                        unicast_arp_send(skb, dev, phdr);
                }
        }
-
+unlock:
+       rcu_read_unlock();
        return NETDEV_TX_OK;
 }
 
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
index 1b7a9768635673f1a78f74e54e9e33fd37a80eb7..873bff97e69e266f6c39624f5fe3319eb76f7aae 100644 (file)
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                av.grh.dgid = mcast->mcmember.mgid;
 
                ah = ipoib_create_ah(dev, priv->pd, &av);
-               if (!ah) {
-                       ipoib_warn(priv, "ib_address_create failed\n");
+               if (IS_ERR(ah)) {
+                       ipoib_warn(priv, "ib_address_create failed %ld\n",
+                               -PTR_ERR(ah));
+                       /* use original error */
+                       return PTR_ERR(ah);
                } else {
                        spin_lock_irq(&priv->lock);
                        mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
                skb->dev = dev;
                if (dst)
-                       n = dst_get_neighbour(dst);
+                       n = dst_get_neighbour_raw(dst);
                if (!dst || !n) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
        if (mcast && mcast->ah) {
                struct dst_entry *dst = skb_dst(skb);
                struct neighbour *n = NULL;
+
+               rcu_read_lock();
                if (dst)
                        n = dst_get_neighbour(dst);
                if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
                                list_add_tail(&neigh->list, &mcast->neigh_list);
                        }
                }
-
+               rcu_read_unlock();
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
                return;
index c0c7820d4c46b406465e0d2d8e059a80ce819476..a004c3945c67c4ca891779e5b776ebfcb9c25b7f 100644 (file)
@@ -3524,7 +3524,7 @@ found:
        return 0;
 }
 
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
 {
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
index 07c9f189f3143250e5ea2d0ea20fcdaa2b1674df..6777ca049471728d445ec323e3f051bdc19126f9 100644 (file)
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
        return ir_supported;
 }
 
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
 {
        if (!intr_remapping_enabled)
                return 0;
index 33ec9e4677727800d5439e16300be820c92090b8..9021182c4b766e02454365f1e2b6d22822358f8a 100644 (file)
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
                case IIOCDOCFINT:
                        if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
                                return (-EINVAL);       /* invalid driver */
+                       if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+                                       sizeof(dioctl.cf_ctrl.msn))
+                               return -EINVAL;
+                       if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+                                       sizeof(dioctl.cf_ctrl.fwd_nr))
+                               return -EINVAL;
                        if ((i = cf_command(dioctl.cf_ctrl.drvid,
                                            (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
                                            dioctl.cf_ctrl.cfproc,
index 1f73d7f7e0242e4e73d55582935f4ff0a11f6646..2339d7396b9ea305dd845e592d227bd726d3d43b 100644 (file)
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
                        char *c,
                        *e;
 
+                       if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+                                       sizeof(cfg->drvid))
+                               return -EINVAL;
                        drvidx = -1;
                        chidx = -1;
                        strcpy(drvid, cfg->drvid);
index 7878712721bf431a1315f44db1c3b2dcc9486245..b6907118283a627a656fbe408e368da9853ff634 100644 (file)
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
         */
        int i;
 
+       spin_lock_irq(&bitmap->lock);
        for (i = 0; i < bitmap->file_pages; i++)
                set_page_attr(bitmap, bitmap->filemap[i],
                              BITMAP_PAGE_NEEDWRITE);
        bitmap->allclean = 0;
+       spin_unlock_irq(&bitmap->lock);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
        for (chunk = s; chunk <= e; chunk++) {
                sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
+               spin_lock_irq(&bitmap->lock);
                bitmap_file_set_bit(bitmap, sec);
+               spin_unlock_irq(&bitmap->lock);
                if (sec < bitmap->mddev->recovery_cp)
                        /* We are asserting that the array is dirty,
                         * so move the recovery_cp address back so
index 84acfe7d10e48e33ea581924d4648948d005d90c..ee981737edfcf3ff674749116d44d89bd45be751 100644 (file)
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
-               list_del(&mddev->all_mddevs);
+               list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
                sep = ",";
        }
        if (test_bit(Blocked, &rdev->flags) ||
-           rdev->badblocks.unacked_exist) {
+           (rdev->badblocks.unacked_exist
+            && !test_bit(Faulty, &rdev->flags))) {
                len += sprintf(page+len, "%sblocked", sep);
                sep = ",";
        }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        if (err)
                return err;
        else {
+               if (mddev->hold_active == UNTIL_IOCTL)
+                       mddev->hold_active = 0;
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                return len;
        }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
        if (!entry->show)
                return -EIO;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
+
        rv = mddev_lock(mddev);
        if (!rv) {
                rv = entry->show(mddev, page);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
        rv = mddev_lock(mddev);
-       if (mddev->hold_active == UNTIL_IOCTL)
-               mddev->hold_active = 0;
        if (!rv) {
                rv = entry->store(mddev, page, length);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                  s + rdev->data_offset, sectors, acknowledged);
        if (rv) {
                /* Make sure they get written out promptly */
+               sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
        }
index 297e260921787f490b63ddf88a9ea5adbdfd82c4..31670f8d6b65789c071c1fd8d212c168138e14d8 100644 (file)
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                if (dev->written)
                        s->written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
                if (rdev) {
                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                             &first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else if (!test_bit(Faulty, &rdev->flags)) {
+               else {
                        /* in sync if before recovery_offset */
                        if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                                set_bit(R5_Insync, &dev->flags);
                }
-               if (test_bit(R5_WriteError, &dev->flags)) {
+               if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        } else
                                clear_bit(R5_WriteError, &dev->flags);
                }
-               if (test_bit(R5_MadeGood, &dev->flags)) {
+               if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
                                atomic_inc(&rdev->nr_pending);
index a73d9dc80ff674d7b90de88226511470b9c8c873..84fb6349a59ab7d78c2ba37fe4d06538e5d52bd8 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig ARCNET
        depends on NETDEVICES && (ISA || PCI || PCMCIA)
-       bool "ARCnet support"
+       tristate "ARCnet support"
        ---help---
          If you have a network card of this type, say Y and check out the
          (arguably) beautiful poetry in
index b0c577256487b4b7c35d690da3a2ca619d2deb4c..7f8756825b8abf62f924d569b3e923c7f955c413 100644 (file)
@@ -2553,30 +2553,6 @@ re_arm:
        }
 }
 
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
-       struct in_device *idev;
-       struct in_ifaddr *ifa;
-       __be32 addr = 0;
-
-       if (!dev)
-               return 0;
-
-       rcu_read_lock();
-       idev = __in_dev_get_rcu(dev);
-       if (!idev)
-               goto out;
-
-       ifa = idev->ifa_list;
-       if (!ifa)
-               goto out;
-
-       addr = ifa->ifa_local;
-out:
-       rcu_read_unlock();
-       return addr;
-}
-
 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
        struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
        struct bonding *bond;
        struct vlan_entry *vlan;
 
+       /* we only care about primary address */
+       if (ifa->ifa_flags & IFA_F_SECONDARY)
+               return NOTIFY_DONE;
+
        list_for_each_entry(bond, &bn->dev_list, bond_list) {
                if (bond->dev == event_dev) {
                        switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                                bond->master_ip = ifa->ifa_local;
                                return NOTIFY_OK;
                        case NETDEV_DOWN:
-                               bond->master_ip = bond_glean_dev_ip(bond->dev);
+                               bond->master_ip = 0;
                                return NOTIFY_OK;
                        default:
                                return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                                        vlan->vlan_ip = ifa->ifa_local;
                                        return NOTIFY_OK;
                                case NETDEV_DOWN:
-                                       vlan->vlan_ip =
-                                               bond_glean_dev_ip(vlan_dev);
+                                       vlan->vlan_ip = 0;
                                        return NOTIFY_OK;
                                default:
                                        return NOTIFY_DONE;
index 905bce0b3a4328bdc59243481d2624c63f55b0e8..2c7f5036f570fd70d1078c1fabef3b7336d2d89e 100644 (file)
@@ -20,7 +20,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
index 4cf835dbc1222f6c90154b601bc02b76d6efbbb6..3fb66d09ece59e463c81eb20b77ba7cfa7df9fd0 100644 (file)
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_irq(skb);
        }
 
        bp->tx_cons = cons;
index bce203fa4b9e274c24e06e52b133a0936328c4e5..882f48f0a03cdb47568bff2e67b95cc1cdab5cb3 100644 (file)
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        return 0;
 }
 
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+                                      struct link_params *params, u8 mode)
+{
+       struct bnx2x *bp = params->bp;
+       u16 temp;
+
+       bnx2x_cl22_write(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               MDIO_REG_GPHY_SHADOW_LED_SEL1);
+       bnx2x_cl22_read(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               &temp);
+       temp &= 0xff00;
+
+       DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+       switch (mode) {
+       case LED_MODE_FRONT_PANEL_OFF:
+       case LED_MODE_OFF:
+               temp |= 0x00ee;
+               break;
+       case LED_MODE_OPER:
+               temp |= 0x0001;
+               break;
+       case LED_MODE_ON:
+               temp |= 0x00ff;
+               break;
+       default:
+               break;
+       }
+       bnx2x_cl22_write(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+       return;
+}
+
+
 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
                                     struct link_params *params)
 {
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
        .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
        .format_fw_ver  = (format_fw_ver_t)NULL,
        .hw_reset       = (hw_reset_t)NULL,
-       .set_link_led   = (set_link_led_t)NULL,
+       .set_link_led   = (set_link_led_t)bnx2x_5461x_set_link_led,
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 /*****************************************************************/
index fc7bd0f23c0b7e656905eb590ea6d8c0d360ea44..e58073ef33b47e5dc45708d794726a7a038470a2 100644 (file)
@@ -6990,6 +6990,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_REG_INTR_MASK                             0x1b
 #define MDIO_REG_INTR_MASK_LINK_STATUS                 (0x1 << 1)
 #define MDIO_REG_GPHY_SHADOW                           0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1                  (0x0d << 10)
 #define MDIO_REG_GPHY_SHADOW_LED_SEL2                  (0x0e << 10)
 #define MDIO_REG_GPHY_SHADOW_WR_ENA                    (0x1 << 15)
 #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED              (0x1e << 10)
index 438f4580bf66207539761761e37c42e0ef303b78..2a22f52563532201891fa4a720d11180886b714c 100644 (file)
@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 
                if (!dm->wake_state)
                        irq_set_irq_wake(dm->irq_wake, 1);
-               else if (dm->wake_state & !opts)
+               else if (dm->wake_state && !opts)
                        irq_set_irq_wake(dm->irq_wake, 0);
        }
 
index c520cfd3b29805440508acff0a8748b5956fee11..5272f9d4dda9448faece5a061d413c71620f6da4 100644 (file)
@@ -24,6 +24,7 @@ config FEC
        bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
                   ARCH_MXC || ARCH_MXS)
+       default ARCH_MXC || ARCH_MXS if ARM
        select PHYLIB
        ---help---
          Say Y here if you want to use the built-in 10/100 Fast ethernet
index 410d6a1984ed400863c7e5eabd63e7041629d101..6650068c996c48158c3b09c8975b912440e00aff 100644 (file)
@@ -61,9 +61,9 @@
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_MAX_CQE_COUNT      1023
 #define EHEA_DEF_ENTRIES_SQ     1023
-#define EHEA_DEF_ENTRIES_RQ1    4095
+#define EHEA_DEF_ENTRIES_RQ1    1023
 #define EHEA_DEF_ENTRIES_RQ2    1023
-#define EHEA_DEF_ENTRIES_RQ3    1023
+#define EHEA_DEF_ENTRIES_RQ3    511
 #else
 #define EHEA_MAX_CQE_COUNT      4080
 #define EHEA_DEF_ENTRIES_SQ     4080
index 37b70f7052b68ad9a16593cb299cf6eb380a6491..bfeccbfde236237dc0c8486d0dd1d12d192d3a02 100644 (file)
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
 out_herr:
        free_page((unsigned long)cb2);
 resched:
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 }
 
 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
        }
 
        mutex_unlock(&port->port_lock);
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 
        return ret;
 }
index 4326681df382ac461c24e21eb6a6ea0ee64d4426..acc31af6594a243b91d7c9f666363581f68ddf05 100644 (file)
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 
                /* FIXME: do we need this? */
                memset(local_list, 0, sizeof(local_list));
-               memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+               memset(remote_list, 0, sizeof(remote_list));
 
                /* a 0 address marks the end of the valid entries */
                if (senddata->addr[startchunk] == 0)
index 7becff1f387d7c9d84fb26ab3db6f7299bb64f15..76b84573566bd850a267c02a9d533fac5b68c7c4 100644 (file)
@@ -1744,6 +1744,112 @@ jme_phy_off(struct jme_adapter *jme)
                jme_new_phy_off(jme);
 }
 
+static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+       u32 phy_addr;
+
+       phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+       return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+                       JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+       u32 phy_addr;
+
+       phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+                       phy_data);
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+       u32 ctrl1000, phy_data;
+
+       jme_phy_off(jme);
+       jme_phy_on(jme);
+       /* Enable PHY test mode 1 */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       ctrl1000 |= PHY_GAD_TEST_MODE_1;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+       phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+                       JM_PHY_EXT_COMM_2_CALI_ENABLE;
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+       msleep(20);
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+                       JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+                       JM_PHY_EXT_COMM_2_CALI_LATCH);
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+       /*  Disable PHY test mode */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+       return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+       u32 phy_comm0 = 0, phy_comm1 = 0;
+       u8 nic_ctrl;
+
+       pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+       if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+               return 0;
+
+       switch (jme->pdev->device) {
+       case PCI_DEVICE_ID_JMICRON_JMC250:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               break;
+       case PCI_DEVICE_ID_JMICRON_JMC260:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+                       phy_comm0 = 0x608A;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+                       phy_comm0 = 0x408A;
+               break;
+       default:
+               return -ENODEV;
+       }
+       if (phy_comm0)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+       if (phy_comm1)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+       return 0;
+}
+
 static int
 jme_open(struct net_device *netdev)
 {
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
-
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_reset_link(jme);
 
        return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
-
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_start_irq(jme);
        netif_device_attach(netdev);
 
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
index 02ea27c1dcb5a464f06d9b38d98259f6f080fd4c..4304072bd3c536e852a38cf6c37716df8500ed66 100644 (file)
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
                                  RXMCS_CHECKSUM,
 };
 
+/*     Extern PHY common register 2    */
+
+#define PHY_GAD_TEST_MODE_1                    0x00002000
+#define PHY_GAD_TEST_MODE_MSK                  0x0000E000
+#define JM_PHY_SPEC_REG_READ                   0x00004000
+#define JM_PHY_SPEC_REG_WRITE                  0x00008000
+#define PHY_CALIBRATION_DELAY                  20
+#define JM_PHY_SPEC_ADDR_REG                   0x1E
+#define JM_PHY_SPEC_DATA_REG                   0x1F
+
+#define JM_PHY_EXT_COMM_0_REG                  0x30
+#define JM_PHY_EXT_COMM_1_REG                  0x31
+#define JM_PHY_EXT_COMM_2_REG                  0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE          0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0          0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH           0x10
+#define PCI_PRIV_SHARE_NICCTRL                 0xF5
+#define JME_FLAG_PHYEA_ENABLE                  0x2
+
 /*
  * Wakeup Frame setup interface registers
  */
index 05db5434bafc3284eb0a3b6162a64c8ed2c436d6..90497ffb1ac39d5635d258730f8cc229275aa240 100644 (file)
@@ -2,4 +2,5 @@
 # Makefile for the A Semi network device drivers.
 #
 
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
index 8731f79c9efc40439bac7af0363225d885f42036..b8478aab050e76efa2b734572b3f866f98da321f 100644 (file)
 
 
 #define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13)   /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
 #else /* all other page sizes */
 #define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
        struct ob_mac_iocb_req *queue_entry;
        u32 index;
        struct oal oal;
-       struct map_list map[MAX_SKB_FRAGS + 1];
+       struct map_list map[MAX_SKB_FRAGS + 2];
        int map_cnt;
        struct tx_ring_desc *next;
 };
index 6f06aa10f0d729a040a6e34a244d57338ba3025f..67bf0781999200d656e09e990be95709700cbce1 100644 (file)
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
        return value;
 }
 
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
-       RTL_W16(IntrMask, 0x0000);
+       void __iomem *ioaddr = tp->mmio_addr;
 
-       RTL_W16(IntrStatus, 0xffff);
+       RTL_W16(IntrMask, 0x0000);
+       RTL_W16(IntrStatus, tp->intr_event);
+       RTL_R8(ChipCmd);
 }
 
 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
                        break;
                udelay(100);
        }
-
-       rtl8169_init_ring_indexes(tp);
 }
 
 static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        void __iomem *ioaddr = tp->mmio_addr;
 
        /* Disable interrupts */
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        rtl_rx_close(tp);
 
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        RTL_W16(IntrMitigate, 0x5151);
 
        /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_22) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
                tp->intr_event |= RxFIFOOver | PCSTimeout;
                tp->intr_event &= ~RxOverflow;
        }
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
 
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+               tp->intr_event &= ~RxFIFOOver;
+               tp->napi_event &= ~RxFIFOOver;
+       }
+
        if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
            tp->mac_version == RTL_GIGA_MAC_VER_16) {
                int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
        /* Wait for any pending NAPI task to complete */
        napi_disable(&tp->napi);
 
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        tp->intr_mask = 0xffff;
        RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
        if (!netif_running(dev))
                goto out_unlock;
 
+       rtl8169_hw_reset(tp);
+
        rtl8169_wait_for_quiescence(dev);
 
        for (i = 0; i < NUM_RX_DESC; i++)
                rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
        rtl8169_tx_clear(tp);
+       rtl8169_init_ring_indexes(tp);
 
-       rtl8169_hw_reset(tp);
        rtl_hw_start(dev);
        netif_wake_queue(dev);
        rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ out_unlock:
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       rtl8169_hw_reset(tp);
-
-       /* Let's wait a bit while any (async) irq lands on */
        rtl8169_schedule_work(dev, rtl8169_reset_task);
 }
 
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
         */
        status = RTL_R16(IntrStatus);
        while (status && status != 0xffff) {
+               status &= tp->intr_event;
+               if (!status)
+                       break;
+
                handled = 1;
 
                /* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        switch (tp->mac_version) {
                        /* Work around for rx fifo overflow */
                        case RTL_GIGA_MAC_VER_11:
-                       case RTL_GIGA_MAC_VER_22:
-                       case RTL_GIGA_MAC_VER_26:
                                netif_stop_queue(dev);
                                rtl8169_tx_timeout(dev);
                                goto done;
-                       /* Testers needed. */
-                       case RTL_GIGA_MAC_VER_17:
-                       case RTL_GIGA_MAC_VER_19:
-                       case RTL_GIGA_MAC_VER_20:
-                       case RTL_GIGA_MAC_VER_21:
-                       case RTL_GIGA_MAC_VER_23:
-                       case RTL_GIGA_MAC_VER_24:
-                       case RTL_GIGA_MAC_VER_27:
-                       case RTL_GIGA_MAC_VER_28:
-                       case RTL_GIGA_MAC_VER_31:
-                       /* Experimental science. Pktgen proof. */
-                       case RTL_GIGA_MAC_VER_12:
-                       case RTL_GIGA_MAC_VER_25:
-                               if (status == RxFIFOOver)
-                                       goto done;
-                               break;
                        default:
                                break;
                        }
index 8ea770a89f2556b8c762f2af345e5044d45ff9ba..72cd190b9c1a0734d401b193e1bd7b12cb49b3af 100644 (file)
@@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-       /* Do not manage MMC IRQ (FIXME) */
+       /* Mask the MMC irq; counters are managed in SW and the
+        * registers are cleared on each read when needed. */
        dwmac_mmc_intr_all_mask(priv->ioaddr);
-       dwmac_mmc_ctrl(priv->ioaddr, mode);
-       memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+       if (priv->dma_cap.rmon) {
+               dwmac_mmc_ctrl(priv->ioaddr, mode);
+               memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+       } else
+               pr_info(" No MAC Management Counters available");
 }
 
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
        priv->xstats.threshold = tc;
 
-       if (priv->dma_cap.rmon)
-               stmmac_mmc_setup(priv);
+       stmmac_mmc_setup(priv);
 
        /* Start the ball rolling... */
        DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
index 10826d8a2a2df36b3731051b2d4abb504c15ce39..1187a1169eb21b50351599939cc4cb9728ad3b57 100644 (file)
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
                goto done;
 
        /* Re-enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 
        /* HACK: Avoid the "rotting packet" problem (see above). */
        if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
        info->napi_enabled = true;
 
        /* Enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 }
 
 
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
        for (i = 0; i < sh->nr_frags; i++) {
 
                skb_frag_t *f = &sh->frags[i];
-               unsigned long pfn = page_to_pfn(f->page);
+               unsigned long pfn = page_to_pfn(skb_frag_page(f));
 
                /* FIXME: Compute "hash_for_home" properly. */
                /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
                /* FIXME: Hmmm. */
                if (!hash_default) {
                        void *va = pfn_to_kaddr(pfn) + f->page_offset;
-                       BUG_ON(PageHighMem(f->page));
+                       BUG_ON(PageHighMem(skb_frag_page(f)));
                        finv_buffer_remote(va, f->size, 0);
                }
 
index bb88e12101c78b86b91def144677e4ecb10d8091..a70244306c9462830c4ebde0b87334ef7667fa0e 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig PHYLIB
-       bool "PHY Device support and infrastructure"
+       tristate "PHY Device support and infrastructure"
        depends on !S390
        depends on NETDEVICES
        help
index 2f91acccb7dbc80499e7988b74ac62521bd06799..8873c6e6fb9627c02c41d4b1d36ffb340da3eacf 100644 (file)
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
        }
 
        /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
-       REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+       if (AR_SREV_9300_20_OR_LATER(ah))
+               REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
 }
 
 /*
index e12b48c2cff6ab71e097d38c9632eeb11044bed0..dd008b0e6417a3b3e15679399252c53af9af990d 100644 (file)
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 128,
+       .wd_disable = true,
 };
 static struct iwl_ht_params iwl1000_ht_params = {
        .ht_greenfield_support = true,
index c511c98a89a81284e03f621705cbd09736616297..f55fb2d1af52f3891e8d29bc7a21ab193b7f243f 100644 (file)
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .no_idle_support = true,
+       .wd_disable = true,
 };
 static struct iwl_ht_params iwl5000_ht_params = {
        .ht_greenfield_support = true,
index 58a381c01c89c9a367c7b592d65a49eb02457be0..a7a6def40d05aa29fe6d739daf1f0d247bf45a80 100644 (file)
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        return 0;
 }
 
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+       struct iwl_rxon_context *ctx)
+{
+       if (conf_is_ht40_minus(conf)) {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+               ctx->ht.is_40mhz = true;
+       } else if (conf_is_ht40_plus(conf)) {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+               ctx->ht.is_40mhz = true;
+       } else {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_NONE;
+               ctx->ht.is_40mhz = false;
+       }
+}
+
 int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                                ctx->ht.enabled = conf_is_ht(conf);
 
                        if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
+                               /* if HT40 is used, it should not change
+                                * after association except on channel switch */
+                               if (iwl_is_associated_ctx(ctx) &&
+                                    !ctx->ht.is_40mhz)
+                                       iwlagn_config_ht40(conf, ctx);
                        } else
                                ctx->ht.is_40mhz = false;
 
index ed6283623932e2436ddc364d8429ee085df28249..4b2aa1da09532a98e4505afe70f2ec3114fa6510 100644 (file)
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
                if (sta)
                        addr = sta->addr;
                else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
                                          seq.tkip.iv32, p1k, CMD_SYNC);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-               /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
index ccba69b7f8a78f9b54635328bee5c3e3d4d4553a..bacc06c95e7ac798449e11ca883e5b5dc1def717 100644 (file)
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               break;
+       default:
+               break;
+       }
+
        /*
         * We could program these keys into the hardware as well, but we
         * don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 
        /* Configure HT40 channels */
        ctx->ht.enabled = conf_is_ht(conf);
-       if (ctx->ht.enabled) {
-               if (conf_is_ht40_minus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                       ctx->ht.is_40mhz = true;
-               } else if (conf_is_ht40_plus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                       ctx->ht.is_40mhz = true;
-               } else {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                       ctx->ht.is_40mhz = false;
-               }
-       } else
+       if (ctx->ht.enabled)
+               iwlagn_config_ht40(conf, ctx);
+       else
                ctx->ht.is_40mhz = false;
 
        if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
 
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
 MODULE_PARM_DESC(wd_disable,
-               "Disable stuck queue watchdog timer (default: 0 [enabled])");
+               "Disable stuck queue watchdog timer 0=system default, "
+               "1=disable, 2=enable (default: 0)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
index 5b936ec1a541ecc1ecd213771f7d4e75f7960136..3856abaea50792885442b78b084e4c328ace3fc6 100644 (file)
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct ieee80211_bss_conf *bss_conf,
                             u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+                       struct iwl_rxon_context *ctx);
 
 /* uCode */
 int iwlagn_rx_calib_result(struct iwl_priv *priv,
index 001fdf140abbb72de6d12fe95dac3d8211158ff0..fcf54160e4ed51b3f89a84447898c7948acaaf59 100644 (file)
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
 {
        unsigned int timeout = priv->cfg->base_params->wd_timeout;
 
-       if (timeout && !iwlagn_mod_params.wd_disable)
-               mod_timer(&priv->watchdog,
-                         jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
-       else
-               del_timer(&priv->watchdog);
+       if (!iwlagn_mod_params.wd_disable) {
+               /* use system default */
+               if (timeout && !priv->cfg->base_params->wd_disable)
+                       mod_timer(&priv->watchdog,
+                               jiffies +
+                               msecs_to_jiffies(IWL_WD_TICK(timeout)));
+               else
+                       del_timer(&priv->watchdog);
+       } else {
+               /* module parameter overrides the default configuration */
+               if (timeout && iwlagn_mod_params.wd_disable == 2)
+                       mod_timer(&priv->watchdog,
+                               jiffies +
+                               msecs_to_jiffies(IWL_WD_TICK(timeout)));
+               else
+                       del_timer(&priv->watchdog);
+       }
 }
 
 /**
index 137da33807044bedd162a501a65454037abce49f..f2fc288f3dd31f5068e1bb29fbe3307fa1d2045d 100644 (file)
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
  * @shadow_reg_enable: HW shadhow register bit
  * @no_idle_support: do not support idle mode
  * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
  */
 struct iwl_base_params {
        int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
        const bool shadow_reg_enable;
        const bool no_idle_support;
        const bool hd_v2;
+       const bool wd_disable;
 };
 /*
  * @advanced_bt_coexist: support advanced bt coexist
index 1f7a93c67c45ed67998389ac406917875e826125..14eaf37ce3b1c6a06fdad12d9cc74371274ba894 100644 (file)
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
  * @restart_fw: restart firmware, default = 1
  * @plcp_check: enable plcp health check, default = true
  * @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: disable stuck queue watchdog check (0=system default, 1=disable, 2=enable), default = 0
  * @bt_coex_active: enable bt coex, default = true
  * @led_mode: system default, default = 0
  * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
        int restart_fw;
        bool plcp_check;
        bool ack_check;
-       bool wd_disable;
+       int  wd_disable;
        bool bt_coex_active;
        int led_mode;
        bool no_sleep_autoadjust;
index f18df82eeb92caccedb8381bb95e4f55cd4d1901..78d0d6988553d40354b9fbf2396e1657eaefd9c6 100644 (file)
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
        WARN_ON(priv->fw_state != FW_STATE_READY);
 
-       cancel_work_sync(&priv->work);
-
        p54spi_power_off(priv);
        spin_lock_irqsave(&priv->tx_lock, flags);
        INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
        priv->fw_state = FW_STATE_OFF;
        mutex_unlock(&priv->mutex);
+
+       cancel_work_sync(&priv->work);
 }
 
 static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
        init_completion(&priv->fw_comp);
        INIT_LIST_HEAD(&priv->tx_pending);
        mutex_init(&priv->mutex);
+       spin_lock_init(&priv->tx_lock);
        SET_IEEE80211_DEV(hw, &spi->dev);
        priv->common.open = p54spi_op_start;
        priv->common.stop = p54spi_op_stop;
index d97a2caf582b3997f2378434117758a928cefbd3..bc2ba80c47bb9ff0105d8ed2c119ab3b3589364e 100644 (file)
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
                dwrq->flags = 0;
                dwrq->length = 0;
        }
-       essid->octets[essid->length] = '\0';
+       essid->octets[dwrq->length] = '\0';
        memcpy(extra, essid->octets, dwrq->length);
        kfree(essid);
 
index 3f183a15186e09b5e05a2e296c5c66bd94426b23..1ba079dffb11573e86f1b547ac8868e313840f26 100644 (file)
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
        /* Apparently the data is read from end to start */
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
        /* The returned value is in CPU order, but eeprom is le */
-       rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+       *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
        *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
index db526284454336e47bf2c530e1b0630123809524..55c8e50f45fd143b7e606894c71e072c97480f2a 100644 (file)
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
        if (mac->link_state != MAC80211_LINKED)
                return;
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irq(&rtlpriv->locks.lps_lock);
 
        /* Idle for a while if we connect to AP a while ago. */
        if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
                }
        }
 
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irq(&rtlpriv->locks.lps_lock);
 }
 
 /*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       unsigned long flags;
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
 
        if (ppsc->fwctrl_lps) {
                if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
                        rtl_lps_set_psmode(hw, EACTIVE);
                }
        }
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
 }
 
 /* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
                RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
        }
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irq(&rtlpriv->locks.lps_lock);
        rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irq(&rtlpriv->locks.lps_lock);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
        if (rtlpriv->link_info.busytraffic)
                return;
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irq(&rtlpriv->locks.lps_lock);
        rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irq(&rtlpriv->locks.lps_lock);
 
        if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
                !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
index 0cb594c86090fa7c25f3578132528f5f616aef26..15e332d08c8d7825382e21bfba8c868636c2c1f6 100644 (file)
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                pending_idx = *((u16 *)skb->data);
                xen_netbk_idx_release(netbk, pending_idx);
                for (j = start; j < i; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xen_netbk_idx_release(netbk, pending_idx);
                }
 
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
                                             "netback/%u", group);
 
                if (IS_ERR(netbk->task)) {
-                       printk(KERN_ALERT "kthread_run() fails at netback\n");
+                       printk(KERN_ALERT "kthread_create() fails at netback\n");
                        del_timer(&netbk->net_timer);
                        rc = PTR_ERR(netbk->task);
                        goto failed_init;
index 791270b8bd1ca755e27632c1b68f9f252662a3a0..0f0cfa3bca301e2d824ad64ebdaba23d6c617825 100644 (file)
 #include <linux/string.h>
 #include <linux/slab.h>
 
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
        struct of_irq oirq;
 
        if (of_irq_map_one(dev, index, &oirq))
-               return NO_IRQ;
+               return 0;
 
        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
        /* Only dereference the resource if both the
         * resource and the irq are valid. */
-       if (r && irq != NO_IRQ) {
+       if (r && irq) {
                r->start = r->end = irq;
                r->flags = IORESOURCE_IRQ;
                r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
 {
        int nr = 0;
 
-       while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+       while (of_irq_to_resource(dev, nr, NULL))
                nr++;
 
        return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
        int i;
 
        for (i = 0; i < nr_irqs; i++, res++)
-               if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+               if (!of_irq_to_resource(dev, i, res))
                        break;
 
        return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
 
                desc->dev = np;
                desc->interrupt_parent = of_irq_find_parent(np);
+               if (desc->interrupt_parent == np)
+                       desc->interrupt_parent = NULL;
                list_add_tail(&desc->list, &intc_desc_list);
        }
 
index dccd8636095cb361e2e0e1e2bb8fb7fdd57ecee2..f8c752e408a663d55adf7e84ca4fb42aa93d1d02 100644 (file)
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
        return err;
 }
 
+static int timer_mode;
+
 static int __init oprofile_init(void)
 {
        int err;
 
+       /* always init architecture to set up backtrace support */
        err = oprofile_arch_init(&oprofile_ops);
-       if (err < 0 || timer) {
-               printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+       timer_mode = err || timer;      /* fall back to timer mode on errors */
+       if (timer_mode) {
+               if (!err)
+                       oprofile_arch_exit();
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
                        return err;
        }
-       return oprofilefs_register();
+
+       err = oprofilefs_register();
+       if (!err)
+               return 0;
+
+       /* failed */
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
+
+       return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
-       oprofile_timer_exit();
        oprofilefs_unregister();
-       oprofile_arch_exit();
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
 }
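
The reworked init path above falls back to timer mode and unwinds whichever mode was set up if oprofilefs registration fails. A condensed sketch of that shape, using purely hypothetical helper names rather than the real oprofile symbols:

	err = hw_init();			/* arch/perf backend */
	use_timer = err || timer_forced;	/* fall back on error or request */
	if (use_timer) {
		if (!err)
			hw_exit();		/* undo the successful hw init */
		err = timer_init();
		if (err)
			return err;
	}

	err = fs_register();
	if (err) {				/* unwind the mode that was chosen */
		if (use_timer)
			timer_exit();
		else
			hw_exit();
	}
	return err;
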
 
 
index 3ef44624f5103ddaf405e76fcafd0afe6b27a132..878fba1265829cdab586a145d86a332b5ce32874 100644 (file)
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
        ops->start = oprofile_hrtimer_start;
        ops->stop = oprofile_hrtimer_stop;
        ops->cpu_type = "timer";
+       printk(KERN_INFO "oprofile: using timer interrupt.\n");
        return 0;
 }
 
index 13ef8c37471d0a6575fed59d6cf92dece74c34ba..dcdc1f4a4624d782d35f3776d1a69bbfdf983cdb 100644 (file)
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
        int illumination_supported:1;
        int video_supported:1;
        int fan_supported:1;
+       int system_event_supported:1;
 
        struct mutex mutex;
 };
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
        u32 hci_result;
        u32 value;
 
-       if (!dev->key_event_valid) {
+       if (!dev->key_event_valid && dev->system_event_supported) {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
                if (hci_result == HCI_SUCCESS) {
                        dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
 
        /* enable event fifo */
        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+       if (hci_result == HCI_SUCCESS)
+               dev->system_event_supported = 1;
 
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
        u32 hci_result, value;
+       int retries = 3;
 
-       if (event != 0x80)
+       if (!dev->system_event_supported || event != 0x80)
                return;
+
        do {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
-               if (hci_result == HCI_SUCCESS) {
+               switch (hci_result) {
+               case HCI_SUCCESS:
                        if (value == 0x100)
                                continue;
                        /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
                                pr_info("Unknown key %x\n",
                                       value);
                        }
-               } else if (hci_result == HCI_NOT_SUPPORTED) {
+                       break;
+               case HCI_NOT_SUPPORTED:
                        /* This is a workaround for an unresolved issue on
                         * some machines where system events sporadically
                         * become disabled. */
                        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
                        pr_notice("Re-enabled hotkeys\n");
+                       /* fall through */
+               default:
+                       retries--;
+                       break;
                }
-       } while (hci_result != HCI_EMPTY);
+       } while (retries && hci_result != HCI_EMPTY);
 }
 
 
index cffcb7c00b0068da7b32a467358b7e71a0388e13..01fa671ec97f6af9a31bf1a8c39ceef86691617b 100644 (file)
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
 #define PMIC_BATT_CHR_SBATDET_MASK     (1 << 5)
 #define PMIC_BATT_CHR_SDCLMT_MASK      (1 << 6)
 #define PMIC_BATT_CHR_SUSBOVP_MASK     (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK       0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK       0x86
+
 #define PMIC_BATT_ADC_ACCCHRG_MASK     (1 << 31)
 #define PMIC_BATT_ADC_ACCCHRGVAL_MASK  0x7FFFFFFF
 
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
                        pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
                        batt_exception = 1;
-               } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-                       pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-                       pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
-                       batt_exception = 1;
                } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        batt_exception = 1;
                } else {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+                       if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+                               /* PMIC will change charging current automatically */
+                               pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+                       }
                }
        }
 
index cf3f9997546dc41d10a143390e834d0926720964..10451a15e8284f33be26996363c5ffd6c79ad56a 100644 (file)
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
 
 static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 {
-       return 1; /* always round timer functions to one nanosecond */
+       tp->tv_sec = 0;
+       tp->tv_nsec = 1;
+       return 0;
 }
 
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
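
With this change clock_getres() on a PTP clock reports its 1 ns resolution through the timespec instead of abusing the return value. A rough user-space check, assuming a device at /dev/ptp0 and the FD_TO_CLOCKID convention used by the kernel's PTP test program:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>

	#define CLOCKFD			3
	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

	int fd = open("/dev/ptp0", O_RDWR);
	struct timespec res;

	if (fd >= 0 && !clock_getres(FD_TO_CLOCKID(fd), &res))
		printf("resolution: %ld.%09ld s\n", (long)res.tv_sec, res.tv_nsec);
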
index 5225930a10cd24fd5003d553bfd65ada9e10eebc..691b1ab1a3d0499d85815bd03551e9d22518e494 100644 (file)
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
        INIT_WORK(&priv->idb_work, tsi721_db_dpc);
 
        /* Allocate buffer for inbound doorbells queue */
-       priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
                                IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
                                &priv->idb_dma, GFP_KERNEL);
        if (!priv->idb_base)
                return -ENOMEM;
 
-       memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
        dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
                priv->idb_base, (unsigned long long)priv->idb_dma);
 
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
         */
 
        /* Allocate space for DMA descriptors */
-       bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
                                        &bd_phys, GFP_KERNEL);
        if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].bd_phys = bd_phys;
        priv->bdma[chnum].bd_base = bd_ptr;
 
-       memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
 
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
                                        bd_num : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
-       sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_KERNEL);
        if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].sts_base = sts_ptr;
        priv->bdma[chnum].sts_size = sts_size;
 
-       memset(sts_ptr, 0, sts_size);
-
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
                sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 
        /* Outbound message descriptor status FIFO allocation */
        priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
-       priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
                        priv->omsg_ring[mbox].sts_size *
                                                sizeof(struct tsi721_dma_sts),
                        &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
                goto out_desc;
        }
 
-       memset(priv->omsg_ring[mbox].sts_base, 0,
-               entries * sizeof(struct tsi721_dma_sts));
-
        /*
         * Configure Outbound Messaging Engine
         */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        INIT_LIST_HEAD(&mport->dbells);
 
        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
-       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
-       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
        strcpy(mport->name, "Tsi721 mport");
 
        /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct tsi721_device *priv;
-       int i;
+       int i, cap;
        int err;
        u32 regval;
 
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                        dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
        }
 
-       /* Clear "no snoop" and "relaxed ordering" bits. */
-       pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
-       regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
-       pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+       cap = pci_pcie_cap(pdev);
+       BUG_ON(cap == 0);
+
+       /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+       regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+                   PCI_EXP_DEVCTL_NOSNOOP_EN);
+       regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+       /* Adjust PCIe completion timeout. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+       regval &= ~(0x0f);
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
 
        /*
         * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
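
The allocations above move to dma_zalloc_coherent(), which hands back memory that is already zeroed, so the follow-up memset() calls are dropped. The pattern in isolation (buf, size and dma_handle are placeholders):

	buf = dma_zalloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* no memset(buf, 0, size) needed -- the buffer arrives zeroed */
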
index 58be4deb1402ad9f2a119c196e208a8b0d2f2593..822e54c394d5cd0690ee80d2c5173eb05140018f 100644 (file)
@@ -72,6 +72,8 @@
 #define TSI721_MSIXPBA_OFFSET  0x2a000
 #define TSI721_PCIECFG_EPCTL   0x400
 
+#define MAX_READ_REQUEST_SZ_SHIFT      12
+
 /*
  * Event Management Registers
  */
index 5abeb3ac3e8da43df3d58bf677c3ac0e3d17116e..298c6c6a279574561eb24f329b115fe134d274a1 100644 (file)
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
                        break;
        }
 
-       if (!ri)
+       if (i == ARRAY_SIZE(aat2870_regulators))
                return NULL;
 
        ri->enable_addr = AAT2870_LDO_EN;
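
The check above is the usual fix for a search loop whose cursor is assigned on every iteration: test the loop index against the table size instead of a pointer that can never be NULL. Generic shape, with table and id as placeholders:

	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (table[i].id == id)
			break;

	if (i == ARRAY_SIZE(table))
		return NULL;		/* nothing matched */
	return &table[i];
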
index 669d0216022195e36bcb8f59b25cf14de1dff6c9..938398f3e869c57814fc130b9133ac435118214e 100644 (file)
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
        list_del(&rdev->list);
        if (rdev->supply)
                regulator_put(rdev->supply);
-       device_unregister(&rdev->dev);
        kfree(rdev->constraints);
+       device_unregister(&rdev->dev);
        mutex_unlock(&regulator_list_mutex);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
index ee8747f4fa08b187ef2f79dbb51e3f69a770bc21..11cc308d66e925db83fa8c50697e734608aac1b8 100644 (file)
@@ -71,6 +71,7 @@ struct twlreg_info {
 #define VREG_TYPE              1
 #define VREG_REMAP             2
 #define VREG_DEDICATED         3       /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
 /* TWL6030 register offsets */
 #define VREG_TRANS             1
 #define VREG_STATE             2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
        .get_status     = twl4030reg_get_status,
 };
 
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+                       unsigned *selector)
+{
+       struct twlreg_info *info = rdev_get_drvdata(rdev);
+       int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+       twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+               vsel);
+       return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+       struct twlreg_info *info = rdev_get_drvdata(rdev);
+       int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+               VREG_VOLTAGE_SMPS_4030);
+
+       return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+       .set_voltage    = twl4030smps_set_voltage,
+       .get_voltage    = twl4030smps_get_voltage,
+};
+
 static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
                }, \
        }
 
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+       { \
+       .base = offset, \
+       .id = num, \
+       .delay = turnon_delay, \
+       .remap = remap_conf, \
+       .desc = { \
+               .name = #label, \
+               .id = TWL4030_REG_##label, \
+               .ops = &twl4030smps_ops, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
 #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
        .base = offset, \
        .min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
        TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
        TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
        TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
-       TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
-       TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+       TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+       TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
        TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
        TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
        TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
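
For the new twl4030 SMPS ops, the VDD1/VDD2 selector encodes 600 mV plus 12.5 mV steps. A quick worked example of the conversion in both directions:

	/* Requesting min_uV = 1200000 (1.2 V):
	 *	vsel = DIV_ROUND_UP(1200000 - 600000, 12500) = 48
	 * Reading it back:
	 *	48 * 12500 + 600000 = 1200000 uV
	 */
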
index e8326f26fa2f5c5ca11d26901f5e70052a1ffea6..dc4c2748bbc38bfac593cc47a2ff7bac6a34c8fe 100644 (file)
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
         */
        delta = timespec_sub(old_system, old_rtc);
        delta_delta = timespec_sub(delta, old_delta);
-       if (abs(delta_delta.tv_sec)  >= 2) {
+       if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
        rtc_tm_to_time(&tm, &new_rtc.tv_sec);
        new_rtc.tv_nsec = 0;
 
-       if (new_rtc.tv_sec <= old_rtc.tv_sec) {
-               if (new_rtc.tv_sec < old_rtc.tv_sec)
-                       pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
+       if (new_rtc.tv_sec < old_rtc.tv_sec) {
+               pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
                return 0;
        }
 
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
        sleep_time = timespec_sub(sleep_time,
                        timespec_sub(new_system, old_system));
 
-       timekeeping_inject_sleeptime(&sleep_time);
+       if (sleep_time.tv_sec >= 0)
+               timekeeping_inject_sleeptime(&sleep_time);
        return 0;
 }
 
index 8e286259a007fbc5921b4c569cdb959f62762f7a..fa4d9f324189a8cb876692764b5eed7601cb1950 100644 (file)
@@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+       int err;
+
+       if (!rtc->ops)
+               err = -ENODEV;
+       else if (!rtc->ops->set_alarm)
+               err = -EINVAL;
+       else
+               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+       return err;
+}
+
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        struct rtc_time tm;
@@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
-       if (!rtc->ops)
-               err = -ENODEV;
-       else if (!rtc->ops->set_alarm)
-               err = -EINVAL;
-       else
-               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
-       return err;
+       return ___rtc_set_alarm(rtc, alarm);
 }
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        return 0;
 }
 
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+       struct rtc_wkalrm alarm;
+       struct rtc_time tm;
+
+       __rtc_read_time(rtc, &tm);
+
+       alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+                                    ktime_set(300, 0)));
+       alarm.enabled = 0;
+
+       ___rtc_set_alarm(rtc, &alarm);
+}
+
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
@@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
                struct rtc_wkalrm alarm;
                int err;
                next = timerqueue_getnext(&rtc->timerqueue);
-               if (!next)
+               if (!next) {
+                       rtc_alarm_disable(rtc);
                        return;
+               }
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +870,8 @@ again:
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        goto again;
-       }
+       } else
+               rtc_alarm_disable(rtc);
 
        mutex_unlock(&rtc->ops_lock);
 }
index 7639ab906f02e35ceec0a75bfaddd418e17478e8..5b979d9cc3324ffccd455da21e09a35b88f753ad 100644 (file)
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       clk_enable(rtc_clk);
        pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
                 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
+       clk_enable(rtc_clk);
        writeb(bin2bcd(tm->tm_sec),  base + S3C2410_RTCSEC);
        writeb(bin2bcd(tm->tm_min),  base + S3C2410_RTCMIN);
        writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
index 75c3f1f8fd434301c3ba4a07a632e0ffefa6aac3..a84631a7391d3ed50c680b939129d159fc25daa3 100644 (file)
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
        struct channel_path *chp = chpid_to_chp(chpid);
-       struct chp_link link;
 
-       memset(&link, 0, sizeof(struct chp_link));
-       link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
                /* Try to update the channel path descriptor. */
                chsc_determine_base_channel_path_desc(chpid, &chp->desc);
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-                                          __s390_vary_chpid_on, &link);
+                                          __s390_vary_chpid_on, &chpid);
        } else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-                                          NULL, &link);
+                                          NULL, &chpid);
 
        return 0;
 }
index 155a82bcb9e545e2888430337d0b1c2b50239acd..4a1ff5c2eb881355204ffe8e047cfbe3bb8e3706 100644 (file)
@@ -68,8 +68,13 @@ struct schib {
        __u8 mda[4];             /* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
 enum sch_todo {
        SCH_TODO_NOTHING,
+       SCH_TODO_EVAL,
        SCH_TODO_UNREG,
 };
 
index 92d7324acb1c78fbab348a2ea190c8901351df9d..21908e67bf6745d8dc91f791347a9d9cee538aa1 100644 (file)
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
-static void css_sch_todo(struct work_struct *work)
-{
-       struct subchannel *sch;
-       enum sch_todo todo;
-
-       sch = container_of(work, struct subchannel, todo_work);
-       /* Find out todo. */
-       spin_lock_irq(sch->lock);
-       todo = sch->todo;
-       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
-                     sch->schid.sch_no, todo);
-       sch->todo = SCH_TODO_NOTHING;
-       spin_unlock_irq(sch->lock);
-       /* Perform todo. */
-       if (todo == SCH_TODO_UNREG)
-               css_sch_device_unregister(sch);
-       /* Release workqueue ref. */
-       put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
-       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
-                     sch->schid.ssid, sch->schid.sch_no, todo);
-       if (sch->todo >= todo)
-               return;
-       /* Get workqueue ref. */
-       if (!get_device(&sch->dev))
-               return;
-       sch->todo = todo;
-       if (!queue_work(cio_work_q, &sch->todo_work)) {
-               /* Already queued, release workqueue ref. */
-               put_device(&sch->dev);
-       }
-}
-
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
        int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
                css_schedule_eval(schid);
 }
 
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+                     sch->schid.ssid, sch->schid.sch_no, todo);
+       if (sch->todo >= todo)
+               return;
+       /* Get workqueue ref. */
+       if (!get_device(&sch->dev))
+               return;
+       sch->todo = todo;
+       if (!queue_work(cio_work_q, &sch->todo_work)) {
+               /* Already queued, release workqueue ref. */
+               put_device(&sch->dev);
+       }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+       struct subchannel *sch;
+       enum sch_todo todo;
+       int ret;
+
+       sch = container_of(work, struct subchannel, todo_work);
+       /* Find out todo. */
+       spin_lock_irq(sch->lock);
+       todo = sch->todo;
+       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+                     sch->schid.sch_no, todo);
+       sch->todo = SCH_TODO_NOTHING;
+       spin_unlock_irq(sch->lock);
+       /* Perform todo. */
+       switch (todo) {
+       case SCH_TODO_NOTHING:
+               break;
+       case SCH_TODO_EVAL:
+               ret = css_evaluate_known_subchannel(sch, 1);
+               if (ret == -EAGAIN) {
+                       spin_lock_irq(sch->lock);
+                       css_sched_sch_todo(sch, todo);
+                       spin_unlock_irq(sch->lock);
+               }
+               break;
+       case SCH_TODO_UNREG:
+               css_sch_device_unregister(sch);
+               break;
+       }
+       /* Release workqueue ref. */
+       put_device(&sch->dev);
+}
+
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
 static wait_queue_head_t css_eval_wq;
index d734f4a0ecac23cea1d821b316a563b71087ccc6..47269858ecb662af862c38a5e96fc4f9aacfe2ca 100644 (file)
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
         */
        cdev->private->flags.resuming = 1;
        cdev->private->path_new_mask = LPM_ANYPATH;
-       css_schedule_eval(sch->schid);
+       css_sched_sch_todo(sch, SCH_TODO_EVAL);
        spin_unlock_irq(sch->lock);
-       css_complete_work();
+       css_wait_for_slow_path();
 
        /* cdev may have been moved to a different subchannel. */
        sch = to_subchannel(cdev->dev.parent);
index 52c233fa2b1281d14a2881618465606447bef365..1b853513c891ca2f010f8703b110f49c6f65afb3 100644 (file)
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
        cdev->private->pgid_reset_mask = 0;
 }
 
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+       memset(irb, 0, sizeof(*irb));
+       if (type == FAKE_CMD_IRB) {
+               struct cmd_scsw *scsw = &irb->scsw.cmd;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       } else if (type == FAKE_TM_IRB) {
+               struct tm_scsw *scsw = &irb->scsw.tm;
+               scsw->x = 1;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
        struct subchannel *sch;
 
@@ -520,12 +538,8 @@ callback:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
-                       memset(&cdev->private->irb, 0, sizeof(struct irb));
-                       cdev->private->irb.scsw.cmd.cc = 1;
-                       cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
-                       cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
-                       cdev->private->irb.scsw.cmd.stctl =
-                               SCSW_STCTL_STATUS_PEND;
+                       create_fake_irb(&cdev->private->irb,
+                                       cdev->private->flags.fake_irb);
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
index f98698d5735e887e0fb6cc46f00a63012ecdccb5..ec7fb6d3b479a25a32bfad67ecc36a3539782b39 100644 (file)
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
-                       cdev->private->flags.fake_irb = 1;
+                       cdev->private->flags.fake_irb = FAKE_CMD_IRB;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
        sch = to_subchannel(cdev->dev.parent);
        if (!sch->schib.pmcw.ena)
                return -EINVAL;
+       if (cdev->private->state == DEV_STATE_VERIFY) {
+               /* Remember to fake irb when finished. */
+               if (!cdev->private->flags.fake_irb) {
+                       cdev->private->flags.fake_irb = FAKE_TM_IRB;
+                       cdev->private->intparm = intparm;
+                       return 0;
+               } else
+                       /* There's already a fake I/O around. */
+                       return -EBUSY;
+       }
        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EIO;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
index 2ebb492a5c17dcb8a9e05fb23209162edb43d849..76253dfcc1be86a18eba7ac7ea4d6f53c134bc7e 100644 (file)
@@ -111,6 +111,9 @@ enum cdev_todo {
        CDEV_TODO_UNREG_EVAL,
 };
 
+#define FAKE_CMD_IRB   1
+#define FAKE_TM_IRB    2
+
 struct ccw_device_private {
        struct ccw_device *cdev;
        struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
                unsigned int doverify:1;    /* delayed path verification */
                unsigned int donotify:1;    /* call notify function */
                unsigned int recog_done:1;  /* dev. recog. complete */
-               unsigned int fake_irb:1;    /* deliver faked irb */
+               unsigned int fake_irb:2;    /* deliver faked irb */
                unsigned int resuming:1;    /* recognition while resume */
                unsigned int pgroup:1;      /* pathgroup is set up */
                unsigned int mpath:1;       /* multipathing is set up */
index ec94f049e99543849ed56c90c665102c40c5b87e..96bbe9d12a79fbef17fa9b4df331adf5a24a96c3 100644 (file)
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
        rc = ap_init_queue(ap_dev->qid);
        if (rc == -ENODEV)
                ap_dev->unregistered = 1;
+       else
+               __ap_schedule_poll_timer();
 }
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
index a1fd73df5416129c62d8889f87c1d003b0b6b47c..8ba4510a95195392da12a65ce641ea5f164f3c20 100644 (file)
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       tristate "Freescale SPI controller"
+       bool "Freescale SPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
          MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-       tristate "Freescale eSPI controller"
+       bool "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
index 024b48aed5ca6bba6f32857965e8904fe012ca1b..acc88b4d28693da4d59d315b350f0e717837e607 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
index e093d3ec41ba26f1da89a100e3f8774511e65a21..0094c645ff0d4cc4562412ef020e6ba838966779 100644 (file)
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
        spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
        int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
        return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
        u16 *res_flags)
 {
index e763254741c296169b6d662d5f398c1a90170cc2..182e9c873822a64b6e9e25976470044eae8bc338 100644 (file)
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
                goto err_clk;
        }
 
-       mfp_set_groupg(&pdev->dev);
+       mfp_set_groupg(&pdev->dev, NULL);
        nuc900_init_spi(hw);
 
        err = spi_bitbang_start(&hw->bitbang);
index 15877fb0a363d3c8c70fdfc5f7a8b3522cdc8635..9c8b33cc438e0085f607164b6da5a756c48955d7 100644 (file)
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
        }
 
        insns =
-           kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+           kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
        if (!insns) {
                DPRINTK("kmalloc failed\n");
                ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
        return ret;
 }
 
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+       struct comedi_async *async;
+       struct comedi_device *dev;
+
+       async = area->vm_private_data;
+       dev = async->subdevice->device;
+
+       mutex_lock(&dev->mutex);
+       async->mmap_count++;
+       mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
 {
        struct comedi_async *async;
        struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
 }
 
 static struct vm_operations_struct comedi_vm_ops = {
-       .close = comedi_unmap,
+       .open = comedi_vm_open,
+       .close = comedi_vm_close,
 };
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_async *async = NULL;
        unsigned long start = vma->vm_start;
        unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
        int i;
        int retval;
        struct comedi_subdevice *s;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+
+       dev_file_info = comedi_get_device_file_info(minor);
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
 {
        unsigned int mask = 0;
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *read_subdev;
        struct comedi_subdevice *write_subdev;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy)
                                break;
                        if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy) {
                                retval = 0;
                                break;
@@ -1885,11 +1924,17 @@ ok:
 static int comedi_close(struct inode *inode, struct file *file)
 {
        const unsigned minor = iminor(inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *s = NULL;
        int i;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
 
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
 static int comedi_fasync(int fd, struct file *file, int on)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
 
-       struct comedi_device *dev = dev_file_info->device;
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        return fasync_helper(fd, file, on, &dev->async_queue);
 }
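
Among the changes above, the insn list allocation switches to kcalloc(), which fails cleanly if the element count times the element size would overflow. The general pattern, with buf and user_count as placeholders:

	/* risky: a huge user-supplied count can wrap the multiplication */
	buf = kmalloc(sizeof(*buf) * user_count, GFP_KERNEL);

	/* safer: kcalloc() returns NULL instead of a short allocation */
	buf = kcalloc(user_count, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
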
index 4500b80eee6bf8d78f30ce9da829f22669e9e0fb..ca6bcf8b02315a9295c1d08deff1fc4efadcfe19 100644 (file)
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
 /*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
 Description: University of Stirling USB DAQ & INCITE Technology Limited
 Devices: [ITL] USB-DUX (usbduxsigma.o)
 Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
 Status: testing
 */
 /*
@@ -44,6 +44,7 @@ Status: testing
  *   0.3: proper vendor ID and driver name
  *   0.4: fixed D/A voltage range
  *   0.5: various bug fixes, health check at startup
+ *   0.6: corrected wrong input range
  */
 
 /* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
 /* comedi constants */
 static const struct comedi_lrange range_usbdux_ai_range = { 1, {
                                                                BIP_RANGE
-                                                               (2.65)
+                                                               (2.65/2.0)
                                                                }
 };
 
index dbd1ce1ce7e9df69ec81d2a977953cb0345088ca..2eef85fa38c7957d7fb2458ce9e61251eac9fb82 100644 (file)
@@ -256,25 +256,24 @@ static const struct file_operations iio_event_chrdev_fileops = {
 
 static int iio_event_getfd(struct iio_dev *indio_dev)
 {
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
        int fd;
 
-       if (indio_dev->event_interface == NULL)
+       if (ev_int == NULL)
                return -ENODEV;
 
-       mutex_lock(&indio_dev->event_interface->event_list_lock);
-       if (test_and_set_bit(IIO_BUSY_BIT_POS,
-                            &indio_dev->event_interface->flags)) {
-               mutex_unlock(&indio_dev->event_interface->event_list_lock);
+       mutex_lock(&ev_int->event_list_lock);
+       if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+               mutex_unlock(&ev_int->event_list_lock);
                return -EBUSY;
        }
-       mutex_unlock(&indio_dev->event_interface->event_list_lock);
+       mutex_unlock(&ev_int->event_list_lock);
        fd = anon_inode_getfd("iio:event",
-                               &iio_event_chrdev_fileops,
-                               indio_dev->event_interface, O_RDONLY);
+                               &iio_event_chrdev_fileops, ev_int, O_RDONLY);
        if (fd < 0) {
-               mutex_lock(&indio_dev->event_interface->event_list_lock);
-               clear_bit(IIO_BUSY_BIT_POS, &indio_dev->event_interface->flags);
-               mutex_unlock(&indio_dev->event_interface->event_list_lock);
+               mutex_lock(&ev_int->event_list_lock);
+               clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+               mutex_unlock(&ev_int->event_list_lock);
        }
        return fd;
 }
index 480b0ed2e4de8975ea315504190c5eff02e5ac59..115635f9502456d7e8e3f16470b7f4a269b23b5b 100644 (file)
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
        th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
        if (IS_ERR(th)) {
                printk(KERN_ERR "Unable to start the device-scanning thread\n");
+               complete(&dev->scanning_done);
                quiesce_and_remove_host(dev);
                err = PTR_ERR(th);
                goto errout;
index 09c44abb89e8239c322bacb1671418dcd4b32e0a..3872b8cccdcf715067f273e091f3fd2cf419d6ac 100644 (file)
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
+       unsigned long flags;
 
        spin_lock(&vdev->priv_lock);
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
        usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
        struct vhci_unlink *unlink;
        struct urb *urb;
+       unsigned long flags;
 
        usbip_dump_header(pdu);
 
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                urb->status = pdu->u.ret_unlink.status;
                pr_info("urb->status %d\n", urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
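
The vhci_rx hunks switch to the IRQ-safe lock primitives, presumably because the_controller->lock can also be taken from interrupt context on the HCD side. The rule-of-thumb pattern:

	unsigned long flags;

	spin_lock_irqsave(&the_controller->lock, flags);	/* safe in any context */
	/* ... touch state shared with IRQ-side users of the lock ... */
	spin_unlock_irqrestore(&the_controller->lock, flags);
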
index 0fd96c10271d8364c9b6e69de8dc80ecba567501..8599545cdf9e6d6c90a07510844f6290ab0a1be6 100644 (file)
@@ -614,13 +614,12 @@ int iscsit_add_reject(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        if (add_to_conn) {
                spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
                                " non-existent or non-exported iSCSI LUN:"
                                " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
                }
-               if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-                       return iscsit_add_reject_from_cmd(
-                                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                                       1, 1, buf, cmd);
-
                send_check_condition = 1;
                goto attach_cmd;
        }
@@ -1044,6 +1037,8 @@ done:
                 */
                send_check_condition = 1;
        } else {
+               cmd->data_length = cmd->se_cmd.data_length;
+
                if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
                        return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
         * the backend memory allocation.
         */
        ret = transport_generic_new_cmd(&cmd->se_cmd);
-       if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+       if (ret < 0) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
                dump_immediate_data = 1;
                goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-                    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+                    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
                        dump_unsolicited_data = 1;
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
        if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
                if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                }
        }
        hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
        hdr->flags              |= ISCSI_FLAG_CMD_FINAL;
        if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        }
        hdr->response           = cmd->iscsi_response;
        hdr->cmd_status         = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
        hdr                     = (struct iscsi_tm_rsp *) cmd->pdu;
        memset(hdr, 0, ISCSI_HDR_LEN);
        hdr->opcode             = ISCSI_OP_SCSI_TMFUNC_RSP;
+       hdr->flags              = ISCSI_FLAG_CMD_FINAL;
        hdr->response           = iscsit_convert_tcm_tmr_rsp(se_tmr);
        hdr->itt                = cpu_to_be32(cmd->init_task_tag);
        cmd->stat_sn            = conn->stat_sn++;
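
Several hunks above collapse a kzalloc() + memcpy() pair into kmemdup(), which allocates and copies in one call. Generic shape, with src, len and copy as placeholders:

	copy = kmemdup(src, len, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;		/* allocation failed, nothing was copied */
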
index beb39469e7f1e4f91c971f6da468c742a338699d..1cd6ce373b83508fd396f82b80290b91372c9e96 100644 (file)
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-       int j = DIV_ROUND_UP(len, 2);
+       int j = DIV_ROUND_UP(len, 2), rc;
 
-       hex2bin(dst, src, j);
+       rc = hex2bin(dst, src, j);
+       if (rc < 0)
+               pr_debug("CHAP string contains non hex digit symbols\n");
 
        dst[j] = '\0';
        return j;
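
The CHAP helper now checks hex2bin(), which in this tree returns 0 on success and a negative value if the input contains a non-hex character; the output length j is still derived from the input length either way. Minimal usage sketch, with challenge and the 16-byte digest as placeholders:

	u8 digest[16];

	/* two hex characters per output byte */
	if (hex2bin(digest, challenge, sizeof(digest)) < 0)
		pr_debug("challenge contains non hex digit symbols\n");
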
index 3723d90d5ae573db84b658fbd70bf2cd7a82bafa..f1a02dad05a02855b4ef59a6341e4bb61660ef30 100644 (file)
@@ -398,7 +398,6 @@ struct iscsi_cmd {
        u32                     pdu_send_order;
        /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
        u32                     pdu_start;
-       u32                     residual_count;
        /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
        u32                     seq_send_order;
        /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
        atomic_t                connection_exit;
        atomic_t                connection_recovery;
        atomic_t                connection_reinstatement;
-       atomic_t                connection_wait;
        atomic_t                connection_wait_rcfr;
        atomic_t                sleep_on_conn_wait_comp;
        atomic_t                transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
        atomic_t                session_reinstatement;
        atomic_t                session_stop_active;
        atomic_t                sleep_on_sess_wait_comp;
-       atomic_t                transport_wait_cmds;
        /* connection list */
        struct list_head        sess_conn_list;
        struct list_head        cr_active_list;
index c4c68da3e5004b3fa39eeb71829bbefab4e38632..101b1beb3bca205aed7611ec4424f54cc5b20671 100644 (file)
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
                 */
                if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-                       if (se_cmd->se_cmd_flags &
-                                       SCF_SCSI_RESERVATION_CONFLICT) {
+                       if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
                                cmd->i_state = ISTATE_SEND_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
                                iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
index daad362a93cecebeca5c9bcce26233da3b202df1..d734bdec24f9cf2b451a7f905b07fab421a19b3b 100644 (file)
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Could not allocate memory for session\n");
-               return -1;
+               return -ENOMEM;
        }
 
        iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
                pr_err("idr_pre_get() for sess_idr failed\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
        spin_lock(&sess_idr_lock);
        idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Unable to allocate memory for"
                                " struct iscsi_sess_ops.\n");
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        return 0;
index 426cd4bf6a9aab344425723cd51766aa932cc536..98936cb7c2947ceb0edbaa41dda91d6641a9b892 100644 (file)
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
                return NULL;
        }
 
-       login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!login->req) {
                pr_err("Unable to allocate memory for Login Request.\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                goto out;
        }
-       memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
        login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
        if (!login->req_buf) {
index 3df1c9b8ae6b7e07118575ebbf24a59124644b01..81d5832fbbd537e7bbffe2c21b1792c1e7a2acde 100644 (file)
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
                        scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
                        &tl_cmd->tl_sense_buf[0]);
 
-       /*
-        * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-        */
        if (scsi_bidi_cmnd(sc))
-               se_cmd->t_tasks_bidi = 1;
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
        /*
         * Locate the struct se_lun pointer and attach it to struct se_cmd
         */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
         * Allocate the necessary tasks to complete the received CDB+data
         */
        ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-       if (ret == -ENOMEM) {
-               /* Out of Resources */
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-       } else if (ret == -EINVAL) {
-               /*
-                * Handle case for SAM_STAT_RESERVATION_CONFLICT
-                */
-               if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
-               /*
-                * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-                * sense data.
-                */
-               return PYX_TRANSPORT_USE_SENSE_REASON;
-       }
-
+       if (ret != 0)
+               return ret;
        /*
         * For BIDI commands, pass in the extra READ buffer
         * to transport_generic_map_mem_to_cmd() below..
         */
-       if (se_cmd->t_tasks_bidi) {
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                struct scsi_data_buffer *sdb = scsi_in(sc);
 
                sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
        }
 
        /* Tell the core about our preallocated memory */
-       ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+       return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                        scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-       if (ret < 0)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-       return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                                struct tcm_loop_hba, tl_hba_wwn);
-       int host_no = tl_hba->sh->host_no;
+
+       pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+               tl_hba->tl_wwn_address, tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
         * release *tl_hba;
         */
        device_unregister(&tl_hba->dev);
-
-       pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
-               config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
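In the tcm_loop hunks above, the dedicated se_cmd->t_tasks_bidi field (and, in later hunks, t_tasks_fua) is replaced by SCF_BIDI and SCF_FUA bits in the existing se_cmd_flags word. A toy sketch of that flag-word pattern; only the SCF_-style names come from the diff, the rest is illustrative:

    #include <stdio.h>

    /* Per-command facts collapsed into one flags word instead of one field each. */
    enum toy_cmd_flags {
            TOY_SCF_BIDI = 1 << 0,
            TOY_SCF_FUA  = 1 << 1,
    };

    struct toy_cmd {
            unsigned int se_cmd_flags;
    };

    int main(void)
    {
            struct toy_cmd cmd = { 0 };

            cmd.se_cmd_flags |= TOY_SCF_BIDI;          /* signal BIDI usage */

            if (cmd.se_cmd_flags & TOY_SCF_BIDI)       /* later: map the extra READ buffer */
                    printf("command carries a bidirectional data buffer\n");
            if (!(cmd.se_cmd_flags & TOY_SCF_FUA))
                    printf("no forced unit access requested\n");
            return 0;
    }
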
index 88f2ad43ec8b589922e34de340dac32325080f13..1dcbef499d6a09f2add951dee10c955f3541ccb8 100644 (file)
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        int alua_access_state, primary = 0, rc;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+       if (!l_port) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
        buf = transport_kmap_first_data_page(cmd);
 
        /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!rc) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICT_ALUA is disabled\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
 
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
-                       rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       rc = -EINVAL;
                        goto out;
                }
                rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * throw an exception with ASCQ: INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
         * struct t10_alua_lu_gp.
         */
        spin_lock(&lu_gps_lock);
-       atomic_set(&lu_gp->lu_gp_shutdown, 1);
        list_del(&lu_gp->lu_gp_node);
        alua_lu_gps_count--;
        spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
        tg_pt_gp_mem->tg_pt = port;
        port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-       atomic_set(&port->sep_tg_pt_gp_active, 1);
 
        return tg_pt_gp_mem;
 }
index 683ba02b8247feddd92777fc46824196e5a60180..831468b3163d777f3eb5c982fc05819d37dea3e6 100644 (file)
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
        if (cmd->data_length < 60)
                return 0;
 
-       buf[2] = 0x3c;
+       buf[3] = 0x3c;
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
        if (cmd->data_length < 4) {
                pr_err("SCSI Inquiry payload length: %u"
                        " too small for EVPD=1\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
 
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
        ret = -EINVAL;
 
 out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
        default:
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       cdb[2] & 0x3f, cdb[3]);
-               return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+               return -EINVAL;
        }
        offset += length;
 
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
                        " supported\n");
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -ENOSYS;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("UNMAP emulation not supported for: %s\n",
                                dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("WRITE_SAME emulation not supported"
                                " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
 int target_emulate_synchronize_cache(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
+       struct se_cmd *cmd = task->task_se_cmd;
 
        if (!dev->transport->do_sync_cache) {
                pr_err("SYNCHRONIZE_CACHE emulation not supported"
                        " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        dev->transport->do_sync_cache(task);
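One note on the first hunk in this file: moving the 0x3c from buf[2] to buf[3] matches the VPD page header layout, where bytes 2-3 hold a big-endian PAGE LENGTH (0x003c for the Extended INQUIRY Data page 0x86), so the low byte belongs in buf[3]. A small sketch of that layout; the helper and its name are hypothetical, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* VPD page header: byte 1 = PAGE CODE, bytes 2-3 = big-endian PAGE LENGTH. */
    void fill_vpd_header(uint8_t *buf, uint8_t page_code, uint16_t payload_len)
    {
            buf[1] = page_code;
            buf[2] = (uint8_t)(payload_len >> 8);   /* PAGE LENGTH, MSB */
            buf[3] = (uint8_t)(payload_len & 0xff); /* PAGE LENGTH, LSB */
    }

    int main(void)
    {
            uint8_t buf[64] = { 0 };

            fill_vpd_header(buf, 0x86, 0x003c);
            printf("buf[2]=0x%02x buf[3]=0x%02x\n", buf[2], buf[3]); /* 0x00 0x3c */
            return 0;
    }
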
index e0c1e8a8dd4e2140e13d7adedb4c84d414b45725..93d4f6a1b7980c597c119ae7f7f0506d57846993 100644 (file)
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
                                " struct se_subsystem_dev\n");
                goto unlock;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
                        " from allocate_virtdevice()\n");
                goto out;
        }
-       spin_lock(&se_device_lock);
-       list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-       spin_unlock(&se_device_lock);
 
        config_group_init_type_name(&se_dev->se_dev_group, name,
                        &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
        mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
-       spin_lock(&se_device_lock);
-       list_del(&se_dev->se_dev_node);
-       spin_unlock(&se_device_lock);
-
        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
                df_item = &dev_stat_grp->default_groups[i]->cg_item;
index ba5edec2c5f858edaa011a463fae7bb31ad2a008..9b8639425472d8322aab749c6ae03fb56ea2a377 100644 (file)
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -708,7 +705,7 @@ done:
 
        se_task->task_scsi_status = GOOD;
        transport_complete_task(se_task, 1);
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("dpo_emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("dpo_emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (dev->transport->fua_write_emulated == 0) {
+       if (flag && dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("ua read emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("ua read emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == 0) {
+       if (flag && dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
                ret = -ENOMEM;
                goto out;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
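Several of the se_dev_set_emulate_*() hunks above add a "flag &&" guard so that writing 0 to an attribute the backend cannot emulate simply succeeds (the feature is already off), while writing 1 still fails. A toy setter showing the shape of that check, with stand-in names:

    #include <errno.h>
    #include <stdio.h>

    struct toy_dev {
            int backend_supports_fua_write;
            int emulate_fua_write;
    };

    /* Reject only attempts to *enable* an unsupported feature; disabling
     * is a no-op and should not error out. */
    static int toy_set_emulate_fua_write(struct toy_dev *dev, int flag)
    {
            if (flag != 0 && flag != 1)
                    return -EINVAL;
            if (flag && !dev->backend_supports_fua_write)
                    return -EINVAL;        /* cannot turn on what is not there */
            dev->emulate_fua_write = flag;
            return 0;
    }

    int main(void)
    {
            struct toy_dev dev = { 0, 0 };

            printf("disable: %d\n", toy_set_emulate_fua_write(&dev, 0)); /* 0 */
            printf("enable:  %d\n", toy_set_emulate_fua_write(&dev, 1)); /* -EINVAL */
            return 0;
    }
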
index 67cd6fe05bfa7c751596752da1ff0dd00035908f..b4864fba4ef0d511758916a8debac60ee9f43674 100644 (file)
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
                if (ret > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                   cmd->t_tasks_fua) {
+                   (cmd->se_cmd_flags & SCF_FUA)) {
                        /*
                         * We might need to be a bit smarter here
                         * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
 
        }
 
-       if (ret < 0)
+       if (ret < 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return ret;
+       }
        if (ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
        }
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     fd_free_task(): (Part of se_subsystem_api_t template)
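The readv/writev hunks above stop indexing task->task_sg as a flat array and use the for_each_sg() iterator, which also handles chained scatterlists by following sg_next(). A userspace analog of that iteration; the toy types and macro only mimic the kernel API:

    #include <stdio.h>

    /* Toy scatterlist entry with an explicit link; the real kernel list may
     * be chained, which is why iteration should follow links, not sg[i]. */
    struct toy_sg {
            const char *buf;
            unsigned int length;
            struct toy_sg *next;
    };

    /* Userspace analog of for_each_sg(): walk links while keeping an index. */
    #define for_each_toy_sg(first, s, nents, i) \
            for ((i) = 0, (s) = (first); (i) < (nents); (i)++, (s) = (s)->next)

    int main(void)
    {
            struct toy_sg c = { "tail", 4, NULL };
            struct toy_sg b = { "mid",  3, &c };
            struct toy_sg a = { "head", 4, &b };
            struct toy_sg *s;
            unsigned int i;

            for_each_toy_sg(&a, s, 3, i)
                    printf("iov[%u]: len=%u base=%s\n", i, s->length, s->buf);
            return 0;
    }
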
index 7698efe29262bfd8a7cb521016da0aff1ff0517d..4aa9922044382628fc21e12d8df3ab67840c23ae 100644 (file)
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                    task->task_se_cmd->t_tasks_fua))
+                    (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOSYS;
        }
 
        bio = iblock_get_bio(task, block_lba, sg_num);
-       if (!bio)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       if (!bio) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
+       }
 
        bio_list_init(&list);
        bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
index 5a4ebfc3a54f34791f6df93a36e02d85d752ebf7..95dee7074aeb5eb05f630fdded543e948ac206b3 100644 (file)
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
                pr_err("Received legacy SPC-2 RESERVE/RELEASE"
                        " while active SPC-3 registrations exist,"
                        " returning RESERVATION_CONFLICT\n");
-               *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return true;
        }
 
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
            (cmd->t_task_cdb[1] & 0x02)) {
                pr_err("LongIO and Obsolete Bits set, returning"
                                " ILLEGAL_REQUEST\n");
-               ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->se_deve->mapped_lun,
                        sess->se_node_acl->initiatorname);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out_unlock;
        }
 
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
                pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
                        " does not equal CDB data_length: %u\n", tpdl,
                        cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
                                smp_mb__after_atomic_dec();
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
                        /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                                smp_mb__after_atomic_dec();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
 
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
                if (!dest_tpg) {
                        pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
                                        " dest_tpg\n");
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
                                " %u for Transport ID: %s\n", tid_len, ptr);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
 
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
                        smp_mb__after_atomic_dec();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
                if (res_key) {
                        pr_warn("SPC-3 PR: Reservation Key non-zero"
                                " for SA REGISTER, returning CONFLICT\n");
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * Do nothing but return GOOD status.
                 */
                if (!sa_res_key)
-                       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+                       return 0;
 
                if (!spec_i_pt) {
                        /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
                        if (ret != 0) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               return -EINVAL;
                        }
                } else {
                        /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
                                        " 0x%016Lx\n", res_key,
                                        pr_reg->pr_res_key);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
                }
                if (spec_i_pt) {
                        pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
                                " set while sa_res_key=0\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       return -EINVAL;
                }
                /*
                 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
                                " registration exists, but ALL_TG_PT=1 bit not"
                                " present in received PROUT\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_CDB_FIELD;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
                }
                /*
                 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
                                pr_err("Unable to allocate"
                                        " pr_aptpl_buf\n");
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               return -EINVAL;
                        }
                }
                /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
                        if (pr_holder < 0) {
                                kfree(pr_aptpl_buf);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
 
                        spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RESERVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
        default:
                pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
                        " 0x%02x\n", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RELEASE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
            (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
 
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
        if (!pr_reg_n) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for CLEAR\n");
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
                        " existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
        int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
        int prh_type = 0, prh_scope = 0, ret;
 
-       if (!se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for PREEMPT%s\n",
                        (abort) ? "_AND_ABORT" : "");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (pr_reg_n->pr_res_key != res_key) {
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&preempt_and_abort_list);
 
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
        if (!all_reg && !sa_res_key) {
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
                if (!released_regs) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
        default:
                pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
                        " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
                        " *pr_reg for REGISTER_AND_MOVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * The provided reservation key must match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " res_key: 0x%016Lx does not match existing SA REGISTER"
                        " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
                        " sa_res_key\n");
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " does not equal CDB data_length: %u\n", tid_len,
                        cmd->data_length);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
                        smp_mb__after_atomic_dec();
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -EINVAL;
                }
 
                spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " fabric ops from Relative Target Port Identifier:"
                        " %hu\n", rtpi);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " from fabric: %s\n", proto_ident,
                        dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
                        dest_tf_ops->get_fabric_name());
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
                        " contain a valid tpg_parse_pr_out_transport_id"
                        " function pointer\n");
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
        initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " initiator_str from Transport ID\n");
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
                        " matches: %s on received I_T Nexus\n", initiator_str,
                        pr_reg_nacl->initiatorname);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " matches: %s %s on received I_T Nexus\n",
                        initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
                        pr_reg->pr_reg_isid);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
                pr_err("Unable to locate %s dest_node_acl for"
                        " TransportID%s\n", dest_tf_ops->get_fabric_name(),
                        initiator_str);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_node_acl = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
        if (!dest_se_deve) {
                pr_err("Unable to locate %s dest_se_deve from RTPI:"
                        " %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3553,7 +3615,8 @@ after_iport_check:
                atomic_dec(&dest_se_deve->pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_se_deve = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
                        " currently held\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3585,7 +3649,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
                        " Nexus is not reservation holder\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3603,7 +3668,8 @@ after_iport_check:
                        " reservation for type: %s\n",
                        core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
                                sa_res_key, 0, aptpl, 2, 1);
                if (ret != 0) {
                        spin_unlock(&dev->dev_reservation_lock);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * FIXME: A NULL struct se_session pointer means this is not coming from
         * a $FABRIC_MOD's nexus, but from internal passthrough ops.
         */
-       if (!cmd->se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!cmd->se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        if (cmd->data_length < 24) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * SPEC_I_PT=1 is only valid for Service action: REGISTER
         */
        if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
            (cmd->data_length != 24)) {
                pr_warn("SPC-PR: Received PR OUT illegal parameter"
                        " list length: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_OUT service"
                        " action: 0x%02x\n", cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
        if (cmd->data_length < 6) {
                pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
                        " %u too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
 
        switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_IN service"
                        " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
index ed32e1efe42906bbfe91a309889c0acde60a8da5..8b15e56b038461169872d964055316c0318e7e31 100644 (file)
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
                struct bio **hbio)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        u32 task_sg_num = task->task_sg_nents;
        struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
        u32 data_len = task->task_size, i, len, bytes, off;
        int nr_pages = (task->task_size + task_sg[0].offset +
                        PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       int nr_vecs = 0, rc;
        int rw = (task->task_data_direction == DMA_TO_DEVICE);
 
        *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return ret;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static int pscsi_do_task(struct se_task *task)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        struct pscsi_plugin_task *pt = PSCSI_TASK(task);
        struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
                if (!req || IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed: %ld\n",
                                        req ? PTR_ERR(req) : -ENOMEM);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -ENODEV;
                }
        } else {
                BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
                 * Setup the main struct request for the task->task_sg[] payload
                 */
                ret = pscsi_map_sg(task, task->task_sg, &hbio);
-               if (ret < 0)
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               if (ret < 0) {
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return ret;
+               }
 
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
                        (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
                        pscsi_req_done);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 /*     pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
                        " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
                        pt->pscsi_result);
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               task->task_se_cmd->scsi_sense_reason =
+                                       TCM_UNSUPPORTED_SCSI_OPCODE;
                transport_complete_task(task, 0);
                break;
        }
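
The persistent-reservation and pSCSI hunks above all apply the same conversion: instead of returning an opaque PYX_TRANSPORT_* status for a central switch to translate later, a backend now records the sense key directly in cmd->scsi_sense_reason (a TCM_* value) and returns an ordinary negative errno. The following is a minimal userspace sketch of that convention, not kernel code; the struct and enum are cut down to names visible in the diff.

/* Minimal model of the new error-reporting convention: a handler
 * sets a TCM_*-style sense reason on the command and returns a
 * plain negative errno.  Illustration only, not the kernel types. */
#include <errno.h>
#include <stdio.h>

enum tcm_sense_reason {
        TCM_NO_SENSE = 0,
        TCM_INVALID_CDB_FIELD,
        TCM_RESERVATION_CONFLICT,
        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
};

struct cmd {
        unsigned int data_length;
        enum tcm_sense_reason scsi_sense_reason;
};

/* before: return PYX_TRANSPORT_INVALID_CDB_FIELD;
 * after:  set the sense reason, return -EINVAL */
static int read_keys(struct cmd *cmd)
{
        if (cmd->data_length < 8) {
                cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
        return 0;
}

int main(void)
{
        struct cmd c = { .data_length = 4 };
        int ret = read_keys(&c);

        printf("ret=%d sense_reason=%d\n", ret, c.scsi_sense_reason);
        return 0;
}
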
index 5158d3846f19cf8f79f69e7efe97f72558b0d413..02e51faa2f4ea168f0a6139c8e303fc9fca81c28 100644 (file)
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
        return NULL;
 }
 
-/*     rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 {
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
        struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
+       struct scatterlist *rd_sg;
+       struct sg_mapping_iter m;
        u32 rd_offset = req->rd_offset;
+       u32 src_len;
 
        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -EINVAL;
 
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = task->task_sg;
-       sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+       rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
 
-       pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
-               " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       src_offset = rd_offset;
+       pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+                       dev->rd_dev_id, read_rd ? "Read" : "Write",
+                       task->task_lba, req->rd_size, req->rd_page,
+                       rd_offset);
 
+       src_len = PAGE_SIZE - rd_offset;
+       sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+                       read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (req->rd_size) {
-               if ((sg_d[i].length - dst_offset) <
-                   (sg_s[j].length - src_offset)) {
-                       length = (sg_d[i].length - dst_offset);
-
-                       pr_debug("Step 1 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
-                               sg_s[j].length);
-                       pr_debug("Step 1 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src = sg_virt(&sg_s[j]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst_offset = 0;
-                       src_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_s[j].length - src_offset);
-
-                       pr_debug("Step 2 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset,
-                               j, sg_s[j].length);
-                       pr_debug("Step 2 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       if (sg_d[i].length == length) {
-                               i++;
-                               dst_offset = 0;
-                       } else
-                               dst_offset = length;
-
-                       src = sg_virt(&sg_s[j++]) + src_offset;
-                       BUG_ON(!src);
-
-                       src_offset = 0;
-                       page_end = 1;
-               }
+               u32 len;
+               void *rd_addr;
 
-               memcpy(dst, src, length);
+               sg_miter_next(&m);
+               len = min((u32)m.length, src_len);
+               m.consumed = len;
 
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
+               rd_addr = sg_virt(rd_sg) + rd_offset;
 
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
+               if (read_rd)
+                       memcpy(m.addr, rd_addr, len);
+               else
+                       memcpy(rd_addr, m.addr, len);
 
-               if (!page_end)
+               req->rd_size -= len;
+               if (!req->rd_size)
                        continue;
 
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               src_len -= len;
+               if (src_len) {
+                       rd_offset += len;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
-               table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
-                       return -EINVAL;
-
-               sg_s = &table->sg_table[j = 0];
-       }
-
-       return 0;
-}
-
-/*     rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
-       struct se_task *task = &req->rd_task;
-       struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
-       struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
-       u32 rd_offset = req->rd_offset;
-
-       table = rd_get_sg_table(dev, req->rd_page);
-       if (!table)
-               return -EINVAL;
-
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
-       sg_s = task->task_sg;
-
-       pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
-               " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       dst_offset = rd_offset;
-
-       while (req->rd_size) {
-               if ((sg_s[i].length - src_offset) <
-                   (sg_d[j].length - dst_offset)) {
-                       length = (sg_s[i].length - src_offset);
-
-                       pr_debug("Step 1 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 1 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i++]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst = sg_virt(&sg_d[j]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src_offset = 0;
-                       dst_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_d[j].length - dst_offset);
-
-                       pr_debug("Step 2 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 2 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i]) + src_offset;
-                       BUG_ON(!src);
-
-                       if (sg_s[i].length == length) {
-                               i++;
-                               src_offset = 0;
-                       } else
-                               src_offset = length;
-
-                       dst = sg_virt(&sg_d[j++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       dst_offset = 0;
-                       page_end = 1;
-               }
-
-               memcpy(dst, src, length);
-
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
-
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
-
-               if (!page_end)
-                       continue;
-
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               /* rd page completed, next one please */
+               req->rd_page++;
+               rd_offset = 0;
+               src_len = PAGE_SIZE;
+               if (req->rd_page <= table->page_end_offset) {
+                       rd_sg++;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
                table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
+               if (!table) {
+                       sg_miter_stop(&m);
                        return -EINVAL;
+               }
 
-               sg_d = &table->sg_table[j = 0];
+               /* since we increment, the first sg entry is correct */
+               rd_sg = table->sg_table;
        }
-
+       sg_miter_stop(&m);
        return 0;
 }
 
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct rd_request *req = RD_REQ(task);
-       unsigned long long lba;
+       u64 tmp;
        int ret;
 
-       req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
-       lba = task->task_lba;
-       req->rd_offset = (do_div(lba,
-                         (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
-                          dev->se_sub_dev->se_dev_attrib.block_size;
+       tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+       req->rd_offset = do_div(tmp, PAGE_SIZE);
+       req->rd_page = tmp;
        req->rd_size = task->task_size;
 
-       if (task->task_data_direction == DMA_FROM_DEVICE)
-               ret = rd_MEMCPY_read(req);
-       else
-               ret = rd_MEMCPY_write(req);
-
+       ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
        if (ret != 0)
                return ret;
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
-
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     rd_free_task(): (Part of se_subsystem_api_t template)
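
The rewritten rd_MEMCPY() walks the task scatterlist with a struct sg_mapping_iter instead of the hand-rolled dual-index loops, and rd_MEMCPY_do_task() now derives the starting ramdisk page and intra-page offset from the byte offset lba * block_size with a single do_div(). Below is a hedged userspace check of that arithmetic, assuming a 4096-byte PAGE_SIZE and a block size that divides it (the only case the old formula handled); the sample lba and block_size are made up.

/* Sketch of the page/offset split now done with do_div() in
 * rd_MEMCPY_do_task().  Assumes PAGE_SIZE = 4096 and a block
 * size that divides PAGE_SIZE; purely illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        uint64_t lba = 9;            /* example values, not from the commit */
        uint32_t block_size = 512;

        /* new formula: one division of the byte offset */
        uint64_t tmp = lba * block_size;
        uint32_t rd_offset = tmp % PAGE_SIZE;
        uint32_t rd_page   = tmp / PAGE_SIZE;

        /* old formula: page from byte offset, offset from LBAs-per-page */
        uint32_t old_page   = (lba * block_size) / PAGE_SIZE;
        uint32_t old_offset = (lba % (PAGE_SIZE / block_size)) * block_size;

        printf("page %u/%u offset %u/%u\n",
               rd_page, old_page, rd_offset, old_offset);
        return 0;
}
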
index 217e29df62977559d1886320ea54c6dbe7d04fab..684522805a1f370a99a5745c2fa3815a9c722912 100644 (file)
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
                        " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
                        "Preempt" : "", cmd, cmd->t_state,
                        atomic_read(&cmd->t_fe_count));
-               /*
-                * Signal that the command has failed via cmd->se_cmd_flags,
-                */
-               transport_new_cmd_failure(cmd);
 
                core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
                                atomic_read(&cmd->t_fe_count));
index 3400ae6e93f83d2ae5b25395b97bbd6158877ec0..0257658e2e3ea8a75642ae0dcabc77547ac2379b 100644 (file)
@@ -61,7 +61,6 @@
 static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-       se_cmd_cache = kmem_cache_create("se_cmd_cache",
-                       sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-       if (!se_cmd_cache) {
-               pr_err("kmem_cache_create for struct se_cmd failed\n");
-               goto out;
-       }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
-               goto out_free_cmd_cache;
+               goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
 out_free_tmr_req_cache:
        kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
-       kmem_cache_destroy(se_cmd_cache);
 out:
        return -ENOMEM;
 }
@@ -191,7 +182,6 @@ out:
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_ILLEGAL_REQUEST;
+               task->task_se_cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        }
 
        transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd, 1, 1);
+       transport_generic_request_failure(cmd);
 }
 
 /*     transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
        if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-                       cmd->transport_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
+
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
        dev->se_hba             = hba;
        dev->se_sub_dev         = se_dev;
        dev->transport          = transport;
-       atomic_set(&dev->active_cmds, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->execute_task_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->ordered_cmd_lock);
-       spin_lock_init(&dev->state_task_lock);
-       spin_lock_init(&dev->dev_alua_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
-       INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_queue_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
        /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        return -ENOMEM;
                }
        } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary.
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0) {
-               cmd->transport_error_status = ret;
-               transport_generic_request_failure(cmd, 0,
-                               (cmd->data_direction != DMA_TO_DEVICE));
-       }
+       if (ret < 0)
+               transport_generic_request_failure(cmd);
+
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-       struct se_cmd *cmd,
-       int complete,
-       int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state,
-               cmd->transport_error_status);
+               cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
                " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
        if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                transport_complete_task_attr(cmd);
 
-       if (complete) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-       }
-
-       switch (cmd->transport_error_status) {
-       case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               break;
-       case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-               cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-               break;
-       case PYX_TRANSPORT_INVALID_CDB_FIELD:
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               break;
-       case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               break;
-       case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-               if (!sc)
-                       transport_new_cmd_failure(cmd);
-               /*
-                * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-                * we force this session to fall back to session
-                * recovery.
-                */
-               cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-               cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-               goto check_stop;
-       case PYX_TRANSPORT_LU_COMM_FAILURE:
-       case PYX_TRANSPORT_ILLEGAL_REQUEST:
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
-       case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-               break;
-       case PYX_TRANSPORT_WRITE_PROTECTED:
-               cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+       switch (cmd->scsi_sense_reason) {
+       case TCM_NON_EXISTENT_LUN:
+       case TCM_UNSUPPORTED_SCSI_OPCODE:
+       case TCM_INVALID_CDB_FIELD:
+       case TCM_INVALID_PARAMETER_LIST:
+       case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+       case TCM_UNKNOWN_MODE_PAGE:
+       case TCM_WRITE_PROTECTED:
+       case TCM_CHECK_CONDITION_ABORT_CMD:
+       case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+       case TCM_CHECK_CONDITION_NOT_READY:
                break;
-       case PYX_TRANSPORT_RESERVATION_CONFLICT:
+       case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
                 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
-       case PYX_TRANSPORT_USE_SENSE_REASON:
-               /*
-                * struct se_cmd->scsi_sense_reason already set
-                */
-               break;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0],
-                       cmd->transport_error_status);
+                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
         * transport_send_check_condition_and_sense() after handling
         * possible unsolicited write data payloads.
         */
-       if (!sc && !cmd->se_tfo->new_cmd_map)
-               transport_new_cmd_failure(cmd);
-       else {
-               ret = transport_send_check_condition_and_sense(cmd,
-                               cmd->scsi_sense_reason, 0);
-               if (ret == -EAGAIN || ret == -ENOMEM)
-                       goto queue_full;
-       }
+       ret = transport_send_check_condition_and_sense(cmd,
+                       cmd->scsi_sense_reason, 0);
+       if (ret == -EAGAIN || ret == -ENOMEM)
+               goto queue_full;
 
 check_stop:
        transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_inc(&cmd->se_dev->dev_hoq_count);
-               smp_mb__after_atomic_inc();
                pr_debug("Added HEAD_OF_QUEUE for CDB:"
                        " 0x%02x, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0],
                        cmd->se_ordered_id);
                return 1;
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&cmd->se_dev->ordered_cmd_lock);
-               list_add_tail(&cmd->se_ordered_node,
-                               &cmd->se_dev->ordered_cmd_list);
-               spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
                atomic_inc(&cmd->se_dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
 
-       if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-               transport_generic_request_failure(cmd, 0, 1);
+       if (se_dev_check_online(cmd->se_dev) != 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               transport_generic_request_failure(cmd);
                return 0;
        }
 
@@ -2163,14 +2102,13 @@ check_depth:
        else
                error = dev->transport->do_task(task);
        if (error != 0) {
-               cmd->transport_error_status = error;
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
                atomic_inc(&dev->depth_left);
-               transport_generic_request_failure(cmd, 0, 1);
+               transport_generic_request_failure(cmd);
        }
 
        goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
        return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-       unsigned long flags;
-       /*
-        * Any unsolicited data will get dumped for failed command inside of
-        * the fabric plugin
-        */
-       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-       se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
        unsigned char *cdb,
        struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
 
        /*
         * Everything else assume TYPE_DISK Sector CDB location.
-        * Use 8-bit sector value.
+        * Use 8-bit sector value.  SBC-3 says:
+        *
+        *   A TRANSFER LENGTH field set to zero specifies that 256
+        *   logical blocks shall be written.  Any other value
+        *   specifies the number of logical blocks that shall be
+        *   written.
         */
 type_disk:
-       return (u32)cdb[4];
+       return cdb[4] ? : 256;
 }
 
 static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
        return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-       /*
-        * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-        * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-        * CONFLICT STATUS.
-        *
-        * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-        */
-       if (cmd->se_sess &&
-           cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-               core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-                       cmd->orig_fe_lun, 0x2C,
-                       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-       return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
        return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
         */
        if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
                if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0)
-                       return transport_handle_reservation_conflict(cmd);
+                                       cmd, cdb, pr_reg_type) != 0) {
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EBUSY;
+               }
                /*
                 * This means the CDB is allowed for the SCSI Initiator port
                 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_64(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case XDWRITEREAD_10:
                if ((cmd->data_direction != DMA_TO_DEVICE) ||
-                   !(cmd->t_tasks_bidi))
+                   !(cmd->se_cmd_flags & SCF_BIDI))
                        goto out_invalid_cdb_field;
                sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
                if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
                service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
                         * completion.
                         */
                        cmd->transport_complete_callback = &transport_xor_callback;
-                       cmd->t_tasks_fua = (cdb[10] & 0x8);
+                       if (cdb[10] & 0x8)
+                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_dec(&dev->dev_hoq_count);
-               smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&dev->ordered_cmd_lock);
-               list_del(&cmd->se_ordered_node);
                atomic_dec(&dev->dev_ordered_sync);
                smp_mb__after_atomic_dec();
-               spin_unlock(&dev->ordered_cmd_lock);
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
 
        if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
            (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+               /*
+                * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+                * scatterlists already have been set to follow what the fabric
+                * passes for the original expected data transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                               " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
+               }
 
                cmd->t_data_sg = sgl;
                cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       return ret;
+                       goto out_fail;
        }
 
        /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                task_cdbs = transport_allocate_control_task(cmd);
        }
 
-       if (task_cdbs <= 0)
+       if (task_cdbs < 0)
                goto out_fail;
+       else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               cmd->t_state = TRANSPORT_COMPLETE;
+               atomic_set(&cmd->t_transport_active, 1);
+               INIT_WORK(&cmd->work, target_complete_ok_work);
+               queue_work(target_completion_wq, &cmd->work);
+               return 0;
+       }
 
        if (set_counts) {
                atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        else if (ret < 0)
                return ret;
 
-       return PYX_TRANSPORT_WRITE_PENDING;
+       return 1;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        atomic_inc(&cmd->t_transport_aborted);
                        smp_mb__after_atomic_inc();
-                       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-                       transport_new_cmd_failure(cmd);
-                       return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
        struct se_cmd *cmd;
        struct se_device *dev = (struct se_device *) param;
 
-       set_user_nice(current, -20);
-
        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
                        }
                        ret = cmd->se_tfo->new_cmd_map(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                               0, (cmd->data_direction !=
-                                                   DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                       0, (cmd->data_direction !=
-                                        DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
+                               break;
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
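
transport_get_sectors_6() now follows the SBC-3 rule quoted in the new comment: in a 6-byte CDB a TRANSFER LENGTH of zero means 256 logical blocks, which "cdb[4] ? : 256" (a GCC extension) expresses directly. Here is a self-contained restatement of the same rule without the extension; the READ(6) byte values are made-up examples.

/* Restates the fixed 6-byte CDB rule: TRANSFER LENGTH 0 means 256
 * blocks (SBC-3).  Standalone sketch, not the kernel helper. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sectors_from_cdb6(const uint8_t *cdb)
{
        return cdb[4] ? cdb[4] : 256;   /* same as cdb[4] ? : 256 */
}

int main(void)
{
        uint8_t read6_zero[6] = { 0x08, 0, 0, 0, 0x00, 0 };
        uint8_t read6_one[6]  = { 0x08, 0, 0, 0, 0x01, 0 };

        printf("len 0x00 -> %u blocks\n", sectors_from_cdb6(read6_zero));
        printf("len 0x01 -> %u blocks\n", sectors_from_cdb6(read6_one));
        return 0;
}
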
index 4fac37c4c615263abbfa60594cfcfc3f1917dbc7..71fc9cea5dc9ba120b725a4e0f6bf94c879d1340 100644 (file)
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+               return -ENOMEM; /* Signal QUEUE_FULL */
 
        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
index 5f770412ca403265ddfc19856ef4934470a25c63..9402b7387cac570d91ff6001a885daed3bbd8b6d 100644 (file)
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
        struct ft_lport_acl *lacl = container_of(wwn,
                                struct ft_lport_acl, fc_lport_wwn);
 
-       pr_debug("del lport %s\n",
-                       config_item_name(&wwn->wwn_group.cg_item));
+       pr_debug("del lport %s\n", lacl->name);
        mutex_lock(&ft_lport_lock);
        list_del(&lacl->list);
        mutex_unlock(&ft_lport_lock);
index 4730016d7cd42d8644e8fb5fa8791fc9df080591..45f422ac103fb61678633ee08241b33b7dfe235a 100644 (file)
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
        u32 tmp;
 
        if (!driver || !bind || !driver->setup
-                       || driver->speed != USB_SPEED_HIGH)
+                       || driver->speed < USB_SPEED_HIGH)
                return -EINVAL;
        if (!dev)
                return -ENODEV;
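
Several gadget drivers in this merge relax their speed guard from "driver->speed != USB_SPEED_HIGH" to an ordered comparison such as "driver->speed < USB_SPEED_HIGH" (amd5536_start() above, and fsl_start(), m66592_start(), net2280_start() and others below). This works because enum usb_device_speed is ordered from slower to faster, so the check accepts any driver declaring at least the required speed, including newer super-speed gadget drivers. The enum values in the sketch only mirror that ordering for illustration; see include/linux/usb/ch9.h for the real definitions.

/* Why "speed < USB_SPEED_HIGH" replaces "speed != USB_SPEED_HIGH":
 * the speed enum is ordered, so the check accepts HIGH and anything
 * faster.  Enum values here only mirror ch9.h for the sketch. */
#include <stdio.h>

enum usb_device_speed {
        USB_SPEED_UNKNOWN = 0,
        USB_SPEED_LOW,
        USB_SPEED_FULL,
        USB_SPEED_HIGH,
        USB_SPEED_WIRELESS,
        USB_SPEED_SUPER,
};

static int speed_ok(enum usb_device_speed driver_speed)
{
        return driver_speed >= USB_SPEED_HIGH;   /* i.e. !(speed < HIGH) */
}

int main(void)
{
        printf("full:  %s\n", speed_ok(USB_SPEED_FULL)  ? "accepted" : "rejected");
        printf("high:  %s\n", speed_ok(USB_SPEED_HIGH)  ? "accepted" : "rejected");
        printf("super: %s\n", speed_ok(USB_SPEED_SUPER) ? "accepted" : "rejected");
        return 0;
}
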
index 91fdf790ed20b122bf0a13df0d3c8aed5285ac3c..cf33a8d0fd5df46ec339f5b481b68dc61121243a 100644 (file)
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
        }
        if (!gser->port.in->desc || !gser->port.out->desc) {
                DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
-               if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
-                   !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+               if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+                   config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
                        gser->port.in->desc = NULL;
                        gser->port.out->desc = NULL;
                        return -EINVAL;
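
The gser_set_alt() fix above corrects an inverted test: config_ep_by_speed() follows the usual kernel convention of returning 0 on success and a negative errno on failure, so endpoint configuration has failed only when the call returns non-zero. A small sketch of reading such a return value correctly; the helper body here is a stub standing in for the real composite-gadget function.

/* The gser_set_alt() fix hinges on the 0-on-success convention:
 * treat a non-zero return as failure.  config_ep_by_speed() below
 * is a stub, not the real helper. */
#include <errno.h>
#include <stdio.h>

static int config_ep_by_speed(int want_ok)
{
        return want_ok ? 0 : -EINVAL;   /* 0 = success, -errno = failure */
}

int main(void)
{
        /* wrong (pre-fix) reading: "!ret" treats success as failure */
        if (!config_ep_by_speed(1))
                printf("old test: success misread as failure\n");

        /* correct (post-fix) reading */
        if (config_ep_by_speed(0))
                printf("new test: failure detected\n");
        return 0;
}
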
index 43a49ecc1f36ed1009eb836350bd07800a4d8c09..dcbc0a2e48dde8be9b27a5c429a4c004bfb8aa9c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/fsl_devices.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 
 #include <mach/hardware.h>
 
@@ -88,7 +89,6 @@ eenahb:
 void fsl_udc_clk_finalize(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
        if (cpu_is_mx35()) {
                unsigned int v;
 
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
                                        USBPHYCTRL_OTGBASE_OFFSET));
                }
        }
-#endif
 
        /* ULPI transceivers don't need usbpll */
        if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
index 2a03e4de11c1a277cfe27b670ac48f11cabc006a..e00cf92409ce5114f061801c3d3e29b6b6bae23e 100644 (file)
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                       && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index b3b3d83b7c3354744ec1fc4d4dc2765f19293a7c..dd28ef3def71f394f281b70f8ee2335b629f42e6 100644 (file)
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
                kfree(req);
 }
 
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+       struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+       /* Write dQH next pointer and terminate bit to 0 */
+       qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+                       & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+       /* Clear active and halt bit */
+       qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+                                       | EP_QUEUE_HEAD_STATUS_HALT));
+
+       /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+
+       /* Prime endpoint by writing correct bit to ENDPTPRIME */
+       fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+                       : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
 static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
 {
-       int i = ep_index(ep) * 2 + ep_is_in(ep);
        u32 temp, bitmask, tmp_stat;
-       struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
 
        /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
        VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                        cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
                /* Read prime bit, if 1 goto done */
                if (fsl_readl(&dr_regs->endpointprime) & bitmask)
-                       goto out;
+                       return;
 
                do {
                        /* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
 
                if (tmp_stat)
-                       goto out;
+                       return;
        }
 
-       /* Write dQH next pointer and terminate bit to 0 */
-       temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-       dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
-       /* Clear active and halt bit */
-       temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
-                       | EP_QUEUE_HEAD_STATUS_HALT));
-       dQH->size_ioc_int_sts &= temp;
-
-       /* Ensure that updates to the QH will occur before priming. */
-       wmb();
-
-       /* Prime endpoint by writing 1 to ENDPTPRIME */
-       temp = ep_is_in(ep)
-               ? (1 << (ep_index(ep) + 16))
-               : (1 << (ep_index(ep)));
-       fsl_writel(temp, &dr_regs->endpointprime);
-out:
-       return;
+       fsl_prime_ep(ep, req->head);
 }
 
 /* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
                VDBG("%s, bad ep", __func__);
                return -EINVAL;
        }
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                if (req->req.length > ep->ep.maxpacket)
                        return -EMSGSIZE;
        }
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
                /* The request isn't the last request in this ep queue */
                if (req->queue.next != &ep->queue) {
-                       struct ep_queue_head *qh;
                        struct fsl_req *next_req;
 
-                       qh = ep->qh;
                        next_req = list_entry(req->queue.next, struct fsl_req,
                                        queue);
 
-                       /* Point the QH to the first TD of next request */
-                       fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+                       /* prime with dTD of next request */
+                       fsl_prime_ep(ep, next_req->head);
                }
-
-               /* The request hasn't been processed, patch up the TD chain */
+       /* The request hasn't been processed, patch up the TD chain */
        } else {
                struct fsl_req *prev_req;
 
                prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
-               fsl_writel(fsl_readl(&req->tail->next_td_ptr),
-                               &prev_req->tail->next_td_ptr);
-
+               prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
        }
 
        done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
                goto out;
        }
 
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                status = -EOPNOTSUPP;
                goto out;
        }
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        struct fsl_udc *udc;
        int size = 0;
        u32 bitmask;
-       struct ep_queue_head *d_qh;
+       struct ep_queue_head *qh;
 
        ep = container_of(_ep, struct fsl_ep, ep);
        if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
 
-       d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+       qh = get_qh_by_ep(ep);
 
        bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
            (1 << (ep_index(ep)));
 
        if (fsl_readl(&dr_regs->endptstatus) & bitmask)
-               size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+               size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
                    >> DTD_LENGTH_BIT_POS;
 
        pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                               && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index 1d51be83fda87402d4a77a28ff4fa636d17bff62..f781f5dec41776629584a33a1be5817d8e6778d6 100644 (file)
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
                                        * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
 #define get_pipe_by_ep(EP)     (ep_index(EP) * 2 + ep_is_in(EP))
 
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+       /* we only have one ep0 structure but two queue heads */
+       if (ep_index(ep) != 0)
+               return ep->qh;
+       else
+               return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+                               USB_DIR_IN) ? 1 : 0];
+}
+
 struct platform_device;
 #ifdef CONFIG_ARCH_MXC
 int fsl_udc_clk_init(struct platform_device *pdev);
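
The new get_qh_by_ep() helper encodes the queue-head layout the driver relies on: endpoint 0 has a single struct fsl_ep but two hardware queue heads (slot 0 for OUT, slot 1 for IN, chosen by the current ep0 direction), while every other endpoint uses slot ep_index * 2 + is_in. Here is a sketch of just that index calculation, with assumed direction constants rather than the driver's macros.

/* Index math behind get_qh_by_ep(): queue heads are laid out as
 * [ep0-OUT, ep0-IN, ep1-OUT, ep1-IN, ...]; ep0 picks its slot from
 * the current control-transfer direction.  Constants are assumed. */
#include <stdio.h>

#define DIR_OUT 0
#define DIR_IN  1

static int qh_index(int ep_index, int ep_is_in, int ep0_dir)
{
        if (ep_index != 0)
                return ep_index * 2 + ep_is_in;
        return ep0_dir == DIR_IN ? 1 : 0;       /* one ep0 struct, two QHs */
}

int main(void)
{
        printf("ep0 (current dir IN):  qh %d\n", qh_index(0, 0, DIR_IN));
        printf("ep0 (current dir OUT): qh %d\n", qh_index(0, 0, DIR_OUT));
        printf("ep2 IN:                qh %d\n", qh_index(2, 1, DIR_OUT));
        return 0;
}
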
index 91d0af2a24a8537728fb5c5cb176ff0108095ab5..9aa1cbbee45b64597ef9bcc44d5ba5ed219b2d6a 100644 (file)
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
        int retval;
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !bind
                        || !driver->setup)
                return -EINVAL;
index 7f1bc9a73cda5a1fe45f8c053f426407758e0f50..da2b9d0be3ca0d444d9df8ca5f0b7a9800c3d078 100644 (file)
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
         * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
         * "must not be used in normal operation"
         */
-       if (!driver || driver->speed != USB_SPEED_HIGH
+       if (!driver || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
 
index 24f84b210ce116cfd78e5bae3a1bee383d198ca0..fc719a3f855717b88417aaff8c2e89d9a2723109 100644 (file)
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
        struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
        if (!r8a66597)
index a552453dc94632cacee3ea5212f60aa4563fa519..b31448229f0b26c3a38014cf59e3339060253cb3 100644 (file)
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
                return -EINVAL;
        }
 
-       if (driver->speed != USB_SPEED_HIGH &&
-           driver->speed != USB_SPEED_FULL) {
+       if (driver->speed < USB_SPEED_FULL)
                dev_err(hsotg->dev, "%s: bad speed\n", __func__);
-       }
 
        if (!bind || !driver->setup) {
                dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
index 8d54f893cefe9df7c97363c1812b4f30bb822ff7..20a553b46aedc1d17ecaeaf033ff62eb8f5f472c 100644 (file)
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
        int ret;
 
        if (!driver
-               || (driver->speed != USB_SPEED_FULL &&
-                       driver->speed != USB_SPEED_HIGH)
+               || driver->speed < USB_SPEED_FULL
                || !bind
                || !driver->unbind || !driver->disconnect || !driver->setup)
                return -EINVAL;
index 56a32033adb3485db49484d23619d1f2dcf68071..a60679cbbf858e3c97a978218e16d931635ae1a3 100644 (file)
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
         * jump until after the queue is primed.
         */
        else {
+               int done = 0;
                start = SCHEDULE_SLOP + (now & ~0x07);
 
                /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
                        if (stream->highspeed) {
                                if (itd_slot_ok(ehci, mod, start,
                                                stream->usecs, period))
-                                       break;
+                                       done = 1;
                        } else {
                                if ((start % 8) >= 6)
                                        continue;
                                if (sitd_slot_ok(ehci, mod, stream,
                                                start, sched, period))
-                                       break;
+                                       done = 1;
                        }
-               } while (start > next);
+               } while (start > next && !done);
 
                /* no room in the schedule */
-               if (start == next) {
+               if (!done) {
                        ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
                                urb, now, now + mod);
                        status = -ENOSPC;
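
The iso_stream_schedule() change replaces the bare break with a done flag so that finding a usable slot can be distinguished from exhausting the search window: the old "start == next" test could also be true for a slot that was in fact available. A stripped-down sketch of the flag pattern; slot_ok() is a stand-in for itd_slot_ok()/sitd_slot_ok() and the numbers are arbitrary.

/* The done-flag pattern from iso_stream_schedule(): record success
 * explicitly instead of inferring it from the loop counter, because
 * the counter can land on the same value either way. */
#include <stdio.h>

static int slot_ok(unsigned start)
{
        return (start % 8) == 0;        /* arbitrary stand-in condition */
}

int main(void)
{
        unsigned start = 17, next = 9, period = 1;
        int done = 0;

        do {
                start -= period;
                if (slot_ok(start))
                        done = 1;
        } while (start > next && !done);

        if (!done)
                printf("no room in the schedule\n");
        else
                printf("scheduled at %u\n", start);
        return 0;
}
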
index d6e175428618d4b31f69bd3d323e7c5aef93deae..a403b53e86b9fd3dcc742b9da5759330fbc4d212 100644 (file)
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
 {
        qset->td_start = qset->td_end = qset->ntds = 0;
 
-       qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+       qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
index aa94c01957919001e6a5804c7bc0434b923f20bc..a1afb7c39f7e70c36c12a8bccdc0a0b64b7f29d3 100644 (file)
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
-               memset(seg->trbs, 0, SEGMENT_SIZE);
+               memset(seg->trbs, 0,
+                       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+               seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+                       cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);
 
index c1fa12ec7a9ad456a23c761f5c328a721182a070..b63ab1570103f2219afc5b44bdea08acd0531054 100644 (file)
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
                 */
        }
 
-       musb_save_context(musb);
-
        spin_unlock_irqrestore(&musb->lock, flags);
        return 0;
 }
 
 static int musb_resume_noirq(struct device *dev)
 {
-       struct musb     *musb = dev_to_musb(dev);
-
-       musb_restore_context(musb);
-
        /* for static cmos like DaVinci, register values were preserved
         * unless for some reason the whole soc powered down or the USB
         * module got reset through the PSC (vs just being disabled).
index d51043acfe1abc5013217e70b4854e153f8bdf96..922148ff8d2969de64a808046f7ce06c6115ace1 100644 (file)
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
        unsigned long           flags;
        int                     retval = -EINVAL;
 
-       if (driver->speed != USB_SPEED_HIGH)
+       if (driver->speed < USB_SPEED_HIGH)
                goto err0;
 
        pm_runtime_get_sync(musb->controller);
index d9717e0bc1ff65c6d23cae4bf0693c434d8873fe..7f4e803385702499b70e4a40fce377bc28a1e45f 100644 (file)
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
-       int ret;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->setup      ||
-           driver->speed != USB_SPEED_HIGH)
+           driver->speed < USB_SPEED_FULL)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        /* first hook up the driver ... */
        gpriv->driver = driver;
        gpriv->gadget.dev.driver = &driver->driver;
 
-       ret = device_add(&gpriv->gadget.dev);
-       if (ret) {
-               dev_err(dev, "device_add error %d\n", ret);
-               goto add_fail;
-       }
-
        return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
-       gpriv->driver = NULL;
-       gpriv->gadget.dev.driver = NULL;
-
-       return ret;
 }
 
 static int usbhsg_gadget_stop(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->unbind)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
-       device_del(&gpriv->gadget.dev);
+       gpriv->gadget.dev.driver = NULL;
        gpriv->driver = NULL;
 
        return 0;
@@ -827,6 +806,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
 
 static int usbhsg_stop(struct usbhs_priv *priv)
 {
+       struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+       /* cable disconnect */
+       if (gpriv->driver &&
+           gpriv->driver->disconnect)
+               gpriv->driver->disconnect(&gpriv->gadget);
+
        return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
 }
 
@@ -876,12 +862,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
        /*
         * init gadget
         */
-       device_initialize(&gpriv->gadget.dev);
        dev_set_name(&gpriv->gadget.dev, "gadget");
        gpriv->gadget.dev.parent        = dev;
        gpriv->gadget.name              = "renesas_usbhs_udc";
        gpriv->gadget.ops               = &usbhsg_gadget_ops;
        gpriv->gadget.is_dualspeed      = 1;
+       ret = device_register(&gpriv->gadget.dev);
+       if (ret < 0)
+               goto err_add_udc;
 
        INIT_LIST_HEAD(&gpriv->gadget.ep_list);
 
@@ -912,12 +900,15 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        ret = usb_add_gadget_udc(dev, &gpriv->gadget);
        if (ret)
-               goto err_add_udc;
+               goto err_register;
 
 
        dev_info(dev, "gadget probed\n");
 
        return 0;
+
+err_register:
+       device_unregister(&gpriv->gadget.dev);
 err_add_udc:
        kfree(gpriv->uep);
 
@@ -933,6 +924,8 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
 
        usb_del_gadget_udc(&gpriv->gadget);
 
+       device_unregister(&gpriv->gadget.dev);
+
        usbhsg_controller_unregister(gpriv);
 
        kfree(gpriv->uep);
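
The renesas_usbhs probe change above registers the gadget device inside usbhs_mod_gadget_probe() and unwinds it through a dedicated error label, so each later failure undoes exactly the steps that already succeeded. A small sketch of that goto-based unwind ordering; step_a()/undo_a()/step_b() are hypothetical stand-ins for device_register(), device_unregister() and usb_add_gadget_udc():

#include <stdlib.h>

static int step_a(void) { return 0; }	/* e.g. device_register() */
static void undo_a(void) { }		/* e.g. device_unregister() */
static int step_b(void) { return -1; }	/* e.g. usb_add_gadget_udc() */

static int probe_sketch(void)
{
	void *buf = malloc(64);
	int ret;

	if (!buf)
		return -1;

	ret = step_a();
	if (ret < 0)
		goto err_free;

	ret = step_b();
	if (ret < 0)
		goto err_undo_a;	/* later failures unwind earlier steps */

	return 0;

err_undo_a:
	undo_a();
err_free:
	free(buf);
	return ret;
}
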
index bd4298bb6750d347f825c47026b8de0acb7096a1..ff3db5d056a56484fe594039f5817ac7d6ec4024 100644 (file)
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
index 571fa96b49c7749b983c8d4a8f8228f3500f975c..055b64ef0bbad7ad6dd20200860c6874dd120f1e 100644 (file)
 
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID    0xD739
 
 /* Lenz LI-USB Computer Interface. */
 #define FTDI_LENZ_LIUSB_PID    0xD780
index d865878c9f97449a6168fd69198af63f647ac497..e3426602dc8274dc5536f26e80f74a481eeaacd3 100644 (file)
@@ -661,6 +661,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +750,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
index 3041a974faf39278ef8fad4033e089f8e64b7b9c..24caba79d722a74fd2dba94f9560a7168e4bb26c 100644 (file)
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV(  0x13fe, 0x3600, 0x0100, 0x0100,
+               "Kingston",
+               "DT 101 G2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG ),
+
 /* Reported by Francesco Foresti <frafore@tiscali.it> */
 UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                "Super Top",
index 55f91d9ab00bd18bd01df877eab846a310595956..29577bf1f559070044ee2c9f50d300d86745f78e 100644 (file)
 /* Clock registers available only on Version 2 */
 #define  LCD_CLK_ENABLE_REG                    0x6c
 #define  LCD_CLK_RESET_REG                     0x70
+#define  LCD_CLK_MAIN_RESET                    BIT(3)
 
 #define LCD_NUM_BUFFERS        2
 
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
 {
        u32 reg;
 
+       /* Bring LCDC out of reset */
+       if (lcd_revision == LCD_VERSION_2)
+               lcdc_write(0, LCD_CLK_RESET_REG);
+
        reg = lcdc_read(LCD_RASTER_CTRL_REG);
        if (!(reg & LCD_RASTER_ENABLE))
                lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
        reg = lcdc_read(LCD_RASTER_CTRL_REG);
        if (reg & LCD_RASTER_ENABLE)
                lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+       if (lcd_revision == LCD_VERSION_2)
+               /* Write 1 to reset LCDC */
+               lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
 }
 
 static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
        lcdc_write(0, LCD_DMA_CTRL_REG);
        lcdc_write(0, LCD_RASTER_CTRL_REG);
 
-       if (lcd_revision == LCD_VERSION_2)
+       if (lcd_revision == LCD_VERSION_2) {
                lcdc_write(0, LCD_INT_ENABLE_SET_REG);
+               /* Write 1 to reset */
+               lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+               lcdc_write(0, LCD_CLK_RESET_REG);
+       }
 }
 
 static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
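
On version-2 controllers, the hunks above pulse the new LCD_CLK_MAIN_RESET bit: write it to assert the reset, then write 0 to release it, and lcd_enable_raster() now clears the reset register before enabling the raster. A tiny sketch of that assert/release pulse, with reg_write() as a stand-in for the driver's lcdc_write() helper:

#include <stdint.h>
#include <stdio.h>

#define CLK_RESET_REG	0x70u
#define CLK_MAIN_RESET	(1u << 3)

/* stand-in for the memory-mapped register write (lcdc_write() in the driver) */
static void reg_write(uint32_t val, uint32_t reg)
{
	printf("reg 0x%02x <- 0x%08x\n", reg, val);
}

static void lcdc_reset_pulse(void)
{
	reg_write(CLK_MAIN_RESET, CLK_RESET_REG);	/* assert the reset bit */
	reg_write(0, CLK_RESET_REG);			/* release it again */
}
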
index 0ccd7adf47bb2db54312287fb5191ac8ed4b2b94..6f61e781f15afa94ae4ebbca32b57407b648a47a 100644 (file)
@@ -19,6 +19,7 @@
  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
index 3532782551cb80dfa6ee1254be24d669c2fb28ef..5c81533eacaa6224c3aed27d0c1e72a60616c3d7 100644 (file)
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
        const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
        unsigned long fclk = 0;
 
-       if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
-               if (width != out_width || height != out_height)
-                       return -EINVAL;
-               else
-                       return 0;
-       }
+       if (width == out_width && height == out_height)
+               return 0;
+
+       if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
+               return -EINVAL;
 
        if (out_width < width / maxdownscale ||
                        out_width > width * 8)
index 3262f0f1fa35f395aec7637dc4e61f4fdf5766b1..c56378c555b0907255048e32384c1b5b191bc14f 100644 (file)
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
 unsigned long hdmi_get_pixel_clock(void)
 {
        /* HDMI Pixel Clock in Mhz */
-       return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+       return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
 }
 
 static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
index 69d882cbe7095f0acde27c9e61e12ea3236948d8..c01c1c162726137e19197c112313aa7458a91e0b 100644 (file)
 #define M1200X720_R60_VSP       POSITIVE
 
 /* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP       NEGATIVE
-#define M1200X900_R60_VSP       NEGATIVE
+#define M1200X900_R60_HSP       POSITIVE
+#define M1200X900_R60_VSP       POSITIVE
 
 /* 1280x600@60 Sync Polarity (GTF Mode) */
 #define M1280x600_R60_HSP       NEGATIVE
index 04a5dfcee5a1fd278035bdbc36e86be4b2dd0a45..50634abef9b4a51336e95b8bc2aafb4edb7844a0 100644 (file)
@@ -2369,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 int btrfs_block_rsv_refill(struct btrfs_root *root,
                          struct btrfs_block_rsv *block_rsv,
                          u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                  struct btrfs_block_rsv *block_rsv,
+                                  u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes);
index 930ae8949737313a9cabfaa3de787c844bdecdf6..2ad813674d77e3cb119fa5049c6e5b7407832645 100644 (file)
@@ -3888,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
        return ret;
 }
 
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-                         struct btrfs_block_rsv *block_rsv,
-                         u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+                                          struct btrfs_block_rsv *block_rsv,
+                                          u64 min_reserved, int flush)
 {
        u64 num_bytes = 0;
        int ret = -ENOSPC;
@@ -3909,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
        if (!ret)
                return 0;
 
-       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 0);
                return 0;
@@ -3918,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
        return ret;
 }
 
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+                          struct btrfs_block_rsv *block_rsv,
+                          u64 min_reserved)
+{
+       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                  struct btrfs_block_rsv *block_rsv,
+                                  u64 min_reserved)
+{
+       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                            struct btrfs_block_rsv *dst_rsv,
                            u64 num_bytes)
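
The refactor above funnels both refill variants through a single internal helper that takes a flush flag; btrfs_evict_inode() switches to the no-flush variant further down. A minimal sketch of that wrapper pattern, with reserve_bytes() as an illustrative stand-in for the real reservation call:

#include <stdio.h>

/* stand-in for the underlying reservation primitive */
static int reserve_bytes(unsigned long long bytes, int flush)
{
	printf("reserve %llu bytes, flush=%d\n", bytes, flush);
	return 0;
}

static int refill_common(unsigned long long min_reserved, int flush)
{
	return reserve_bytes(min_reserved, flush);
}

/* normal path: allowed to flush in order to satisfy the reservation */
int refill(unsigned long long min_reserved)
{
	return refill_common(min_reserved, 1);
}

/* no-flush path: for callers that must not trigger flushing */
int refill_noflush(unsigned long long min_reserved)
{
	return refill_common(min_reserved, 0);
}
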
@@ -5093,11 +5107,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
+       struct btrfs_block_group_cache *used_block_group;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
        int done_chunk_alloc = 0;
        struct btrfs_space_info *space_info;
-       int last_ptr_loop = 0;
        int loop = 0;
        int index = 0;
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5159,6 +5173,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
+               used_block_group = block_group;
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if its not cached.
@@ -5196,6 +5211,7 @@ search:
                u64 offset;
                int cached;
 
+               used_block_group = block_group;
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;
 
@@ -5265,84 +5281,73 @@ alloc:
                spin_lock(&block_group->free_space_ctl->tree_lock);
                if (cached &&
                    block_group->free_space_ctl->free_space <
-                   num_bytes + empty_size) {
+                   num_bytes + empty_cluster + empty_size) {
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                /*
-                * Ok we want to try and use the cluster allocator, so lets look
-                * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-                * have tried the cluster allocator plenty of times at this
-                * point and not have found anything, so we are likely way too
-                * fragmented for the clustering stuff to find anything, so lets
-                * just skip it and let the allocator find whatever block it can
-                * find
+                * Ok we want to try and use the cluster allocator, so
+                * let's look there
                 */
-               if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+               if (last_ptr) {
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
-                       if (last_ptr->block_group &&
-                           (last_ptr->block_group->ro ||
-                           !block_group_bits(last_ptr->block_group, data))) {
-                               offset = 0;
+                       used_block_group = last_ptr->block_group;
+                       if (used_block_group != block_group &&
+                           (!used_block_group ||
+                            used_block_group->ro ||
+                            !block_group_bits(used_block_group, data))) {
+                               used_block_group = block_group;
                                goto refill_cluster;
                        }
 
-                       offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-                                                num_bytes, search_start);
+                       if (used_block_group != block_group)
+                               btrfs_get_block_group(used_block_group);
+
+                       offset = btrfs_alloc_from_cluster(used_block_group,
+                         last_ptr, num_bytes, used_block_group->key.objectid);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                goto checks;
                        }
 
-                       spin_lock(&last_ptr->lock);
-                       /*
-                        * whoops, this cluster doesn't actually point to
-                        * this block group.  Get a ref on the block
-                        * group is does point to and try again
-                        */
-                       if (!last_ptr_loop && last_ptr->block_group &&
-                           last_ptr->block_group != block_group &&
-                           index <=
-                                get_block_group_index(last_ptr->block_group)) {
-
-                               btrfs_put_block_group(block_group);
-                               block_group = last_ptr->block_group;
-                               btrfs_get_block_group(block_group);
-                               spin_unlock(&last_ptr->lock);
-                               spin_unlock(&last_ptr->refill_lock);
-
-                               last_ptr_loop = 1;
-                               search_start = block_group->key.objectid;
-                               /*
-                                * we know this block group is properly
-                                * in the list because
-                                * btrfs_remove_block_group, drops the
-                                * cluster before it removes the block
-                                * group from the list
-                                */
-                               goto have_block_group;
+                       WARN_ON(last_ptr->block_group != used_block_group);
+                       if (used_block_group != block_group) {
+                               btrfs_put_block_group(used_block_group);
+                               used_block_group = block_group;
                        }
-                       spin_unlock(&last_ptr->lock);
 refill_cluster:
+                       BUG_ON(used_block_group != block_group);
+                       /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+                        * set up a new cluster, so let's just skip it
+                        * and let the allocator find whatever block
+                        * it can find.  If we reach this point, we
+                        * will have tried the cluster allocator
+                        * plenty of times and not have found
+                        * anything, so we are likely way too
+                        * fragmented for the clustering stuff to find
+                        * anything.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-                       last_ptr_loop = 0;
-
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
-                                              offset, num_bytes,
+                                              search_start, num_bytes,
                                               empty_cluster + empty_size);
                        if (ret == 0) {
                                /*
@@ -5378,6 +5383,7 @@ refill_cluster:
                        goto loop;
                }
 
+unclustered_alloc:
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5404,14 +5410,14 @@ checks:
                search_start = stripe_align(root, offset);
                /* move on to the next group */
                if (search_start + num_bytes >= search_end) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
                /* move on to the next group */
                if (search_start + num_bytes >
-                   block_group->key.objectid + block_group->key.offset) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                   used_block_group->key.objectid + used_block_group->key.offset) {
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5419,14 +5425,14 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+               ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                  alloc_type);
                if (ret == -EAGAIN) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5435,15 +5441,19 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
        }
        up_read(&space_info->groups_sem);
index 9472d3de5e52aab2142766767d863ca46f34147d..49f3c9dc09f4c81902299fd81c62da1ed8423250 100644 (file)
@@ -935,8 +935,10 @@ again:
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
                        this_end = last_start - 1;
 
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                /*
                 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
@@ -2287,14 +2295,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                if (!uptodate) {
                        int failed_mirror;
                        failed_mirror = (int)(unsigned long)bio->bi_bdev;
-                       if (tree->ops && tree->ops->readpage_io_failed_hook)
-                               ret = tree->ops->readpage_io_failed_hook(
-                                               bio, page, start, end,
-                                               failed_mirror, state);
-                       else
-                               ret = bio_readpage_error(bio, page, start, end,
-                                                        failed_mirror, NULL);
+                       /*
+                        * The generic bio_readpage_error handles errors the
+                        * following way: If possible, new read requests are
+                        * created and submitted and will end up in
+                        * end_bio_extent_readpage as well (if we're lucky, not
+                        * in the !uptodate case). In that case it returns 0 and
+                        * we just go on with the next page in our bio. If it
+                        * can't handle the error it will return -EIO and we
+                        * remain responsible for that page.
+                        */
+                       ret = bio_readpage_error(bio, page, start, end,
+                                                       failed_mirror, NULL);
                        if (ret == 0) {
+error_handled:
                                uptodate =
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                                uncache_state(&cached);
                                continue;
                        }
+                       if (tree->ops && tree->ops->readpage_io_failed_hook) {
+                               ret = tree->ops->readpage_io_failed_hook(
+                                                       bio, page, start, end,
+                                                       failed_mirror, state);
+                               if (ret == 0)
+                                       goto error_handled;
+                       }
                }
 
                if (uptodate) {
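
The reordering above tries the generic bio_readpage_error() retry first and falls back to the filesystem's readpage_io_failed_hook only when the generic path cannot handle the failure. A compact sketch of that try-generic-then-hook structure; the handler types are illustrative, not the extent_io API:

typedef int (*read_error_handler)(void *ctx);

/* Returns 0 when either handler recovered (or re-submitted) the read,
 * otherwise a negative value and the failure stays with the caller. */
static int handle_read_error(void *ctx,
			     read_error_handler generic_retry,
			     read_error_handler fs_hook)
{
	if (generic_retry && generic_retry(ctx) == 0)
		return 0;	/* retry was queued; nothing more to do here */

	if (fs_hook && fs_hook(ctx) == 0)
		return 0;	/* filesystem-specific hook handled it */

	return -1;		/* unrecovered; the page stays !uptodate */
}
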
index 6e5b7e4636989661e1e506993dce9f82faee3f35..ec23d43d0c357870a14bf6e79a0ce5baeb93a43d 100644 (file)
@@ -1470,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 {
        info->offset = offset_to_bitmap(ctl, offset);
        info->bytes = 0;
+       INIT_LIST_HEAD(&info->list);
        link_free_space(ctl, info);
        ctl->total_bitmaps++;
 
@@ -2319,6 +2320,7 @@ again:
 
        if (!found) {
                start = i;
+               cluster->max_size = 0;
                found = true;
        }
 
index 526dd51a196689699d9beb7b4e3c52dd8c6e5da8..2c984f7d4c2ac581787ecb6a11962d3e6fd1df75 100644 (file)
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
         * doing the truncate.
         */
        while (1) {
-               ret = btrfs_block_rsv_refill(root, rsv, min_size);
+               ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
 
                /*
                 * Try and steal from the global reserve since we will
index a90e749ed6d265ba8f8f196d494b7faf4cd09fa3..72d461656f606647292657f2367ee438a50d2a40 100644 (file)
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                }
                ret = btrfs_grow_device(trans, device, new_size);
                btrfs_commit_transaction(trans, root);
-       } else {
+       } else if (new_size < old_size) {
                ret = btrfs_shrink_device(device, new_size);
        }
 
index fab420db5121b3c8229a4a2f50cea58e8b6cd022..c27bcb67f3304d806ab7e90cef8c3bcdef78b19c 100644 (file)
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
        btrfs_release_path(swarn->path);
 
        ipath = init_ipath(4096, local_root, swarn->path);
+       if (IS_ERR(ipath)) {
+               ret = PTR_ERR(ipath);
+               ipath = NULL;
+               goto err;
+       }
        ret = paths_from_inode(inum, ipath);
 
        if (ret < 0)
index 17ee7fc5e64e72855f61f52a14d9f3e6000b514a..e28ad4baf483af6b4c7e5d8450dc4a73909d0247 100644 (file)
@@ -1057,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        int i = 0, nr_devices;
        int ret;
 
-       nr_devices = fs_info->fs_devices->rw_devices;
+       nr_devices = fs_info->fs_devices->open_devices;
        BUG_ON(!nr_devices);
 
        devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1079,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        else
                min_stripe_size = BTRFS_STRIPE_LEN;
 
-       list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
-               if (!device->in_fs_metadata)
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               if (!device->in_fs_metadata || !device->bdev)
                        continue;
 
                avail_space = device->total_bytes - device->bytes_used;
index c37433d3cd82464adbe13173433521a4ab1cca14..0a8c8f8304b14c3d9190926eeee3dfa5e3d59d33 100644 (file)
@@ -1611,7 +1611,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;
 
-       bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+       bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
index d6a972df033800eb0c00f6116b3c4b6bb309bd87..8cd4b52d42174ee0a4d524d3176b59102751c61e 100644 (file)
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
        smb_msg.msg_controllen = 0;
 
        for (total_read = 0; to_read; total_read += length, to_read -= length) {
+               try_to_freeze();
+
                if (server_unresponsive(server)) {
                        total_read = -EAGAIN;
                        break;
index cf0b1539b321acf1cdd69e4db0f590d8f83e9293..4dd9283885e745bafdd7dce81e5456f98c5635c9 100644 (file)
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                                         lock->type, lock->netfid, conf_lock);
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
 static int
 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
               __u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
        mutex_unlock(&cinode->lock_mutex);
 }
 
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if no locks prevent us but we need to send a request to the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
 static int
 cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                 bool wait)
@@ -778,6 +791,13 @@ try_again:
        return rc;
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
 static int
 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 {
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
        return rc;
 }
 
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if we need to send a request to the server;
+ * 3) <0, if an error occurs while setting the lock.
+ */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
index 5de03ec20144449c2dea66441b1e66cc577f4a48..a090bbe6ee29e196018867c9f5e4da3efe9d82b9 100644 (file)
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                                 rc);
                        return rc;
                }
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
        }
 
        while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                cFYI(1, "calling findnext2");
                rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
                                  &cifsFile->srch_inf);
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
                if (rc)
                        return -ENOENT;
        }
index 7cacba12b8f114468ef56dab7d58fc5678d0c878..80d850881938d0c0950addc4d97ae4855dadfa4a 100644 (file)
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 {
        int rc;
        int len;
-       __u16 wpwd[129];
+       __le16 wpwd[129];
 
        /* Password cannot be longer than 128 characters */
        if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
                *wpwd = 0; /* Ensure string is null terminated */
        }
 
-       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
-       memset(wpwd, 0, 129 * sizeof(__u16));
+       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+       memset(wpwd, 0, 129 * sizeof(__le16));
 
        return rc;
 }
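
The __u16 to __le16 change above makes it explicit that the NT password buffer holds UTF-16LE code units, so the bytes fed to MD4 are identical on big- and little-endian hosts. A rough sketch of the same idea using an explicit byte-wise encoding (ASCII-only, hypothetical helper, not the CIFS conversion routine):

#include <stddef.h>
#include <stdint.h>

/* Encode an ASCII password as UTF-16LE bytes.  Writing the low byte first
 * pins the byte order of the hash input regardless of host endianness. */
static size_t ascii_to_utf16le(const char *pw, uint8_t *out, size_t max_units)
{
	size_t i;

	for (i = 0; i < max_units && pw[i] != '\0'; i++) {
		out[2 * i]     = (uint8_t)pw[i];	/* low byte */
		out[2 * i + 1] = 0x00;			/* high byte */
	}
	return 2 * i;	/* number of bytes to feed to the digest */
}
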
index 10ba92def3f675985871f0fc72e9c6d7126357a3..89509b5a090e27320e45b9c0c2f5480e082b1a37 100644 (file)
@@ -2439,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 /**
  * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
  * Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
  */
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+                       const struct path *root,
                        char **buffer, int *buflen)
 {
        struct dentry *dentry = path->dentry;
@@ -2483,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
                dentry = parent;
        }
 
-out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
+out:
        br_read_unlock(vfsmount_lock);
        return error;
 
@@ -2500,15 +2498,17 @@ global_root:
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
-       root->mnt = vfsmnt;
-       root->dentry = dentry;
+       if (!slash)
+               error = prepend(buffer, buflen, "/", 1);
+       if (!error)
+               error = vfsmnt->mnt_ns ? 1 : 2;
        goto out;
 }
 
 /**
  * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buf: buffer to return value in
  * @buflen: buffer length
  *
@@ -2519,10 +2519,10 @@ global_root:
  *
  * "buflen" should be positive.
  *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
  */
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+              const struct path *root,
               char *buf, int buflen)
 {
        char *res = buf + buflen;
@@ -2533,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
 
-       if (error)
+       if (error < 0)
+               return ERR_PTR(error);
+       if (error > 0)
+               return NULL;
+       return res;
+}
+
+char *d_absolute_path(const struct path *path,
+              char *buf, int buflen)
+{
+       struct path root = {};
+       char *res = buf + buflen;
+       int error;
+
+       prepend(&res, &buflen, "\0", 1);
+       write_seqlock(&rename_lock);
+       error = prepend_path(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+
+       if (error > 1)
+               error = -EINVAL;
+       if (error < 0)
                return ERR_PTR(error);
        return res;
 }
@@ -2541,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
  */
-static int path_with_deleted(const struct path *path, struct path *root,
-                                char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+                            const struct path *root,
+                            char **buf, int *buflen)
 {
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
@@ -2579,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        /*
@@ -2594,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (error)
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2617,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2625,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (!error && !path_equal(&tmp, &root))
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2758,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
-               struct path tmp = root;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;
 
                prepend(&cwd, &buflen, "\0", 1);
-               error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+               error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
 
-               if (error)
+               if (error < 0)
                        goto out;
 
                /* Unreachable from current root */
-               if (!path_equal(&tmp, &root)) {
+               if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
index fffec40d5996cb3e3b7e38c6565fa1fa91925704..848f436df29f6ffec6e8487549764d98c63d5b6b 100644 (file)
@@ -2807,8 +2807,8 @@ out:
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
        /* queue the work to convert unwritten extents to written */
-       queue_work(wq, &io_end->work);
        iocb->private = NULL;
+       queue_work(wq, &io_end->work);
 
        /* XXX: probably should move into the real I/O completion handler */
        inode_dio_done(inode);
index 6d3a1963879b0f13fd4929195a19c958d9deb4bb..cfc6d4448aa54bdc538131b1e53285a92bb20073 100644 (file)
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (err)
                goto out;
        seq_putc(m, ' ');
-       seq_path_root(m, &mnt_path, &root, " \t\n\\");
-       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
-               /*
-                * Mountpoint is outside root, discard that one.  Ugly,
-                * but less so than trying to do that in iterator in a
-                * race-free way (due to renames).
-                */
-               return SEQ_SKIP;
-       }
+
+       /* mountpoints outside of the chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (err)
+               goto out;
+
        seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
        show_mnt_opts(m, mnt);
 
@@ -2776,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
        }
 }
 EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+       return check_mnt(mnt);
+}
index ed553c60de827e0ebad24e3501e0e00d21c82cfc..3165aebb43c87934b743ecf08e5f02cef586d771 100644 (file)
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
                                           OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
-               goto out;
+               goto out_commit;
        }
 
        dquot_free_space_nodirty(inode,
index c1efe939c774e2c9b909892f6c95434d6da760ee..78b68af3b0e32627b1874277d8ae58003501acb5 100644 (file)
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
        }
 
        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+               /*
+                * Unlock the page and cycle ip_alloc_sem so that we don't
+                * busyloop waiting for ip_alloc_sem to unlock
+                */
                ret = AOP_TRUNCATED_PAGE;
+               unlock_page(page);
+               unlock = 0;
+               down_read(&oi->ip_alloc_sem);
+               up_read(&oi->ip_alloc_sem);
                goto out_inode_unlock;
        }
 
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 {
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        int level;
+       wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
 
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
        if (ocfs2_iocb_is_sem_locked(iocb))
                ocfs2_iocb_clear_sem_locked(iocb);
 
+       if (ocfs2_iocb_is_unaligned_aio(iocb)) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
+
+               if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
+                   waitqueue_active(wq)) {
+                       wake_up_all(wq);
+               }
+       }
+
        ocfs2_iocb_clear_rw_locked(iocb);
 
        level = ocfs2_iocb_rw_locked_level(iocb);
@@ -862,6 +880,12 @@ struct ocfs2_write_ctxt {
        struct page                     *w_pages[OCFS2_MAX_CTXT_PAGES];
        struct page                     *w_target_page;
 
+       /*
+        * w_target_locked is used in the page_mkwrite path to indicate that
+        * w_target_page must not be unlocked in ocfs2_write_end_nolock.
+        */
+       unsigned int                    w_target_locked:1;
+
        /*
         * ocfs2_write_end() uses this to know what the real range to
         * write in the target should be.
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
 
 static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
 {
+       int i;
+
+       /*
+        * w_target_locked is only set to true in the page_mkwrite() case.
+        * The intent is to allow us to lock the target page from write_begin()
+        * to write_end(). The caller must hold a ref on w_target_page.
+        */
+       if (wc->w_target_locked) {
+               BUG_ON(!wc->w_target_page);
+               for (i = 0; i < wc->w_num_pages; i++) {
+                       if (wc->w_target_page == wc->w_pages[i]) {
+                               wc->w_pages[i] = NULL;
+                               break;
+                       }
+               }
+               mark_page_accessed(wc->w_target_page);
+               page_cache_release(wc->w_target_page);
+       }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
 
        brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                         */
                        lock_page(mmap_page);
 
+                       /* Exit and let the caller retry */
                        if (mmap_page->mapping != mapping) {
+                               WARN_ON(mmap_page->mapping);
                                unlock_page(mmap_page);
-                               /*
-                                * Sanity check - the locking in
-                                * ocfs2_pagemkwrite() should ensure
-                                * that this code doesn't trigger.
-                                */
-                               ret = -EINVAL;
-                               mlog_errno(ret);
+                               ret = -EAGAIN;
                                goto out;
                        }
 
                        page_cache_get(mmap_page);
                        wc->w_pages[i] = mmap_page;
+                       wc->w_target_locked = true;
                } else {
                        wc->w_pages[i] = find_or_create_page(mapping, index,
                                                             GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
                        wc->w_target_page = wc->w_pages[i];
        }
 out:
+       if (ret)
+               wc->w_target_locked = false;
        return ret;
 }
 
@@ -1817,11 +1858,23 @@ try_again:
         */
        ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
                                         cluster_of_pages, mmap_page);
-       if (ret) {
+       if (ret && ret != -EAGAIN) {
                mlog_errno(ret);
                goto out_quota;
        }
 
+       /*
+        * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
+        * the target page. In this case, we exit with no error and no target
+        * page. This will trigger the caller, page_mkwrite(), to re-try
+        * the operation.
+        */
+       if (ret == -EAGAIN) {
+               BUG_ON(wc->w_target_page);
+               ret = 0;
+               goto out_quota;
+       }
+
        ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
                                          len);
        if (ret) {
index 75cf3ad987a66d911c15234a803243185ccc5a94..ffb2da370a99d05dd4b919fc64a5483dbc2df7a3 100644 (file)
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
        OCFS2_IOCB_RW_LOCK = 0,
        OCFS2_IOCB_RW_LOCK_LEVEL,
        OCFS2_IOCB_SEM,
+       OCFS2_IOCB_UNALIGNED_IO,
        OCFS2_IOCB_NUM_LOCKS
 };
 
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
        clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
 #define ocfs2_iocb_is_sem_locked(iocb) \
        test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+
+#define ocfs2_iocb_set_unaligned_aio(iocb) \
+       set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_unaligned_aio(iocb) \
+       clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_unaligned_aio(iocb) \
+       test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+
+#define OCFS2_IOEND_WQ_HASH_SZ 37
+#define ocfs2_ioend_wq(v)   (&ocfs2__ioend_wq[((unsigned long)(v)) %\
+                                           OCFS2_IOEND_WQ_HASH_SZ])
+extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
 #endif /* OCFS2_FILE_H */
index 9a3e6bbff27bd4839b487c2c14282fd9ef1a4675..a4e855e3690e6ab844d37788b71649975321cb19 100644 (file)
@@ -216,6 +216,7 @@ struct o2hb_region {
 
        struct list_head        hr_all_item;
        unsigned                hr_unclean_stop:1,
+                               hr_aborted_start:1,
                                hr_item_pinned:1,
                                hr_item_dropped:1;
 
@@ -254,6 +255,10 @@ struct o2hb_region {
         * a more complete api that doesn't lead to this sort of fragility. */
        atomic_t                hr_steady_iterations;
 
+       /* terminate o2hb thread if it does not reach steady state
+        * (hr_steady_iterations == 0) within hr_unsteady_iterations */
+       atomic_t                hr_unsteady_iterations;
+
        char                    hr_dev_name[BDEVNAME_SIZE];
 
        unsigned int            hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
 
 static void o2hb_arm_write_timeout(struct o2hb_region *reg)
 {
+       /* Arm writeout only after thread reaches steady state */
+       if (atomic_read(&reg->hr_steady_iterations) != 0)
+               return;
+
        mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
             O2HB_MAX_WRITE_TIMEOUT_MS);
 
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
        return read == computed;
 }
 
-/* We want to make sure that nobody is heartbeating on top of us --
- * this will help detect an invalid configuration. */
-static void o2hb_check_last_timestamp(struct o2hb_region *reg)
+/*
+ * Compare the slot data with what we wrote in the last iteration.
+ * If the match fails, print an appropriate error message. This is to
+ * detect errors like... another node hearting on the same slot,
+ * flaky device that is losing writes, etc.
+ * Returns 1 if check succeeds, 0 otherwise.
+ */
+static int o2hb_check_own_slot(struct o2hb_region *reg)
 {
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
        slot = &reg->hr_slots[o2nm_this_node()];
        /* Don't check on our 1st timestamp */
        if (!slot->ds_last_time)
-               return;
+               return 0;
 
        hb_block = slot->ds_raw_block;
        if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
            le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
            hb_block->hb_node == slot->ds_node_num)
-               return;
+               return 1;
 
 #define ERRSTR1                "Another node is heartbeating on device"
 #define ERRSTR2                "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
             (unsigned long long)slot->ds_last_time, hb_block->hb_node,
             (unsigned long long)le64_to_cpu(hb_block->hb_generation),
             (unsigned long long)le64_to_cpu(hb_block->hb_seq));
+
+       return 0;
 }
 
 static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
        o2nm_node_put(node);
 }
 
-static void o2hb_set_quorum_device(struct o2hb_region *reg,
-                                  struct o2hb_disk_slot *slot)
+static void o2hb_set_quorum_device(struct o2hb_region *reg)
 {
-       assert_spin_locked(&o2hb_live_lock);
-
        if (!o2hb_global_heartbeat_active())
                return;
 
-       if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+       /* Prevent race with o2hb_heartbeat_group_drop_item() */
+       if (kthread_should_stop())
+               return;
+
+       /* Tag region as quorum only after thread reaches steady state */
+       if (atomic_read(&reg->hr_steady_iterations) != 0)
                return;
 
+       spin_lock(&o2hb_live_lock);
+
+       if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+               goto unlock;
+
        /*
         * A region can be added to the quorum only when it sees all
         * live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
         */
        if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
                   sizeof(o2hb_live_node_bitmap)))
-               return;
-
-       if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
-               return;
+               goto unlock;
 
-       printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n",
-              config_item_name(&reg->hr_item));
+       printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
+              config_item_name(&reg->hr_item), reg->hr_dev_name);
 
        set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
 
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
        if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
                           O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
                o2hb_region_unpin(NULL);
+unlock:
+       spin_unlock(&o2hb_live_lock);
 }
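
The quorum eligibility test above is essentially a bitmap comparison: the set of nodes the region has seen heartbeating must equal the set of globally live nodes. A small stand-alone sketch of that comparison, with simplified bitmap helpers standing in for the kernel API:

/* Sketch: a region qualifies as a quorum device only when its live bitmap
 * equals the global live-node bitmap. */
#include <stdio.h>
#include <string.h>

#define MAX_NODES	255
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	((MAX_NODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_node(unsigned long *map, int node)
{
	map[node / BITS_PER_LONG] |= 1UL << (node % BITS_PER_LONG);
}

static int region_sees_all_live_nodes(const unsigned long *region_map,
				      const unsigned long *global_map)
{
	return !memcmp(region_map, global_map,
		       BITMAP_LONGS * sizeof(unsigned long));
}

int main(void)
{
	unsigned long region[BITMAP_LONGS] = { 0 }, global[BITMAP_LONGS] = { 0 };

	set_node(global, 0);
	set_node(global, 4);
	set_node(region, 0);
	printf("eligible: %d\n", region_sees_all_live_nodes(region, global));
	set_node(region, 4);		/* now every live node is seen */
	printf("eligible: %d\n", region_sees_all_live_nodes(region, global));
	return 0;
}
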
 
 static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
                slot->ds_equal_samples = 0;
        }
 out:
-       o2hb_set_quorum_device(reg, slot);
-
        spin_unlock(&o2hb_live_lock);
 
        o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
 
 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 {
-       int i, ret, highest_node, change = 0;
+       int i, ret, highest_node;
+       int membership_change = 0, own_slot_ok = 0;
        unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
                                       sizeof(configured_nodes));
        if (ret) {
                mlog_errno(ret);
-               return ret;
+               goto bail;
        }
 
        /*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 
        highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
        if (highest_node >= O2NM_MAX_NODES) {
-               mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
-               return -EINVAL;
+               mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
+               ret = -EINVAL;
+               goto bail;
        }
 
        /* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
        ret = o2hb_read_slots(reg, highest_node + 1);
        if (ret < 0) {
                mlog_errno(ret);
-               return ret;
+               goto bail;
        }
 
        /* With an up to date view of the slots, we can check that no
         * other node has been improperly configured to heartbeat in
         * our slot. */
-       o2hb_check_last_timestamp(reg);
+       own_slot_ok = o2hb_check_own_slot(reg);
 
        /* fill in the proper info for our next heartbeat */
        o2hb_prepare_block(reg, reg->hr_generation);
 
-       /* And fire off the write. Note that we don't wait on this I/O
-        * until later. */
        ret = o2hb_issue_node_write(reg, &write_wc);
        if (ret < 0) {
                mlog_errno(ret);
-               return ret;
+               goto bail;
        }
 
        i = -1;
        while((i = find_next_bit(configured_nodes,
                                 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
-               change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+               membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
        }
 
        /*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
                 * disk */
                mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
                     write_wc.wc_error, reg->hr_dev_name);
-               return write_wc.wc_error;
+               ret = write_wc.wc_error;
+               goto bail;
        }
 
-       o2hb_arm_write_timeout(reg);
+       /* Skip re-arming the write timeout if own slot has stale/bad data */
+       if (own_slot_ok) {
+               o2hb_set_quorum_device(reg);
+               o2hb_arm_write_timeout(reg);
+       }
 
+bail:
        /* let the person who launched us know when things are steady */
-       if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
-               if (atomic_dec_and_test(&reg->hr_steady_iterations))
+       if (atomic_read(&reg->hr_steady_iterations) != 0) {
+               if (!ret && own_slot_ok && !membership_change) {
+                       if (atomic_dec_and_test(&reg->hr_steady_iterations))
+                               wake_up(&o2hb_steady_queue);
+               }
+       }
+
+       if (atomic_read(&reg->hr_steady_iterations) != 0) {
+               if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
+                       printk(KERN_NOTICE "o2hb: Unable to stabilize "
+                              "heartbeat on region %s (%s)\n",
+                              config_item_name(&reg->hr_item),
+                              reg->hr_dev_name);
+                       atomic_set(&reg->hr_steady_iterations, 0);
+                       reg->hr_aborted_start = 1;
                        wake_up(&o2hb_steady_queue);
+                       ret = -EIO;
+               }
        }
 
-       return 0;
+       return ret;
 }
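
The steady/unsteady bookkeeping at the tail of this function can be modelled with two plain counters: a clean iteration (no I/O error, own slot intact, no membership change) counts toward stabilization, every other pass burns one of the unsteady attempts, and the start is aborted if the unsteady budget runs out first. A rough user-space model, not the kernel code:

/* User-space model of the steady/unsteady accounting: a start succeeds when
 * 'steady' reaches zero before the 'unsteady' budget is exhausted. */
#include <stdio.h>

struct region_model {
	int steady;		/* clean iterations still required */
	int unsteady;		/* iterations allowed before giving up */
	int aborted;
};

/* clean == 1 models: no I/O error, own slot intact, no membership change */
static int heartbeat_iteration(struct region_model *r, int clean)
{
	if (r->steady == 0)
		return 0;			/* already steady */
	if (clean && --r->steady == 0)
		return 0;			/* stabilized */
	if (--r->unsteady == 0) {
		r->aborted = 1;			/* too many unsteady iterations */
		r->steady = 0;
		return -1;
	}
	return 1;				/* keep trying */
}

int main(void)
{
	struct region_model r = { .steady = 3, .unsteady = 6 };
	int clean[] = { 1, 0, 1, 1 };		/* one dirty iteration in the middle */
	int i;

	for (i = 0; i < 4; i++)
		printf("iter %d -> %d (steady=%d unsteady=%d aborted=%d)\n",
		       i, heartbeat_iteration(&r, clean[i]),
		       r.steady, r.unsteady, r.aborted);
	return 0;
}
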
 
 /* Subtract b from a, storing the result in a. a *must* have a larger
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
        /* Pin node */
        o2nm_depend_this_node();
 
-       while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+       while (!kthread_should_stop() &&
+              !reg->hr_unclean_stop && !reg->hr_aborted_start) {
                /* We track the time spent inside
                 * o2hb_do_disk_heartbeat so that we avoid more than
                 * hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
                 * likely to time itself out. */
                do_gettimeofday(&before_hb);
 
-               i = 0;
-               do {
-                       ret = o2hb_do_disk_heartbeat(reg);
-               } while (ret && ++i < 2);
+               ret = o2hb_do_disk_heartbeat(reg);
 
                do_gettimeofday(&after_hb);
                elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
                     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
                     elapsed_msec);
 
-               if (elapsed_msec < reg->hr_timeout_ms) {
+               if (!kthread_should_stop() &&
+                   elapsed_msec < reg->hr_timeout_ms) {
                        /* the kthread api has blocked signals for us so no
                         * need to record the return value. */
                        msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
         * to timeout on this region when we could just as easily
         * write a clear generation - thus indicating to them that
         * this node has left this region.
-        *
-        * XXX: Should we skip this on unclean_stop? */
-       o2hb_prepare_block(reg, 0);
-       ret = o2hb_issue_node_write(reg, &write_wc);
-       if (ret == 0) {
-               o2hb_wait_on_io(reg, &write_wc);
-       } else {
-               mlog_errno(ret);
+        */
+       if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
+               o2hb_prepare_block(reg, 0);
+               ret = o2hb_issue_node_write(reg, &write_wc);
+               if (ret == 0)
+                       o2hb_wait_on_io(reg, &write_wc);
+               else
+                       mlog_errno(ret);
        }
 
        /* Unpin node */
        o2nm_undepend_this_node();
 
-       mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+       mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
 
        return 0;
 }
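
The loop above paces the writes: each pass measures how long the disk heartbeat took and sleeps only for the remainder of the period. A user-space sketch of that pacing, with an arbitrary 2-second period standing in for hr_timeout_ms:

/* Pacing sketch: measure how long one heartbeat took, sleep for the rest
 * of the period. 2000 ms is an arbitrary stand-in for hr_timeout_ms. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000 +
	       (b->tv_nsec - a->tv_nsec) / 1000000;
}

int main(void)
{
	const long period_ms = 2000;
	int i;

	for (i = 0; i < 3; i++) {
		struct timespec before, after;
		long spent;

		clock_gettime(CLOCK_MONOTONIC, &before);
		usleep(300 * 1000);		/* stand-in for one disk heartbeat */
		clock_gettime(CLOCK_MONOTONIC, &after);

		spent = elapsed_ms(&before, &after);
		printf("iteration %d took %ld ms\n", i, spent);
		if (spent < period_ms)
			usleep((period_ms - spent) * 1000);
	}
	return 0;
}
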
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
        struct o2hb_debug_buf *db = inode->i_private;
        struct o2hb_region *reg;
        unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       unsigned long lts;
        char *buf = NULL;
        int i = -1;
        int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
 
        case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
                reg = (struct o2hb_region *)db->db_data;
-               out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
-                               jiffies_to_msecs(jiffies -
-                                                reg->hr_last_timeout_start));
+               lts = reg->hr_last_timeout_start;
+               /* If 0, it has never been set before */
+               if (lts)
+                       lts = jiffies_to_msecs(jiffies - lts);
+               out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
                goto done;
 
        case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
        struct page *page;
        struct o2hb_region *reg = to_o2hb_region(item);
 
+       mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
+
        if (reg->hr_tmp_block)
                kfree(reg->hr_tmp_block);
 
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                        live_threshold <<= 1;
                spin_unlock(&o2hb_live_lock);
        }
-       atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
+       ++live_threshold;
+       atomic_set(&reg->hr_steady_iterations, live_threshold);
+       /* unsteady_iterations is double the steady_iterations */
+       atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
 
        hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
                              reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        ret = wait_event_interruptible(o2hb_steady_queue,
                                atomic_read(&reg->hr_steady_iterations) == 0);
        if (ret) {
-               /* We got interrupted (hello ptrace!).  Clean up */
-               spin_lock(&o2hb_live_lock);
-               hb_task = reg->hr_task;
-               reg->hr_task = NULL;
-               spin_unlock(&o2hb_live_lock);
+               atomic_set(&reg->hr_steady_iterations, 0);
+               reg->hr_aborted_start = 1;
+       }
 
-               if (hb_task)
-                       kthread_stop(hb_task);
+       if (reg->hr_aborted_start) {
+               ret = -EIO;
                goto out;
        }
 
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                ret = -EIO;
 
        if (hb_task && o2hb_global_heartbeat_active())
-               printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
-                      config_item_name(&reg->hr_item));
+               printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
+                      config_item_name(&reg->hr_item), reg->hr_dev_name);
 
 out:
        if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
 
        /* stop the thread when the user removes the region dir */
        spin_lock(&o2hb_live_lock);
-       if (o2hb_global_heartbeat_active()) {
-               clear_bit(reg->hr_region_num, o2hb_region_bitmap);
-               clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
-               if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
-                       quorum_region = 1;
-               clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
-       }
        hb_task = reg->hr_task;
        reg->hr_task = NULL;
        reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
        if (hb_task)
                kthread_stop(hb_task);
 
+       if (o2hb_global_heartbeat_active()) {
+               spin_lock(&o2hb_live_lock);
+               clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+               clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+               if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+                       quorum_region = 1;
+               clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+               spin_unlock(&o2hb_live_lock);
+               printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
+                      ((atomic_read(&reg->hr_steady_iterations) == 0) ?
+                       "stopped" : "start aborted"), config_item_name(item),
+                      reg->hr_dev_name);
+       }
+
        /*
         * If we're racing a dev_write(), we need to wake them.  They will
         * check reg->hr_task
         */
        if (atomic_read(&reg->hr_steady_iterations) != 0) {
+               reg->hr_aborted_start = 1;
                atomic_set(&reg->hr_steady_iterations, 0);
                wake_up(&o2hb_steady_queue);
        }
 
-       if (o2hb_global_heartbeat_active())
-               printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
-                      config_item_name(&reg->hr_item));
-
        config_item_put(item);
 
        if (!o2hb_global_heartbeat_active() || !quorum_region)
index 3a5835904b3db4d522c561908171f528bb74f6bd..dc45deb19e6885e56a1f5be46cbd39444c46f810 100644 (file)
@@ -47,6 +47,7 @@
 #define SC_DEBUG_NAME          "sock_containers"
 #define NST_DEBUG_NAME         "send_tracking"
 #define STATS_DEBUG_NAME       "stats"
+#define NODES_DEBUG_NAME       "connected_nodes"
 
 #define SHOW_SOCK_CONTAINERS   0
 #define SHOW_SOCK_STATS                1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
 static struct dentry *sc_dentry;
 static struct dentry *nst_dentry;
 static struct dentry *stats_dentry;
+static struct dentry *nodes_dentry;
 
 static DEFINE_SPINLOCK(o2net_debug_lock);
 
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
        .release = sc_fop_release,
 };
 
-int o2net_debugfs_init(void)
+static int o2net_fill_bitmap(char *buf, int len)
 {
-       o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
-       if (!o2net_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       int i = -1, out = 0;
 
-       nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
-                                        o2net_dentry, NULL,
-                                        &nst_seq_fops);
-       if (!nst_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       o2net_fill_node_map(map, sizeof(map));
 
-       sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
-                                       o2net_dentry, NULL,
-                                       &sc_seq_fops);
-       if (!sc_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
+               out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+       out += snprintf(buf + out, PAGE_SIZE - out, "\n");
 
-       stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
-                                          o2net_dentry, NULL,
-                                          &stats_seq_fops);
-       if (!stats_dentry) {
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
+       return out;
+}
+
+static int nodes_fop_open(struct inode *inode, struct file *file)
+{
+       char *buf;
+
+       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
+
+       file->private_data = buf;
 
        return 0;
-bail:
-       debugfs_remove(stats_dentry);
-       debugfs_remove(sc_dentry);
-       debugfs_remove(nst_dentry);
-       debugfs_remove(o2net_dentry);
-       return -ENOMEM;
 }
 
+static int o2net_debug_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
+
+static ssize_t o2net_debug_read(struct file *file, char __user *buf,
+                               size_t nbytes, loff_t *ppos)
+{
+       return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+                                      i_size_read(file->f_mapping->host));
+}
+
+static const struct file_operations nodes_fops = {
+       .open           = nodes_fop_open,
+       .release        = o2net_debug_release,
+       .read           = o2net_debug_read,
+       .llseek         = generic_file_llseek,
+};
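
These file operations follow a common debugfs idiom: render the whole answer into a buffer at open time and serve reads from that snapshot. A user-space analogue of the same pattern, with no debugfs involved, just a buffer and an offset:

/* Snapshot-at-open read pattern: build the text once, then hand out ranges. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct snapshot {
	char *buf;
	size_t len;
};

static struct snapshot *snapshot_open(const char *nodes)
{
	struct snapshot *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->buf = strdup(nodes);		/* stand-in for o2net_fill_bitmap() */
	if (!s->buf) {
		free(s);
		return NULL;
	}
	s->len = strlen(s->buf);
	return s;
}

static size_t snapshot_read(struct snapshot *s, char *out, size_t n, size_t *pos)
{
	size_t left = *pos < s->len ? s->len - *pos : 0;
	size_t cnt = n < left ? n : left;

	memcpy(out, s->buf + *pos, cnt);
	*pos += cnt;
	return cnt;
}

int main(void)
{
	struct snapshot *s = snapshot_open("0 2 5\n");
	char chunk[4];
	size_t pos = 0, n;

	if (!s)
		return 1;
	while ((n = snapshot_read(s, chunk, sizeof(chunk) - 1, &pos)) > 0) {
		chunk[n] = '\0';
		printf("read %zu bytes: \"%s\"\n", n, chunk);
	}
	free(s->buf);
	free(s);
	return 0;
}
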
+
 void o2net_debugfs_exit(void)
 {
+       debugfs_remove(nodes_dentry);
        debugfs_remove(stats_dentry);
        debugfs_remove(sc_dentry);
        debugfs_remove(nst_dentry);
        debugfs_remove(o2net_dentry);
 }
 
+int o2net_debugfs_init(void)
+{
+       mode_t mode = S_IFREG|S_IRUSR;
+
+       o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
+       if (o2net_dentry)
+               nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &nst_seq_fops);
+       if (nst_dentry)
+               sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &sc_seq_fops);
+       if (sc_dentry)
+               stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &stats_seq_fops);
+       if (stats_dentry)
+               nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
+                                       o2net_dentry, NULL, &nodes_fops);
+       if (nodes_dentry)
+               return 0;
+
+       o2net_debugfs_exit();
+       mlog_errno(-ENOMEM);
+       return -ENOMEM;
+}
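
The rewritten init above chains each creation behind the success of the previous one and reuses the exit routine as the single error path. The shape of that pattern, reduced to plain allocations (nothing here is o2net API):

/* Cascaded setup with one teardown path; malloc() stands in for
 * debugfs_create_file(), and teardown() is safe on partial setups. */
#include <stdio.h>
#include <stdlib.h>

static void *a, *b, *c;

static void teardown(void)
{
	free(c); c = NULL;
	free(b); b = NULL;
	free(a); a = NULL;	/* free(NULL) is a no-op */
}

static int setup(void)
{
	a = malloc(16);
	if (a)
		b = malloc(16);
	if (b)
		c = malloc(16);
	if (c)
		return 0;

	teardown();		/* undo whatever subset was created */
	return -1;
}

int main(void)
{
	int ret = setup();

	printf("setup: %d\n", ret);
	if (!ret)
		teardown();
	return 0;
}
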
+
 #endif /* CONFIG_DEBUG_FS */
index ad7d0c155de41a3912b5a6fa330b28ca48edf790..044e7b58d31c7662a75e29f636cfb86bf2846b67 100644 (file)
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
        }
 
        if (was_valid && !valid) {
-               printk(KERN_NOTICE "o2net: no longer connected to "
+               printk(KERN_NOTICE "o2net: No longer connected to "
                       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
                o2net_complete_nodes_nsw(nn);
        }
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
                cancel_delayed_work(&nn->nn_connect_expired);
                printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
                       o2nm_this_node() > sc->sc_node->nd_num ?
-                               "connected to" : "accepted connection from",
+                      "Connected to" : "Accepted connection from",
                       SC_NODEF_ARGS(sc));
        }
 
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
                        o2net_sc_queue_work(sc, &sc->sc_connect_work);
                        break;
                default:
-                       printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+                       printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
                              " shutdown, state %d\n",
                              SC_NODEF_ARGS(sc), sk->sk_state);
                        o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
        return ret;
 }
 
+/* Get a map of all nodes to which this node is currently connected */
+void o2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+       struct o2net_sock_container *sc;
+       int node, ret;
+
+       BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+       memset(map, 0, bytes);
+       for (node = 0; node < O2NM_MAX_NODES; ++node) {
+               o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+               if (!ret) {
+                       set_bit(node, map);
+                       sc_put(sc);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(o2net_fill_node_map);
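
A caller of this helper is expected to pass a bitmap of BITS_TO_LONGS(O2NM_MAX_NODES) longs, as the debugfs code above does. A user-space sketch of filling and walking such a bitmap (connectivity is faked, and the helpers are simplified stand-ins for the kernel bitmap API):

/* Fill a node bitmap from per-node connectivity, then walk the set bits. */
#include <stdio.h>
#include <string.h>

#define MAX_NODES	255
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITMAP_LONGS	((MAX_NODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static int node_is_connected(int node)
{
	return node == 0 || node == 3 || node == 31;	/* fake topology */
}

int main(void)
{
	unsigned long map[BITMAP_LONGS];
	int node;

	memset(map, 0, sizeof(map));
	for (node = 0; node < MAX_NODES; node++)
		if (node_is_connected(node))
			map[node / BITS_PER_LONG] |= 1UL << (node % BITS_PER_LONG);

	for (node = 0; node < MAX_NODES; node++)	/* walk the set bits */
		if (map[node / BITS_PER_LONG] & (1UL << (node % BITS_PER_LONG)))
			printf("%d ", node);
	printf("\n");
	return 0;
}
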
+
 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
                           size_t caller_veclen, u8 target_node, int *status)
 {
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
        if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
-                    "version %llu but %llu is required, disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    (unsigned long long)be64_to_cpu(hand->protocol_version),
-                    O2NET_PROTOCOL_VERSION);
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
+                      "protocol version %llu but %llu is required. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      (unsigned long long)be64_to_cpu(hand->protocol_version),
+                      O2NET_PROTOCOL_VERSION);
 
                /* don't bother reconnecting if its the wrong version. */
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
         */
        if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
                                o2net_idle_timeout()) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
-                    "%u ms, but we use %u ms locally.  disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    be32_to_cpu(hand->o2net_idle_timeout_ms),
-                    o2net_idle_timeout());
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
+                      "idle timeout of %u ms, but we use %u ms locally. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      be32_to_cpu(hand->o2net_idle_timeout_ms),
+                      o2net_idle_timeout());
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
                return -1;
        }
 
        if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
                        o2net_keepalive_delay()) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
-                    "%u ms, but we use %u ms locally.  disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    be32_to_cpu(hand->o2net_keepalive_delay_ms),
-                    o2net_keepalive_delay());
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
+                      "delay of %u ms, but we use %u ms locally. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      be32_to_cpu(hand->o2net_keepalive_delay_ms),
+                      o2net_keepalive_delay());
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
                return -1;
        }
 
        if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
                        O2HB_MAX_WRITE_TIMEOUT_MS) {
-               mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
-                    "%u ms, but we use %u ms locally.  disconnecting\n",
-                    SC_NODEF_ARGS(sc),
-                    be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
-                    O2HB_MAX_WRITE_TIMEOUT_MS);
+               printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
+                      "timeout of %u ms, but we use %u ms locally. "
+                      "Disconnecting.\n", SC_NODEF_ARGS(sc),
+                      be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
+                      O2HB_MAX_WRITE_TIMEOUT_MS);
                o2net_ensure_shutdown(nn, sc, -ENOTCONN);
                return -1;
        }
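
Each of these checks enforces the same rule: both ends of the connection must agree on the protocol version and on every timing parameter, otherwise the socket is shut down rather than run with mismatched assumptions. A compact model of that comparison; the numeric values are arbitrary examples, not the real defaults:

/* Both ends must agree on version and timeouts or the connection is dropped. */
#include <stdint.h>
#include <stdio.h>

struct handshake {
	uint64_t protocol_version;
	uint32_t idle_timeout_ms;
	uint32_t keepalive_delay_ms;
	uint32_t hb_timeout_ms;
};

static int handshake_compatible(const struct handshake *ours,
				const struct handshake *theirs)
{
	return ours->protocol_version   == theirs->protocol_version &&
	       ours->idle_timeout_ms    == theirs->idle_timeout_ms &&
	       ours->keepalive_delay_ms == theirs->keepalive_delay_ms &&
	       ours->hb_timeout_ms      == theirs->hb_timeout_ms;
}

int main(void)
{
	struct handshake us   = { 11, 30000, 2000, 2000 };
	struct handshake peer = { 11, 30000, 2000, 5000 };	/* hb timeout differs */

	printf("compatible: %d\n", handshake_compatible(&us, &peer));
	return 0;
}
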
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
 {
        struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-
 #ifdef CONFIG_DEBUG_FS
-       ktime_t now = ktime_get();
+       unsigned long msecs = ktime_to_ms(ktime_get()) -
+               ktime_to_ms(sc->sc_tv_timer);
+#else
+       unsigned long msecs = o2net_idle_timeout();
 #endif
 
-       printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
-            "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
-                    o2net_idle_timeout() / 1000,
-                    o2net_idle_timeout() % 1000);
-
-#ifdef CONFIG_DEBUG_FS
-       mlog(ML_NOTICE, "Here are some times that might help debug the "
-            "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
-            "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
-            (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
-            (long long)ktime_to_us(sc->sc_tv_data_ready),
-            (long long)ktime_to_us(sc->sc_tv_advance_start),
-            (long long)ktime_to_us(sc->sc_tv_advance_stop),
-            sc->sc_msg_key, sc->sc_msg_type,
-            (long long)ktime_to_us(sc->sc_tv_func_start),
-            (long long)ktime_to_us(sc->sc_tv_func_stop));
-#endif
+       printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
+              "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
+              msecs / 1000, msecs % 1000);
 
        /*
         * Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
 
 out:
        if (ret) {
-               mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
-                    "with errno %d\n", SC_NODEF_ARGS(sc), ret);
+               printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
+                      " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
                /* 0 err so that another will be queued and attempted
                 * from set_nn_state */
                if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
 
        spin_lock(&nn->nn_lock);
        if (!nn->nn_sc_valid) {
-               mlog(ML_ERROR, "no connection established with node %u after "
-                    "%u.%u seconds, giving up and returning errors.\n",
+               printk(KERN_NOTICE "o2net: No connection established with "
+                      "node %u after %u.%u seconds, giving up.\n",
                     o2net_num_from_nn(nn),
                     o2net_idle_timeout() / 1000,
                     o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
 
        node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
        if (node == NULL) {
-               mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n",
-                    &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+               printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
+                      "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+                      ntohs(sin.sin_port));
                ret = -EINVAL;
                goto out;
        }
 
        if (o2nm_this_node() >= node->nd_num) {
                local_node = o2nm_get_node_by_num(o2nm_this_node());
-               mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
-                    "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
-                    local_node->nd_name, local_node->nd_num,
-                    &(local_node->nd_ipv4_address),
-                    ntohs(local_node->nd_ipv4_port),
-                    node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
-                    ntohs(sin.sin_port));
+               printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
+                      "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+                      "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+                      &(local_node->nd_ipv4_address),
+                      ntohs(local_node->nd_ipv4_port), node->nd_name,
+                      node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
                ret = -EINVAL;
                goto out;
        }
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
                ret = 0;
        spin_unlock(&nn->nn_lock);
        if (ret) {
-               mlog(ML_NOTICE, "attempt to connect from node '%s' at "
-                    "%pI4:%d but it already has an open connection\n",
-                    node->nd_name, &sin.sin_addr.s_addr,
-                    ntohs(sin.sin_port));
+               printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
+                      "at %pI4:%d but it already has an open connection\n",
+                      node->nd_name, &sin.sin_addr.s_addr,
+                      ntohs(sin.sin_port));
                goto out;
        }
 
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
 
        ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0) {
-               mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
+               printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
                goto out;
        }
 
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
        sock->sk->sk_reuse = 1;
        ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
        if (ret < 0) {
-               mlog(ML_ERROR, "unable to bind socket at %pI4:%u, "
-                    "ret=%d\n", &addr, ntohs(port), ret);
+               printk(KERN_ERR "o2net: Error %d while binding socket at "
+                      "%pI4:%u\n", ret, &addr, ntohs(port)); 
                goto out;
        }
 
        ret = sock->ops->listen(sock, 64);
-       if (ret < 0) {
-               mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n",
-                    &addr, ntohs(port), ret);
-       }
+       if (ret < 0)
+               printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
+                      ret, &addr, ntohs(port));
 
 out:
        if (ret) {
index fd6179eb26d4cd2cfb43f4ff053237837712113e..5bada2a69b503cd365d626a25e4ae6d08ad50b7a 100644 (file)
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
                           struct list_head *unreg_list);
 void o2net_unregister_handler_list(struct list_head *list);
 
+void o2net_fill_node_map(unsigned long *map, unsigned bytes);
+
 struct o2nm_node;
 int o2net_register_hb_callbacks(void);
 void o2net_unregister_hb_callbacks(void);
index e2878b5895fb543a86c11f0128b025dbb0335c38..8fe4e2892ab9ccd983304a825c338df57a5376b8 100644 (file)
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
                        if (pde)
                                le16_add_cpu(&pde->rec_len,
                                                le16_to_cpu(de->rec_len));
-                       else
-                               de->inode = 0;
+                       de->inode = 0;
                        dir->i_version++;
                        ocfs2_journal_dirty(handle, bh);
                        goto bail;
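
The deletion path above coalesces the removed entry into its predecessor by growing the predecessor's record length, and now always clears the removed entry's inode number. A stand-alone sketch of that bookkeeping; the struct is a toy, not ocfs2's on-disk dirent:

/* Toy model of unlinking a directory entry by record-length coalescing. */
#include <stdint.h>
#include <stdio.h>

struct dirent_model {
	uint64_t inode;
	uint16_t rec_len;
};

static void delete_entry(struct dirent_model *prev, struct dirent_model *de)
{
	if (prev)
		prev->rec_len += de->rec_len;	/* predecessor swallows the slot */
	de->inode = 0;				/* always mark the entry unused */
}

int main(void)
{
	struct dirent_model prev = { .inode = 12, .rec_len = 16 };
	struct dirent_model de   = { .inode = 34, .rec_len = 24 };

	delete_entry(&prev, &de);
	printf("prev.rec_len=%u de.inode=%llu\n",
	       (unsigned)prev.rec_len, (unsigned long long)de.inode);
	return 0;
}
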
index d602abb51b610d525cc437daa05c25d2105fe0c3..a5952ceecba5a83147389ad4a1cd24972ee0bfbe 100644 (file)
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
 void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
 
 void dlm_put(struct dlm_ctxt *dlm);
 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
        kref_get(&res->refs);
 }
 void dlm_lockres_put(struct dlm_lock_resource *res);
-void __dlm_unhash_lockres(struct dlm_lock_resource *res);
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
-                         struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
                                                     const char *name,
                                                     unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen);
 
-#define dlm_lockres_set_refmap_bit(bit,res)  \
-       __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
-#define dlm_lockres_clear_refmap_bit(bit,res)  \
-       __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+                               struct dlm_lock_resource *res, int bit);
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+                                 struct dlm_lock_resource *res, int bit);
 
-static inline void __dlm_lockres_set_refmap_bit(int bit,
-                                               struct dlm_lock_resource *res,
-                                               const char *file,
-                                               int line)
-{
-       //printk("%s:%d:%.*s: setting bit %d\n", file, line,
-       //     res->lockname.len, res->lockname.name, bit);
-       set_bit(bit, res->refmap);
-}
-
-static inline void __dlm_lockres_clear_refmap_bit(int bit,
-                                                 struct dlm_lock_resource *res,
-                                                 const char *file,
-                                                 int line)
-{
-       //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
-       //     res->lockname.len, res->lockname.name, bit);
-       clear_bit(bit, res->refmap);
-}
-
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  const char *file,
-                                  int line);
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  int new_lockres,
-                                  const char *file,
-                                  int line);
-#define dlm_lockres_drop_inflight_ref(d,r)  \
-       __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref(d,r)  \
-       __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref_new(d,r)  \
-       __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res);
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res);
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
index 6ed6b95dcf935a6516e935b85a3ca9ffc0b8a9d8..92f2ead0fab6de22fa138cc4410dee6e1544216c 100644 (file)
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
 
 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
 
-void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-       if (!hlist_unhashed(&lockres->hash_node)) {
-               hlist_del_init(&lockres->hash_node);
-               dlm_lockres_put(lockres);
-       }
+       if (hlist_unhashed(&res->hash_node))
+               return;
+
+       mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
+            res->lockname.name);
+       hlist_del_init(&res->hash_node);
+       dlm_lockres_put(res);
 }
 
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
-                      struct dlm_lock_resource *res)
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
        struct hlist_head *bucket;
        struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
        dlm_lockres_get(res);
 
        hlist_add_head(&res->hash_node, bucket);
+
+       mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
+            res->lockname.name);
 }
 
 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
 
 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
 {
-       int node = -1;
+       int node = -1, num = 0;
 
        assert_spin_locked(&dlm->spinlock);
 
-       printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
-
+       printk("( ");
        while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
                printk("%d ", node);
+               ++num;
        }
-       printk("\n");
+       printk(") %u nodes\n", num);
 }
 
 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
 
        node = exit_msg->node_idx;
 
-       printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
-
        spin_lock(&dlm->spinlock);
        clear_bit(node, dlm->domain_map);
        clear_bit(node, dlm->exit_domain_map);
+       printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
        __dlm_print_nodes(dlm);
 
        /* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
                dlm_mark_domain_leaving(dlm);
                dlm_leave_domain(dlm);
+               printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
                dlm_force_free_mles(dlm);
                dlm_complete_dlm_shutdown(dlm);
        }
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
                clear_bit(assert->node_idx, dlm->exit_domain_map);
                __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
 
-               printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
+               printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
                       assert->node_idx, dlm->name);
                __dlm_print_nodes(dlm);
 
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
 bail:
        spin_lock(&dlm->spinlock);
        __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
-       if (!status)
+       if (!status) {
+               printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
                __dlm_print_nodes(dlm);
+       }
        spin_unlock(&dlm->spinlock);
 
        if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
                goto leave;
        }
 
-       if (!o2hb_check_local_node_heartbeating()) {
-               mlog(ML_ERROR, "the local node has not been configured, or is "
-                    "not heartbeating\n");
-               ret = -EPROTO;
-               goto leave;
-       }
-
        mlog(0, "register called for domain \"%s\"\n", domain);
 
 retry:
index 8d39e0fd66f7379b8fb08ac40502efa0d4a51cc6..975810b98492a34f4576b9d3d3ac3e4421fb18c1 100644 (file)
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
                        kick_thread = 1;
                }
        }
-       /* reduce the inflight count, this may result in the lockres
-        * being purged below during calc_usage */
-       if (lock->ml.node == dlm->node_num)
-               dlm_lockres_drop_inflight_ref(dlm, res);
 
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
             lock->ml.type, res->lockname.len,
             res->lockname.name, flags);
 
+       /*
+        * Wait if resource is getting recovered, remastered, etc.
+        * If the resource was remastered and new owner is self, then exit.
+        */
        spin_lock(&res->spinlock);
-
-       /* will exit this call with spinlock held */
        __dlm_wait_on_lockres(res);
+       if (res->owner == dlm->node_num) {
+               spin_unlock(&res->spinlock);
+               return DLM_RECOVERING;
+       }
        res->state |= DLM_LOCK_RES_IN_PROGRESS;
 
        /* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
        tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
                                    sizeof(create), res->owner, &status);
        if (tmpret >= 0) {
-               // successfully sent and received
-               ret = status;  // this is already a dlm_status
+               ret = status;
                if (ret == DLM_REJECTED) {
-                       mlog(ML_ERROR, "%s:%.*s: BUG.  this is a stale lockres "
-                            "no longer owned by %u.  that node is coming back "
-                            "up currently.\n", dlm->name, create.namelen,
+                       mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+                            "owned by node %u. That node is coming back up "
+                            "currently.\n", dlm->name, create.namelen,
                             create.name, res->owner);
                        dlm_print_one_lock_resource(res);
                        BUG();
                }
        } else {
-               mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-                    "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
-                    res->owner);
-               if (dlm_is_host_down(tmpret)) {
+               mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+                    "node %u\n", dlm->name, create.namelen, create.name,
+                    tmpret, res->owner);
+               if (dlm_is_host_down(tmpret))
                        ret = DLM_RECOVERING;
-                       mlog(0, "node %u died so returning DLM_RECOVERING "
-                            "from lock message!\n", res->owner);
-               } else {
+               else
                        ret = dlm_err_to_dlm_status(tmpret);
-               }
        }
 
        return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
                /* zero memory only if kernel-allocated */
                lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
                if (!lksb) {
-                       kfree(lock);
+                       kmem_cache_free(dlm_lock_cache, lock);
                        return NULL;
                }
                kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
 
                if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
                    status == DLM_FORWARD) {
-                       mlog(0, "retrying lock with migration/"
-                            "recovery/in progress\n");
                        msleep(100);
-                       /* no waiting for dlm_reco_thread */
                        if (recovery) {
                                if (status != DLM_RECOVERING)
                                        goto retry_lock;
-
-                               mlog(0, "%s: got RECOVERING "
-                                    "for $RECOVERY lock, master "
-                                    "was %u\n", dlm->name,
-                                    res->owner);
                                /* wait to see the node go down, then
                                 * drop down and allow the lockres to
                                 * get cleaned up.  need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
                        }
                }
 
+               /* Inflight taken in dlm_get_lock_resource() is dropped here */
+               spin_lock(&res->spinlock);
+               dlm_lockres_drop_inflight_ref(dlm, res);
+               spin_unlock(&res->spinlock);
+
+               dlm_lockres_calc_usage(dlm, res);
+               dlm_kick_thread(dlm, res);
+
                if (status != DLM_NORMAL) {
                        lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
                        if (status != DLM_NOTQUEUED)
index 11eefb8c12e98fb418f41c31a3b0a32201be3ca1..005261c333b090f5f53f376bd5bbed55b8e16ba7 100644 (file)
@@ -631,39 +631,54 @@ error:
        return NULL;
 }
 
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  int new_lockres,
-                                  const char *file,
-                                  int line)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+                               struct dlm_lock_resource *res, int bit)
 {
-       if (!new_lockres)
-               assert_spin_locked(&res->spinlock);
+       assert_spin_locked(&res->spinlock);
+
+       mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+            res->lockname.name, bit, __builtin_return_address(0));
+
+       set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+                                 struct dlm_lock_resource *res, int bit)
+{
+       assert_spin_locked(&res->spinlock);
+
+       mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+            res->lockname.name, bit, __builtin_return_address(0));
+
+       clear_bit(bit, res->refmap);
+}
+
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
 
-       if (!test_bit(dlm->node_num, res->refmap)) {
-               BUG_ON(res->inflight_locks != 0);
-               dlm_lockres_set_refmap_bit(dlm->node_num, res);
-       }
        res->inflight_locks++;
-       mlog(0, "%s:%.*s: inflight++: now %u\n",
-            dlm->name, res->lockname.len, res->lockname.name,
-            res->inflight_locks);
+
+       mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+            res->lockname.len, res->lockname.name, res->inflight_locks,
+            __builtin_return_address(0));
 }
 
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
-                                  struct dlm_lock_resource *res,
-                                  const char *file,
-                                  int line)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+                                  struct dlm_lock_resource *res)
 {
        assert_spin_locked(&res->spinlock);
 
        BUG_ON(res->inflight_locks == 0);
+
        res->inflight_locks--;
-       mlog(0, "%s:%.*s: inflight--: now %u\n",
-            dlm->name, res->lockname.len, res->lockname.name,
-            res->inflight_locks);
-       if (res->inflight_locks == 0)
-               dlm_lockres_clear_refmap_bit(dlm->node_num, res);
+
+       mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+            res->lockname.len, res->lockname.name, res->inflight_locks,
+            __builtin_return_address(0));
+
        wake_up(&res->wq);
 }
 
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
        unsigned int hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;
-       int drop_inflight_if_nonlocal = 0;
 
        BUG_ON(!lockid);
 
@@ -709,36 +723,33 @@ lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
        if (tmpres) {
-               int dropping_ref = 0;
-
                spin_unlock(&dlm->spinlock);
-
                spin_lock(&tmpres->spinlock);
-               /* We wait for the other thread that is mastering the resource */
+               /* Wait on the thread that is mastering the resource */
                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        __dlm_wait_on_lockres(tmpres);
                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+                       spin_unlock(&tmpres->spinlock);
+                       dlm_lockres_put(tmpres);
+                       tmpres = NULL;
+                       goto lookup;
                }
 
-               if (tmpres->owner == dlm->node_num) {
-                       BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
-                       dlm_lockres_grab_inflight_ref(dlm, tmpres);
-               } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
-                       dropping_ref = 1;
-               spin_unlock(&tmpres->spinlock);
-
-               /* wait until done messaging the master, drop our ref to allow
-                * the lockres to be purged, start over. */
-               if (dropping_ref) {
-                       spin_lock(&tmpres->spinlock);
-                       __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
+               /* Wait on the resource purge to complete before continuing */
+               if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+                       BUG_ON(tmpres->owner == dlm->node_num);
+                       __dlm_wait_on_lockres_flags(tmpres,
+                                                   DLM_LOCK_RES_DROPPING_REF);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }
 
-               mlog(0, "found in hash!\n");
+               /* Grab inflight ref to pin the resource */
+               dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+               spin_unlock(&tmpres->spinlock);
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
@@ -829,8 +840,8 @@ lookup:
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
-                       mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-                            "recover before lock mastery can begin\n",
+                       mlog(0, "%s: res %.*s, At least one node (%d) "
+                            "to recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
@@ -843,12 +854,11 @@ lookup:
 
        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
-       /* since this lockres is new it doesn't not require the spinlock */
-       dlm_lockres_grab_inflight_ref_new(dlm, res);
 
-       /* if this node does not become the master make sure to drop
-        * this inflight reference below */
-       drop_inflight_if_nonlocal = 1;
+       /* Grab inflight ref to pin the resource */
+       spin_lock(&res->spinlock);
+       dlm_lockres_grab_inflight_ref(dlm, res);
+       spin_unlock(&res->spinlock);
 
        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
                 * dlm spinlock would be detectable be a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
-                       mlog(ML_NOTICE, "%s: recovery map is not empty, but "
-                            "must master $RECOVERY lock now\n", dlm->name);
+                       mlog(0, "%s: Recovery map is not empty, but must "
+                            "master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
@@ -883,8 +893,8 @@ redo_request:
                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
-                       mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-                            "recover before lock mastery can begin\n",
+                       mlog(0, "%s: res %.*s, At least one node (%d) "
+                            "to recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
@@ -913,8 +923,8 @@ redo_request:
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
-                       mlog(0, "%s:%.*s: requests only up to %u but master "
-                            "is %u, keep going\n", dlm->name, namelen,
+                       mlog(0, "%s: res %.*s, Requests only up to %u but "
+                            "master is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }
@@ -924,13 +934,12 @@ wait:
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
-               mlog(0, "%s:%.*s: node map changed, redo the "
-                    "master request now, blocked=%d\n",
-                    dlm->name, res->lockname.len,
+               mlog(0, "%s: res %.*s, Node map changed, redo the master "
+                    "request now, blocked=%d\n", dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
-                       mlog(ML_ERROR, "%s:%.*s: spinning on "
-                            "dlm_wait_for_lock_mastery, blocked=%d\n",
+                       mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+                            "dlm_wait_for_lock_mastery, blocked = %d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
                goto redo_request;
        }
 
-       mlog(0, "lockres mastered by %u\n", res->owner);
+       mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+            res->lockname.name, res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);
 
@@ -952,8 +962,6 @@ wait:
 
 wake_waiters:
        spin_lock(&res->spinlock);
-       if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
-               dlm_lockres_drop_inflight_ref(dlm, res);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
                }
 
                if (res->owner == dlm->node_num) {
-                       mlog(0, "%s:%.*s: setting bit %u in refmap\n",
-                            dlm->name, namelen, name, request->node_idx);
-                       dlm_lockres_set_refmap_bit(request->node_idx, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
                        spin_unlock(&res->spinlock);
                        response = DLM_MASTER_RESP_YES;
                        if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
                                 * go back and clean the mles on any
                                 * other nodes */
                                dispatch_assert = 1;
-                               dlm_lockres_set_refmap_bit(request->node_idx, res);
-                               mlog(0, "%s:%.*s: setting bit %u in refmap\n",
-                                    dlm->name, namelen, name,
-                                    request->node_idx);
+                               dlm_lockres_set_refmap_bit(dlm, res,
+                                                          request->node_idx);
                        } else
                                response = DLM_MASTER_RESP_NO;
                } else {
@@ -1702,7 +1706,7 @@ again:
                             "lockres, set the bit in the refmap\n",
                             namelen, lockname, to);
                        spin_lock(&res->spinlock);
-                       dlm_lockres_set_refmap_bit(to, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, to);
                        spin_unlock(&res->spinlock);
                }
        }
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
        namelen = res->lockname.len;
        BUG_ON(namelen > O2NM_MAX_NAME_LEN);
 
-       mlog(0, "%s:%.*s: sending deref to %d\n",
-            dlm->name, namelen, lockname, res->owner);
        memset(&deref, 0, sizeof(deref));
        deref.node_idx = dlm->node_num;
        deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
        ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
                                 &deref, sizeof(deref), res->owner, &r);
        if (ret < 0)
-               mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-                    "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
-                    res->owner);
+               mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+                    dlm->name, namelen, lockname, ret, res->owner);
        else if (r < 0) {
                /* BAD.  other node says I did not have a ref. */
-               mlog(ML_ERROR,"while dropping ref on %s:%.*s "
-                   "(master=%u) got %d.\n", dlm->name, namelen,
-                   lockname, res->owner, r);
+               mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+                    dlm->name, namelen, lockname, res->owner, r);
                dlm_print_one_lock_resource(res);
                BUG();
        }
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
        else {
                BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
                if (test_bit(node, res->refmap)) {
-                       dlm_lockres_clear_refmap_bit(node, res);
+                       dlm_lockres_clear_refmap_bit(dlm, res, node);
                        cleared = 1;
                }
        }
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
        BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
        if (test_bit(node, res->refmap)) {
                __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
-               dlm_lockres_clear_refmap_bit(node, res);
+               dlm_lockres_clear_refmap_bit(dlm, res, node);
                cleared = 1;
        }
        spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                BUG_ON(!list_empty(&lock->bast_list));
                                BUG_ON(lock->ast_pending);
                                BUG_ON(lock->bast_pending);
-                               dlm_lockres_clear_refmap_bit(lock->ml.node, res);
+                               dlm_lockres_clear_refmap_bit(dlm, res,
+                                                            lock->ml.node);
                                list_del_init(&lock->list);
                                dlm_lock_put(lock);
                                /* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                        mlog(0, "%s:%.*s: node %u had a ref to this "
                             "migrating lockres, clearing\n", dlm->name,
                             res->lockname.len, res->lockname.name, bit);
-                       dlm_lockres_clear_refmap_bit(bit, res);
+                       dlm_lockres_clear_refmap_bit(dlm, res, bit);
                }
                bit++;
        }
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
                                         &migrate, sizeof(migrate), nodenum,
                                         &status);
                if (ret < 0) {
-                       mlog(ML_ERROR, "Error %d when sending message %u (key "
-                            "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
-                            dlm->key, nodenum);
+                       mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+                            "MIGRATE_REQUEST to node %u\n", dlm->name,
+                            migrate.namelen, migrate.name, ret, nodenum);
                        if (!dlm_is_host_down(ret)) {
                                mlog(ML_ERROR, "unhandled error=%d!\n", ret);
                                BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
                             dlm->name, res->lockname.len, res->lockname.name,
                             nodenum);
                        spin_lock(&res->spinlock);
-                       dlm_lockres_set_refmap_bit(nodenum, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, nodenum);
                        spin_unlock(&res->spinlock);
                }
        }
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
         * mastery reference here since old_master will briefly have
         * a reference after the migration completes */
        spin_lock(&res->spinlock);
-       dlm_lockres_set_refmap_bit(old_master, res);
+       dlm_lockres_set_refmap_bit(dlm, res, old_master);
        spin_unlock(&res->spinlock);
 
        mlog(0, "now time to do a migrate request to other nodes\n");
index 7efab6d28a21b4ee6a8376559d70f739a4e1da90..01ebfd0bdad72264b99345378f0c6febe246503d 100644 (file)
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
 }
 
 
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
 {
-       if (timeout) {
-               mlog(ML_NOTICE, "%s: waiting %dms for notification of "
-                    "death of node %u\n", dlm->name, timeout, node);
+       if (dlm_is_node_dead(dlm, node))
+               return;
+
+       printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
+              "domain %s\n", node, dlm->name);
+
+       if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
-                          dlm_is_node_dead(dlm, node),
-                          msecs_to_jiffies(timeout));
-       } else {
-               mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
-                    "of death of node %u\n", dlm->name, node);
+                                  dlm_is_node_dead(dlm, node),
+                                  msecs_to_jiffies(timeout));
+       else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
-       }
-       /* for now, return 0 */
-       return 0;
 }
 
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
 {
-       if (timeout) {
-               mlog(0, "%s: waiting %dms for notification of "
-                    "recovery of node %u\n", dlm->name, timeout, node);
+       if (dlm_is_node_recovered(dlm, node))
+               return;
+
+       printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
+              "domain %s\n", node, dlm->name);
+
+       if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
-                          dlm_is_node_recovered(dlm, node),
-                          msecs_to_jiffies(timeout));
-       } else {
-               mlog(0, "%s: waiting indefinitely for notification "
-                    "of recovery of node %u\n", dlm->name, node);
+                                  dlm_is_node_recovered(dlm, node),
+                                  msecs_to_jiffies(timeout));
+       else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
-       }
-       /* for now, return 0 */
-       return 0;
 }
 
 /* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
 {
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+       printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+              dlm->name, dlm->reco.dead_node);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
 }
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
+       printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
        wake_up(&dlm->reco.event);
 }
 
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+       printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+              "dead node %u in domain %s\n", dlm->reco.new_master,
+              (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+              dlm->reco.dead_node, dlm->name);
+}
+
 static int dlm_do_recovery(struct dlm_ctxt *dlm)
 {
        int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
                }
                mlog(0, "another node will master this recovery session.\n");
        }
-       mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
-            dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
-            dlm->node_num, dlm->reco.dead_node);
+
+       dlm_print_recovery_master(dlm);
 
        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
        return 0;
 
 master_here:
-       mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
-            "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
-            dlm->node_num, dlm->reco.dead_node, dlm->name);
+       dlm_print_recovery_master(dlm);
 
        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
-               mlog(ML_ERROR, "error %d remastering locks for node %u, "
-                    "retrying.\n", status, dlm->reco.dead_node);
+               mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+                    "retrying.\n", dlm->name, status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
 
-               mlog(0, "requesting lock info from node %u\n",
+               mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
                     ndata->node_num);
 
                if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
                spin_unlock(&dlm_reco_state_lock);
        }
 
-       mlog(0, "done requesting all lock info\n");
+       mlog(0, "%s: Done requesting all lock info\n", dlm->name);
 
        /* nodes should be sending reco data now
         * just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
 
        /* negative status is handled by caller */
        if (ret < 0)
-               mlog(ML_ERROR, "Error %d when sending message %u (key "
-                    "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
-                    dlm->key, request_from);
-
+               mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+                    "to recover dead node %u\n", dlm->name, ret,
+                    request_from, dead_node);
        // return from here, then
        // sleep until all received or error
        return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
-               mlog(ML_ERROR, "Error %d when sending message %u (key "
-                    "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
-                    dlm->key, send_to);
+               mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+                    "to recover dead node %u\n", dlm->name, ret, send_to,
+                    dead_node);
                if (!dlm_is_host_down(ret)) {
                        BUG();
                }
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
-               mlog(ML_ERROR, "Error %d when sending message %u (key "
-                    "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
-                    dlm->key, send_to);
+               mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+                    "node %u (%s)\n", dlm->name, mres->lockname_len,
+                    mres->lockname, ret, send_to,
+                    (orig_flags & DLM_MRES_MIGRATION ?
+                     "migration" : "recovery"));
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                             dlm->name, mres->lockname_len, mres->lockname,
                             from);
                        spin_lock(&res->spinlock);
-                       dlm_lockres_set_refmap_bit(from, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, from);
                        spin_unlock(&res->spinlock);
                        added++;
                        break;
@@ -1965,7 +1972,7 @@ skip_lvb:
                        mlog(0, "%s:%.*s: added lock for node %u, "
                             "setting refmap bit\n", dlm->name,
                             res->lockname.len, res->lockname.name, ml->node);
-                       dlm_lockres_set_refmap_bit(ml->node, res);
+                       dlm_lockres_set_refmap_bit(dlm, res, ml->node);
                        added++;
                }
                spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                if (res->owner == dead_node) {
+                       mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+                            dlm->name, res->lockname.len, res->lockname.name,
+                            res->owner, new_master);
                        list_del_init(&res->recovering);
                        spin_lock(&res->spinlock);
                        /* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
-                       if (res->state & DLM_LOCK_RES_RECOVERING) {
-                               if (res->owner == dead_node) {
-                                       mlog(0, "(this=%u) res %.*s owner=%u "
-                                            "was not on recovering list, but "
-                                            "clearing state anyway\n",
-                                            dlm->node_num, res->lockname.len,
-                                            res->lockname.name, new_master);
-                               } else if (res->owner == dlm->node_num) {
-                                       mlog(0, "(this=%u) res %.*s owner=%u "
-                                            "was not on recovering list, "
-                                            "owner is THIS node, clearing\n",
-                                            dlm->node_num, res->lockname.len,
-                                            res->lockname.name, new_master);
-                               } else
-                                       continue;
+                       if (!(res->state & DLM_LOCK_RES_RECOVERING))
+                               continue;
 
-                               if (!list_empty(&res->recovering)) {
-                                       mlog(0, "%s:%.*s: lockres was "
-                                            "marked RECOVERING, owner=%u\n",
-                                            dlm->name, res->lockname.len,
-                                            res->lockname.name, res->owner);
-                                       list_del_init(&res->recovering);
-                                       dlm_lockres_put(res);
-                               }
-                               spin_lock(&res->spinlock);
-                               /* new_master has our reference from
-                                * the lock state sent during recovery */
-                               dlm_change_lockres_owner(dlm, res, new_master);
-                               res->state &= ~DLM_LOCK_RES_RECOVERING;
-                               if (__dlm_lockres_has_locks(res))
-                                       __dlm_dirty_lockres(dlm, res);
-                               spin_unlock(&res->spinlock);
-                               wake_up(&res->wq);
+                       if (res->owner != dead_node &&
+                           res->owner != dlm->node_num)
+                               continue;
+
+                       if (!list_empty(&res->recovering)) {
+                               list_del_init(&res->recovering);
+                               dlm_lockres_put(res);
                        }
+
+                       /* new_master has our reference from
+                        * the lock state sent during recovery */
+                       mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+                            dlm->name, res->lockname.len, res->lockname.name,
+                            res->owner, new_master);
+                       spin_lock(&res->spinlock);
+                       dlm_change_lockres_owner(dlm, res, new_master);
+                       res->state &= ~DLM_LOCK_RES_RECOVERING;
+                       if (__dlm_lockres_has_locks(res))
+                               __dlm_dirty_lockres(dlm, res);
+                       spin_unlock(&res->spinlock);
+                       wake_up(&res->wq);
                }
        }
 }
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                             res->lockname.len, res->lockname.name, freed, dead_node);
                        __dlm_print_one_lock_resource(res);
                }
-               dlm_lockres_clear_refmap_bit(dead_node, res);
+               dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
        } else if (test_bit(dead_node, res->refmap)) {
                mlog(0, "%s:%.*s: dead node %u had a ref, but had "
                     "no locks and had not purged before dying\n", dlm->name,
                     res->lockname.len, res->lockname.name, dead_node);
-               dlm_lockres_clear_refmap_bit(dead_node, res);
+               dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
        }
 
        /* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                        dlm_revalidate_lvb(dlm, res, dead_node);
                        if (res->owner == dead_node) {
                                if (res->state & DLM_LOCK_RES_DROPPING_REF) {
-                                       mlog(ML_NOTICE, "Ignore %.*s for "
+                                       mlog(ML_NOTICE, "%s: res %.*s, Skip "
                                             "recovery as it is being freed\n",
-                                            res->lockname.len,
+                                            dlm->name, res->lockname.len,
                                             res->lockname.name);
                                } else
                                        dlm_move_lockres_to_recovery_list(dlm,
index 1d6d1d22c4715e3c89bef69570916cc5bf44c259..e73c833fc2a1a97cac35903f0439115cef813c69 100644 (file)
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
        int bit;
 
+       assert_spin_locked(&res->spinlock);
+
        if (__dlm_lockres_has_locks(res))
                return 0;
 
+       /* Locks are in the process of being created */
+       if (res->inflight_locks)
+               return 0;
+
        if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
                return 0;
 
        if (res->state & DLM_LOCK_RES_RECOVERING)
                return 0;
 
+       /* Another node has this resource with this node as the master */
        bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
        if (bit < O2NM_MAX_NODES)
                return 0;
 
-       /*
-        * since the bit for dlm->node_num is not set, inflight_locks better
-        * be zero
-        */
-       BUG_ON(res->inflight_locks != 0);
        return 1;
 }
 
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                /* clear our bit from the master's refmap, ignore errors */
                ret = dlm_drop_lockres_ref(dlm, res);
                if (ret < 0) {
-                       mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
-                            res->lockname.len, res->lockname.name, ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                }
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                BUG();
        }
 
-       __dlm_unhash_lockres(res);
+       __dlm_unhash_lockres(dlm, res);
 
        /* lockres is not in the hash now.  drop the flag and wake up
         * any processes waiting in dlm_get_lock_resource. */
index e1ed5e502ff25dc8afe39de464949ba13c2f9892..81a4cd22f80be84a06eac2b0fbf4348385d76262 100644 (file)
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
        mlog(0, "inode %llu take PRMODE open lock\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-       if (ocfs2_mount_local(osb))
+       if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
                goto out;
 
        lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             write ? "EXMODE" : "PRMODE");
 
+       if (ocfs2_is_hard_readonly(osb)) {
+               if (write)
+                       status = -EROFS;
+               goto out;
+       }
+
        if (ocfs2_mount_local(osb))
                goto out;
 
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
        if (ocfs2_is_hard_readonly(osb)) {
                if (ex)
                        status = -EROFS;
-               goto bail;
+               goto getbh;
        }
 
        if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
                        mlog_errno(status);
                goto bail;
        }
-
+getbh:
        if (ret_bh) {
                status = ocfs2_assign_bh(inode, ret_bh, local_bh);
                if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
 
        BUG_ON(!dl);
 
-       if (ocfs2_is_hard_readonly(osb))
-               return -EROFS;
+       if (ocfs2_is_hard_readonly(osb)) {
+               if (ex)
+                       return -EROFS;
+               return 0;
+       }
 
        if (ocfs2_mount_local(osb))
                return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
        struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
 
-       if (!ocfs2_mount_local(osb))
+       if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
 }
 
index 23457b491e8ce53ac5b71d9cd5fc2a5e1400a07f..2f5b92ef0e533146007b49d21dd705a242125dc5 100644 (file)
@@ -832,6 +832,102 @@ out:
        return ret;
 }
 
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
+{
+       struct inode *inode = file->f_mapping->host;
+       int ret;
+       unsigned int is_last = 0, is_data = 0;
+       u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+       u32 cpos, cend, clen, hole_size;
+       u64 extoff, extlen;
+       struct buffer_head *di_bh = NULL;
+       struct ocfs2_extent_rec rec;
+
+       BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
+
+       ret = ocfs2_inode_lock(inode, &di_bh, 0);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       if (*offset >= inode->i_size) {
+               ret = -ENXIO;
+               goto out_unlock;
+       }
+
+       if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+               if (origin == SEEK_HOLE)
+                       *offset = inode->i_size;
+               goto out_unlock;
+       }
+
+       clen = 0;
+       cpos = *offset >> cs_bits;
+       cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+
+       while (cpos < cend && !is_last) {
+               ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
+                                                &rec, &is_last);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out_unlock;
+               }
+
+               extoff = cpos;
+               extoff <<= cs_bits;
+
+               if (rec.e_blkno == 0ULL) {
+                       clen = hole_size;
+                       is_data = 0;
+               } else {
+                       clen = le16_to_cpu(rec.e_leaf_clusters) -
+                               (cpos - le32_to_cpu(rec.e_cpos));
+                       is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ?  0 : 1;
+               }
+
+               if ((!is_data && origin == SEEK_HOLE) ||
+                   (is_data && origin == SEEK_DATA)) {
+                       if (extoff > *offset)
+                               *offset = extoff;
+                       goto out_unlock;
+               }
+
+               if (!is_last)
+                       cpos += clen;
+       }
+
+       if (origin == SEEK_HOLE) {
+               extoff = cpos;
+               extoff <<= cs_bits;
+               extlen = clen;
+               extlen <<=  cs_bits;
+
+               if ((extoff + extlen) > inode->i_size)
+                       extlen = inode->i_size - extoff;
+               extoff += extlen;
+               if (extoff > *offset)
+                       *offset = extoff;
+               goto out_unlock;
+       }
+
+       ret = -ENXIO;
+
+out_unlock:
+
+       brelse(di_bh);
+
+       up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       ocfs2_inode_unlock(inode, 0);
+out:
+       if (ret && ret != -ENXIO)
+               ret = -ENXIO;
+       return ret;
+}
+
 int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
                           struct buffer_head *bhs[], int flags,
                           int (*validate)(struct super_block *sb,
index e79d41c2c90972fe801a99ec040b229a92debe98..67ea57d2fd594da7e456c1103bb1652fa68b5f69 100644 (file)
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
 int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 u64 map_start, u64 map_len);
 
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
+
 int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
                             u32 *p_cluster, u32 *num_clusters,
                             struct ocfs2_extent_list *el,
index de4ea1af041b654f8f1b4f1a000f6492226f701f..6e396683c3d48af7321f77b8658754982586cb35 100644 (file)
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
+       if (file->f_flags & O_SYNC)
+               handle->h_sync = 1;
+
        ocfs2_commit_trans(osb, handle);
 
 out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
        return ret;
 }
 
+static void ocfs2_aiodio_wait(struct inode *inode)
+{
+       wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
+
+       wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
+}
+
+static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
+{
+       int blockmask = inode->i_sb->s_blocksize - 1;
+       loff_t final_size = pos + count;
+
+       if ((pos & blockmask) || (final_size & blockmask))
+               return 1;
+       return 0;
+}
+
 static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
                                            struct file *file,
                                            loff_t pos, size_t count,
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int full_coherency = !(osb->s_mount_opt &
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
+       int unaligned_dio = 0;
 
        trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
                goto out;
        }
 
+       if (direct_io && !is_sync_kiocb(iocb))
+               unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
+                                                     *ppos);
+
        /*
         * We can't complete the direct I/O as requested, fall back to
         * buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
                goto relock;
        }
 
+       if (unaligned_dio) {
+               /*
+                * Wait on previous unaligned aio to complete before
+                * proceeding.
+                */
+               ocfs2_aiodio_wait(inode);
+
+               /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
+               atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
+               ocfs2_iocb_set_unaligned_aio(iocb);
+       }
+
        /*
         * To later detect whether a journal commit for sync writes is
         * necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
        if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
                rw_level = -1;
                have_alloc_sem = 0;
+               unaligned_dio = 0;
        }
 
+       if (unaligned_dio)
+               atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+
 out:
        if (rw_level != -1)
                ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
        return ret;
 }
 
+/* Refer to generic_file_llseek_unlocked() */
+static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       struct inode *inode = file->f_mapping->host;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+
+       switch (origin) {
+       case SEEK_SET:
+               break;
+       case SEEK_END:
+               offset += inode->i_size;
+               break;
+       case SEEK_CUR:
+               if (offset == 0) {
+                       offset = file->f_pos;
+                       goto out;
+               }
+               offset += file->f_pos;
+               break;
+       case SEEK_DATA:
+       case SEEK_HOLE:
+               ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
+               if (ret)
+                       goto out;
+               break;
+       default:
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+               ret = -EINVAL;
+       if (!ret && offset > inode->i_sb->s_maxbytes)
+               ret = -EINVAL;
+       if (ret)
+               goto out;
+
+       if (offset != file->f_pos) {
+               file->f_pos = offset;
+               file->f_version = 0;
+       }
+
+out:
+       mutex_unlock(&inode->i_mutex);
+       if (ret)
+               return ret;
+       return offset;
+}
+
 const struct inode_operations ocfs2_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
  * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
  */
 const struct file_operations ocfs2_fops = {
-       .llseek         = generic_file_llseek,
+       .llseek         = ocfs2_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .mmap           = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
  * the cluster.
  */
 const struct file_operations ocfs2_fops_no_plocks = {
-       .llseek         = generic_file_llseek,
+       .llseek         = ocfs2_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .mmap           = ocfs2_mmap,
index a22d2c098890a9ca67e2056976bfc9869d74a325..17454a904d7bf488093de9f3db61dc529e0f8e3c 100644 (file)
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
        trace_ocfs2_cleanup_delete_inode(
                (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
        if (sync_data)
-               write_inode_now(inode, 1);
+               filemap_write_and_wait(inode->i_mapping);
        truncate_inode_pages(&inode->i_data, 0);
 }
 
index 1c508b149b3ac1bd4325fd33a9aae6bdb70e024a..88924a3133fae7c15ca3f5a5259b64eecd97022e 100644 (file)
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
        /* protects extended attribute changes on this inode */
        struct rw_semaphore             ip_xattr_sem;
 
+       /* Number of outstanding AIOs which are not page aligned */
+       atomic_t                        ip_unaligned_aio;
+
        /* These fields are protected by ip_lock */
        spinlock_t                      ip_lock;
        u32                             ip_open_count;
index bc91072b72196fd335c4b7cbc02ba08cb67254e6..726ff265b296bc3365cfe46e94588c1ee4f6ed6a 100644 (file)
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
        if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
                (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
                if (!capable(CAP_LINUX_IMMUTABLE))
-                       goto bail_unlock;
+                       goto bail_commit;
        }
 
        ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
        if (status < 0)
                mlog_errno(status);
 
+bail_commit:
        ocfs2_commit_trans(osb, handle);
 bail_unlock:
        ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
        if (!oifi) {
                status = -ENOMEM;
                mlog_errno(status);
-               goto bail;
+               goto out_err;
        }
 
        if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
                o2info_set_request_error(&oifi->ifi_req, req);
 
        kfree(oifi);
-
+out_err:
        return status;
 }
 
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
        if (!oiff) {
                status = -ENOMEM;
                mlog_errno(status);
-               goto bail;
+               goto out_err;
        }
 
        if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
                o2info_set_request_error(&oiff->iff_req, req);
 
        kfree(oiff);
-
+out_err:
        return status;
 }
 
index 295d56454e8b23b6e9d97a6bd258e6170311c8f5..0a42ae96dca7d4a0f662505e0e51206895ce4156 100644 (file)
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
        /* we need to run complete recovery for offline orphan slots */
        ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
 
-       mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
-            node_num, slot_num,
-            MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+       printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
+              "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+              MINOR(osb->sb->s_dev));
 
        OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
 
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
 
        jbd2_journal_destroy(journal);
 
+       printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
+              "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+              MINOR(osb->sb->s_dev));
 done:
        /* drop the lock on this nodes journal */
        if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
  * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
  * is done to catch any orphans that are left over in orphan directories.
  *
+ * It scans all slots, even ones that are in use. It does so to handle the
+ * case described below:
+ *
+ *   Node 1 has an inode it was using. The dentry went away due to memory
+ *   pressure.  Node 1 closes the inode, but it's on the free list. The node
+ *   has the open lock.
+ *   Node 2 unlinks the inode. It grabs the dentry lock to notify others,
+ *   but node 1 has no dentry and doesn't get the message. It trylocks the
+ *   open lock, sees that another node has a PR, and does nothing.
+ *   Later node 2 runs its orphan dir. It igets the inode, trylocks the
+ *   open lock, sees the PR still, and does nothing.
+ *   Basically, we have to trigger an orphan iput on node 1. The only way
+ *   for this to happen is if node 1 runs node 2's orphan dir.
+ *
  * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
  * seconds.  It gets an EX lock on os_lockres and checks sequence number
  * stored in LVB. If the sequence number has changed, it means some other
index 68cf2f6d3c6a40b22dda3e2d4f7e2d44349167fa..a3385b63ff5e542bcfaabe59744fa3af38b537e4 100644 (file)
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
 #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
 
 /* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
- * update on dir + index leaf + dx root update for free list */
+ * update on dir + index leaf + dx root update for free list +
+ * previous dirblock update in the free list */
 static inline int ocfs2_link_credits(struct super_block *sb)
 {
-       return 2*OCFS2_INODE_UPDATE_CREDITS + 3 +
+       return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
               ocfs2_quota_trans_credits(sb);
 }
 
index 3e9393ca39ebd823772ae910cda1b4516d46fa62..9cd41083e99123eca1c48085fb39809e6b906b40 100644 (file)
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
                                struct page *page)
 {
-       int ret;
+       int ret = VM_FAULT_NOPAGE;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
        void *fsdata;
        loff_t size = i_size_read(inode);
 
-       /*
-        * Another node might have truncated while we were waiting on
-        * cluster locks.
-        * We don't check size == 0 before the shift. This is borrowed
-        * from do_generic_file_read.
-        */
        last_index = (size - 1) >> PAGE_CACHE_SHIFT;
-       if (unlikely(!size || page->index > last_index)) {
-               ret = -EINVAL;
-               goto out;
-       }
 
        /*
-        * The i_size check above doesn't catch the case where nodes
-        * truncated and then re-extended the file. We'll re-check the
-        * page mapping after taking the page lock inside of
-        * ocfs2_write_begin_nolock().
+        * There are cases that lead to the page no longer belonging to
+        * the mapping:
+        * 1) pagecache truncates locally due to memory pressure.
+        * 2) pagecache truncates when another node takes an EX lock
+        *    against the inode lock. See ocfs2_data_convert_worker().
+        *
+        * The i_size check doesn't catch the case where nodes truncated and
+        * then re-extended the file. We'll re-check the page mapping after
+        * taking the page lock inside of ocfs2_write_begin_nolock().
+        *
+        * Let the VM retry in these cases.
         */
-       if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
-               /*
-                * the page has been umapped in ocfs2_data_downconvert_worker.
-                * So return 0 here and let VFS retry.
-                */
-               ret = 0;
+       if ((page->mapping != inode->i_mapping) ||
+           (!PageUptodate(page)) ||
+           (page_offset(page) >= size))
                goto out;
-       }
 
        /*
         * Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
        if (ret) {
                if (ret != -ENOSPC)
                        mlog_errno(ret);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else
+                       ret = VM_FAULT_SIGBUS;
                goto out;
        }
 
-       ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
-                                    fsdata);
-       if (ret < 0) {
-               mlog_errno(ret);
+       if (!locked_page) {
+               ret = VM_FAULT_NOPAGE;
                goto out;
        }
+       ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
+                                    fsdata);
        BUG_ON(ret != len);
-       ret = 0;
+       ret = VM_FAULT_LOCKED;
 out:
        return ret;
 }
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 out:
        ocfs2_unblock_signals(&oldset);
-       if (ret)
-               ret = VM_FAULT_SIGBUS;
        return ret;
 }
 
index d53cb706f14c27dfc5ec754dc6d0f846e424421c..184c76b8c293907368f325316da04f57801d87eb 100644 (file)
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
         */
        ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
                                new_phys_cpos);
-       if (!new_phys_cpos) {
+       if (!*new_phys_cpos) {
                ret = -ENOSPC;
                goto out_commit;
        }
index 409285854f647e2357223bc7a8d24c36b376a6bb..d355e6e36b366bfe7dc8cc91cbabdad05976ba2c 100644 (file)
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
 
 static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
 {
-       __test_and_set_bit_le(bit, bitmap);
+       __set_bit_le(bit, bitmap);
 }
 #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
 
 static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
 {
-       __test_and_clear_bit_le(bit, bitmap);
+       __clear_bit_le(bit, bitmap);
 }
 #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
 
 #define ocfs2_test_bit test_bit_le
 #define ocfs2_find_next_zero_bit find_next_zero_bit_le
 #define ocfs2_find_next_bit find_next_bit_le
+
+static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
+{
+#if BITS_PER_LONG == 64
+       *bit += ((unsigned long) addr & 7UL) << 3;
+       addr = (void *) ((unsigned long) addr & ~7UL);
+#elif BITS_PER_LONG == 32
+       *bit += ((unsigned long) addr & 3UL) << 3;
+       addr = (void *) ((unsigned long) addr & ~3UL);
+#else
+#error "how many bits you are?!"
+#endif
+       return addr;
+}
+
+static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
+{
+       bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+       ocfs2_set_bit(bit, bitmap);
+}
+
+static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
+{
+       bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+       ocfs2_clear_bit(bit, bitmap);
+}
+
+static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
+{
+       bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+       return ocfs2_test_bit(bit, bitmap);
+}
+
+static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
+                                                       int start)
+{
+       int fix = 0, ret, tmpmax;
+       bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
+       tmpmax = max + fix;
+       start += fix;
+
+       ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
+       if (ret > max)
+               return max;
+       return ret;
+}
+
 #endif  /* OCFS2_H */
 
index dc8007fc924718c6d461b22cee52e4a8fbd6ae7d..f100bf70a9066ed1b917b8ec0451c27231840f92 100644 (file)
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
        int status = 0;
        struct ocfs2_quota_recovery *rec;
 
-       mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num);
+       printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
+              "slot %u\n", osb->dev_str, slot_num);
+
        rec = ocfs2_alloc_quota_recovery();
        if (!rec)
                return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
                                goto out_commit;
                        }
                        lock_buffer(qbh);
-                       WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap));
-                       ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
+                       WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
+                       ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
                        le32_add_cpu(&dchunk->dqc_free, 1);
                        unlock_buffer(qbh);
                        ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
        struct inode *lqinode;
        unsigned int flags;
 
-       mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
+       printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+              "slot %u\n", osb->dev_str, slot_num);
+
        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        for (type = 0; type < MAXQUOTAS; type++) {
                if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
                /* Someone else is holding the lock? Then he must be
                 * doing the recovery. Just skip the file... */
                if (status == -EAGAIN) {
-                       mlog(ML_NOTICE, "skipping quota recovery for slot %d "
-                            "because quota file is locked.\n", slot_num);
+                       printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
+                              "device (%s) for slot %d because quota file is "
+                              "locked.\n", osb->dev_str, slot_num);
                        status = 0;
                        goto out_put;
                } else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
                      * ol_quota_entries_per_block(sb);
        }
 
-       found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0);
+       found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
        /* We failed? */
        if (found == len) {
                mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
        struct ocfs2_local_disk_chunk *dchunk;
 
        dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
-       ocfs2_set_bit(*offset, dchunk->dqc_bitmap);
+       ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
        le32_add_cpu(&dchunk->dqc_free, -1);
 }
 
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
                        (od->dq_chunk->qc_headerbh->b_data);
        /* Mark structure as freed */
        lock_buffer(od->dq_chunk->qc_headerbh);
-       ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
+       ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
        le32_add_cpu(&dchunk->dqc_free, 1);
        unlock_buffer(od->dq_chunk->qc_headerbh);
        ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
index 26fc0014d50936137d0afb898b7c9541e8026087..1424c151cccce0170819ce4e0f36dad7d97461b8 100644 (file)
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
                        goto bail;
                }
        } else
-               mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
-                    slot);
+               printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+                      "allocated to this node!\n", slot, osb->dev_str);
 
        ocfs2_set_slot(si, slot, osb->node_num);
        osb->slot_num = slot;
index 19965b00c43caee7df4e09428775a55150ba9f8c..94368017edb378ce1e3961d1976408c954677c9a 100644 (file)
@@ -28,6 +28,7 @@
 #include "cluster/masklog.h"
 #include "cluster/nodemanager.h"
 #include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
 
 #include "stackglue.h"
 
@@ -255,6 +256,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
        dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
 }
 
+/*
+ * Check if this node is heartbeating and is connected to all other
+ * heartbeating nodes.
+ */
+static int o2cb_cluster_check(void)
+{
+       u8 node_num;
+       int i;
+       unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+       node_num = o2nm_this_node();
+       if (node_num == O2NM_MAX_NODES) {
+               printk(KERN_ERR "o2cb: This node has not been configured.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * o2dlm expects o2net sockets to be created. If not, then
+        * dlm_join_domain() fails with a stack of errors which are both cryptic
+        * and incomplete. The idea here is to detect upfront whether we have
+        * managed to connect to all nodes or not. If not, then list the nodes
+        * to allow the user to check the configuration (incorrect IP, firewall,
+        * etc.). Yes, this is racy. But it's not the end of the world.
+        */
+#define        O2CB_MAP_STABILIZE_COUNT        60
+       for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
+               o2hb_fill_node_map(hbmap, sizeof(hbmap));
+               if (!test_bit(node_num, hbmap)) {
+                       printk(KERN_ERR "o2cb: %s heartbeat has not been "
+                              "started.\n", (o2hb_global_heartbeat_active() ?
+                                             "Global" : "Local"));
+                       return -EINVAL;
+               }
+               o2net_fill_node_map(netmap, sizeof(netmap));
+               /* Force set the current node to allow easy compare */
+               set_bit(node_num, netmap);
+               if (!memcmp(hbmap, netmap, sizeof(hbmap)))
+                       return 0;
+               if (i < O2CB_MAP_STABILIZE_COUNT)
+                       msleep(1000);
+       }
+
+       printk(KERN_ERR "o2cb: This node could not connect to nodes:");
+       i = -1;
+       while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
+                                 i + 1)) < O2NM_MAX_NODES) {
+               if (!test_bit(i, netmap))
+                       printk(" %u", i);
+       }
+       printk(".\n");
+
+       return -ENOTCONN;
+}
+
 /*
  * Called from the dlm when it's about to evict a node. This is how the
  * classic stack signals node death.
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
 {
        struct ocfs2_cluster_connection *conn = data;
 
-       mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n",
-            node_num, conn->cc_namelen, conn->cc_name);
+       printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
+              node_num, conn->cc_namelen, conn->cc_name);
 
        conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
 }
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
        BUG_ON(conn == NULL);
        BUG_ON(conn->cc_proto == NULL);
 
-       /* for now we only have one cluster/node, make sure we see it
-        * in the heartbeat universe */
-       if (!o2hb_check_local_node_heartbeating()) {
-               if (o2hb_global_heartbeat_active())
-                       mlog(ML_ERROR, "Global heartbeat not started\n");
-               rc = -EINVAL;
+       /* Ensure cluster stack is up and all nodes are connected */
+       rc = o2cb_cluster_check();
+       if (rc) {
+               printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
+                      "before retrying.\n");
                goto out;
        }
 
index 56f61027236b696fce1ccde3e1edaf86acee59a0..4994f8b0e60410ff576fa63299e29e430192080a 100644 (file)
@@ -54,6 +54,7 @@
 #include "ocfs1_fs_compat.h"
 
 #include "alloc.h"
+#include "aops.h"
 #include "blockcheck.h"
 #include "dlmglue.h"
 #include "export.h"
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
                ocfs2_set_ro_flag(osb, 1);
 
-               printk(KERN_NOTICE "Readonly device detected. No cluster "
-                      "services will be utilized for this mount. Recovery "
-                      "will be skipped.\n");
+               printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
+                      "Cluster services will not be used for this mount. "
+                      "Recovery will be skipped.\n", osb->dev_str);
        }
 
        if (!ocfs2_is_hard_readonly(osb)) {
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
        return 0;
 }
 
+wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
 static int __init ocfs2_init(void)
 {
-       int status;
+       int status, i;
 
        ocfs2_print_version();
 
+       for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
+               init_waitqueue_head(&ocfs2__ioend_wq[i]);
+
        status = init_ocfs2_uptodate_cache();
        if (status < 0) {
                mlog_errno(status);
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data)
        ocfs2_extent_map_init(&oi->vfs_inode);
        INIT_LIST_HEAD(&oi->ip_io_markers);
        oi->ip_dir_start_lookup = 0;
-
+       atomic_set(&oi->ip_unaligned_aio, 0);
        init_rwsem(&oi->ip_alloc_sem);
        init_rwsem(&oi->ip_xattr_sem);
        mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
         * If we failed before we got a uuid_str yet, we can't stop
         * heartbeat.  Otherwise, do it.
         */
-       if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str)
+       if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
+           !ocfs2_is_hard_readonly(osb))
                hangup_needed = 1;
 
        if (osb->cconn)
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
                mlog_errno(status);
                goto bail;
        }
-       cleancache_init_shared_fs((char *)&uuid_net_key, sb);
+       cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
 
 bail:
        return status;
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
                        goto finally;
                }
        } else {
-               mlog(ML_NOTICE, "File system was not unmounted cleanly, "
-                    "recovering volume.\n");
+               printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
+                      "unmounted cleanly, recovering it.\n", osb->dev_str);
        }
 
        local = ocfs2_mount_local(osb);
index 194fb22ef79d590580f3245b522d0b095ef3794c..aa9e8777b09a5e345b081b0e495db378485a30e7 100644 (file)
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
                }
 
                ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
-               if (ret < 0) {
-                       mlog_errno(ret);
-                       break;
-               }
 
                ocfs2_commit_trans(osb, ctxt.handle);
                if (ctxt.meta_ac) {
                        ocfs2_free_alloc_context(ctxt.meta_ac);
                        ctxt.meta_ac = NULL;
                }
+
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       break;
+               }
+
        }
 
        if (ctxt.meta_ac)
index 586174168e2ac8818fc51397223fe683b4afa189..80e4645f7990cf4cc0e636a7c19f9840503cdd42 100644 (file)
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               K(global_page_state(NR_ANON_PAGES)
                  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-                 HPAGE_PMD_NR
+                 HPAGE_PMD_NR),
+#else
+               K(global_page_state(NR_ANON_PAGES)),
 #endif
-                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
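The hunk above fixes the preprocessor bracketing so that, with CONFIG_TRANSPARENT_HUGEPAGE, the AnonPages figure is NR_ANON_PAGES plus HPAGE_PMD_NR base pages per counted transparent hugepage, and plain NR_ANON_PAGES otherwise. A small standalone sketch of the arithmetic, with made-up counter values and assuming 4 KiB base pages and 2 MiB PMD-sized hugepages:

#include <stdio.h>

#define HPAGE_PMD_NR 512UL                  /* 2 MiB / 4 KiB */

int main(void)
{
	unsigned long anon_pages = 100000;  /* stand-in for NR_ANON_PAGES            */
	unsigned long anon_thps  = 30;      /* stand-in for NR_ANON_TRANSPARENT_...  */

	unsigned long with_thp    = anon_pages + anon_thps * HPAGE_PMD_NR;
	unsigned long without_thp = anon_pages;

	/* meminfo prints kilobytes: pages * (PAGE_SIZE / 1024) = pages * 4 here */
	printf("AnonPages (THP on):  %lu kB\n", with_thp * 4);
	printf("AnonPages (THP off): %lu kB\n", without_thp * 4);
	return 0;
}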
index 42b274da92c39d539c7c2f0f82a2f825c1a20383..2a30d67dd6b81ea03c71b3bdea4fa50913a09ae4 100644 (file)
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
                idle = kstat_cpu(cpu).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(cpu));
        } else
-               idle = usecs_to_cputime(idle_time);
+               idle = nsecs_to_jiffies64(1000 * idle_time);
 
        return idle;
 }
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kstat_cpu(cpu).cpustat.iowait;
        else
-               iowait = usecs_to_cputime(iowait_time);
+               iowait = nsecs_to_jiffies64(1000 * iowait_time);
 
        return iowait;
 }
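Both hunks above take the NO_HZ idle/iowait figures, which the tickless accounting reports in microseconds, and convert them by scaling to nanoseconds first and then to jiffies via nsecs_to_jiffies64(). A simplified worked example of that conversion, assuming HZ=100 and ignoring the rounding the real helper performs when HZ does not divide NSEC_PER_SEC evenly:

#include <stdio.h>
#include <stdint.h>

#define HZ            100ULL
#define NSEC_PER_SEC  1000000000ULL

/* Simplified stand-in for nsecs_to_jiffies64(); assumes HZ divides evenly. */
static uint64_t nsecs_to_jiffies64(uint64_t n)
{
	return n / (NSEC_PER_SEC / HZ);
}

int main(void)
{
	uint64_t idle_usecs = 2500000;      /* 2.5 s of idle time, in microseconds */
	uint64_t jiffies = nsecs_to_jiffies64(1000 * idle_usecs);

	printf("%llu us idle -> %llu jiffies at HZ=%llu\n",
	       (unsigned long long)idle_usecs,
	       (unsigned long long)jiffies,
	       (unsigned long long)HZ);
	return 0;
}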
index 2bd620f0d796cf5dee01590c65b7ac8802e33bf9..57bbf9078ac8f327be28e88b38e10eeff1f9bfc0 100644 (file)
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
        }
 
        psinfo = psi;
+       mutex_init(&psinfo->read_mutex);
        spin_unlock(&pstore_lock);
 
        if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
 void pstore_get_records(int quiet)
 {
        struct pstore_info *psi = psinfo;
+       char                    *buf = NULL;
        ssize_t                 size;
        u64                     id;
        enum pstore_type_id     type;
        struct timespec         time;
        int                     failed = 0, rc;
-       unsigned long           flags;
 
        if (!psi)
                return;
 
-       spin_lock_irqsave(&psinfo->buf_lock, flags);
+       mutex_lock(&psi->read_mutex);
        rc = psi->open(psi);
        if (rc)
                goto out;
 
-       while ((size = psi->read(&id, &type, &time, psi)) > 0) {
-               rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
+       while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
+               rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
                                  time, psi);
+               kfree(buf);
+               buf = NULL;
                if (rc && (rc != -EEXIST || !quiet))
                        failed++;
        }
        psi->close(psi);
 out:
-       spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+       mutex_unlock(&psi->read_mutex);
 
        if (failed)
                printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
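The changes above move the record enumeration from a spinlock with interrupts disabled to a sleeping mutex, and have the backend hand back a freshly allocated buffer per record, which pstore_get_records() frees once the record has been turned into a file. A rough userspace sketch of that shape; the record source and all names here are hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static const char *fake_records[] = { "oops 1", "panic 2", NULL };
static int cursor;
static pthread_mutex_t read_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Returns the record size and a malloc()ed copy in *buf, or 0 when done. */
static ssize_t backend_read(char **buf)
{
	const char *rec = fake_records[cursor];

	if (!rec)
		return 0;
	cursor++;
	*buf = strdup(rec);
	return (ssize_t)strlen(rec);
}

static void get_records(void)
{
	char *buf = NULL;
	ssize_t size;

	pthread_mutex_lock(&read_mutex);        /* sleeping lock around the whole scan */
	while ((size = backend_read(&buf)) > 0) {
		printf("record (%zd bytes): %s\n", size, buf);
		free(buf);                      /* consumer owns and frees each buffer */
		buf = NULL;
	}
	pthread_mutex_unlock(&read_mutex);
}

int main(void)
{
	get_records();
	return 0;
}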
index 05d6b0e78c959a341137c97fbb2ea2fa89b25197..dba43c3ea3afb6605972d3a0e3eca3ac5248876e 100644 (file)
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
 
 /*
  * Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
  */
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                  char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                char *p;
 
                p = __d_path(path, root, buf, size);
+               if (!p)
+                       return SEQ_SKIP;
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        }
        seq_commit(m, res);
 
-       return res < 0 ? res : 0;
+       return res < 0 && res != -ENAMETOOLONG ? res : 0;
 }
 
 /*
index b6c4b3795c4a000ce27ac3799b5c39ae6c8044e9..76e4266d2e7e4a8fc3bfc609016e4de467b7e5cf 100644 (file)
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
+       if (count > XFS_ACL_MAX_ENTRIES)
+               return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
        if (!acl)
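The one-line guard above rejects an on-disk ACL entry count larger than XFS_ACL_MAX_ENTRIES before it is used to size an allocation, returning -EFSCORRUPTED rather than trusting corrupted metadata. A minimal standalone sketch of the same validate-before-allocate pattern; the limit, entry layout and error code below are stand-ins:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ACL_MAX_ENTRIES 25                      /* stand-in for XFS_ACL_MAX_ENTRIES */

struct acl_entry { uint32_t tag, id, perm; };

static struct acl_entry *acl_from_disk(uint32_t ondisk_count, int *err)
{
	struct acl_entry *acl;

	if (ondisk_count > ACL_MAX_ENTRIES) {   /* bound the untrusted count first */
		*err = -EINVAL;                 /* the kernel uses -EFSCORRUPTED   */
		return NULL;
	}

	acl = calloc(ondisk_count, sizeof(*acl));
	if (!acl) {
		*err = -ENOMEM;
		return NULL;
	}
	*err = 0;
	return acl;
}

int main(void)
{
	int err;
	struct acl_entry *acl = acl_from_disk(1000000, &err);  /* corrupted count */

	printf("acl=%p err=%d\n", (void *)acl, err);
	free(acl);
	return 0;
}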
index d4906e7c97873b302201cddf442bdc5eeb54a29d..c1b55e5965517a9407f678610b62f29fdabf33b3 100644 (file)
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 /*
  * Query whether the requested number of additional bytes of extended
  * attribute space will be able to fit inline.
+ *
  * Returns zero if not, else the di_forkoff fork offset to be used in the
  * literal area for attribute data once the new bytes have been added.
  *
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        int offset;
        int minforkoff; /* lower limit on valid forkoff locations */
        int maxforkoff; /* upper limit on valid forkoff locations */
-       int dsize;      
+       int dsize;
        xfs_mount_t *mp = dp->i_mount;
 
        offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
                return (offset >= minforkoff) ? minforkoff : 0;
        }
 
-       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
-               if (bytes <= XFS_IFORK_ASIZE(dp))
-                       return dp->i_d.di_forkoff;
+       /*
+        * If the requested number of bytes is smaller than or equal to the
+        * current attribute fork size we can always proceed.
+        *
+        * Note that if_bytes in the data fork might actually be larger than
+        * the current data fork size due to delalloc extents. In that
+        * case either the extent count will go down when they are converted
+        * to real extents, or the delalloc conversion will take care of the
+        * literal area rebalancing.
+        */
+       if (bytes <= XFS_IFORK_ASIZE(dp))
+               return dp->i_d.di_forkoff;
+
+       /*
+        * For attr2 we can try to move the forkoff if there is space in the
+        * literal area, but for the old format we are done if there is no
+        * space in the fixed attribute fork.
+        */
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2))
                return 0;
-       }
 
        dsize = dp->i_df.if_bytes;
-       
+
        switch (dp->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
-               /* 
+               /*
                 * If there is no attr fork and the data fork is extents, 
-                * determine if creating the default attr fork will result 
-                * in the extents form migrating to btree. If so, the 
-                * minimum offset only needs to be the space required for 
+                * determine if creating the default attr fork will result
+                * in the extents form migrating to btree. If so, the
+                * minimum offset only needs to be the space required for
                 * the btree root.
-                */ 
+                */
                if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
                    xfs_default_attroffset(dp))
                        dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
                break;
-               
        case XFS_DINODE_FMT_BTREE:
                /*
-                * If have data btree then keep forkoff if we have one,
-                * otherwise we are adding a new attr, so then we set 
-                * minforkoff to where the btree root can finish so we have 
+                * If we have a data btree then keep forkoff if we have one,
+                * otherwise we are adding a new attr, so then we set
+                * minforkoff to where the btree root can finish so we have
                 * plenty of room for attrs
                 */
                if (dp->i_d.di_forkoff) {
-                       if (offset < dp->i_d.di_forkoff) 
+                       if (offset < dp->i_d.di_forkoff)
                                return 0;
-                       else 
-                               return dp->i_d.di_forkoff;
-               } else
-                       dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+                       return dp->i_d.di_forkoff;
+               }
+               dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
                break;
        }
-       
-       /* 
-        * A data fork btree root must have space for at least 
+
+       /*
+        * A data fork btree root must have space for at least
         * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
         */
        minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
        maxforkoff = maxforkoff >> 3;   /* rounded down */
 
-       if (offset >= minforkoff && offset < maxforkoff)
-               return offset;
        if (offset >= maxforkoff)
                return maxforkoff;
+       if (offset >= minforkoff)
+               return offset;
        return 0;
 }
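The reordered checks at the end of the function above clamp the candidate fork offset: anything at or beyond maxforkoff is capped, anything at or beyond minforkoff is used as-is, and anything smaller means the requested attribute bytes do not fit inline. A tiny standalone illustration with made-up numbers:

#include <stdio.h>

static int pick_forkoff(int offset, int minforkoff, int maxforkoff)
{
	if (offset >= maxforkoff)
		return maxforkoff;      /* cap at the largest valid fork offset */
	if (offset >= minforkoff)
		return offset;          /* fits between the two limits           */
	return 0;                       /* the requested bytes do not fit inline */
}

int main(void)
{
	printf("%d\n", pick_forkoff(40, 16, 32));   /* capped to 32  */
	printf("%d\n", pick_forkoff(24, 16, 32));   /* fits as-is: 24 */
	printf("%d\n", pick_forkoff(8, 16, 32));    /* too small: 0   */
	return 0;
}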
 
index c68baeb0974adb2e57f690496fa8957c94c92e0d..d0ab78837057815f17605150d31a633c2eeb2739 100644 (file)
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
        int             tryagain;
        int             error;
 
+       ASSERT(ap->length);
+
        mp = ap->ip->i_mount;
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
        int                     error;
        int                     rt;
 
+       ASSERT(bma->length > 0);
+
        rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
 
        /*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
        ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
        ASSERT(!(flags & XFS_BMAPI_IGSTATE));
        ASSERT(tp != NULL);
+       ASSERT(len > 0);
 
        whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
                XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
-                       bma.length = len;
                        bma.offset = bno;
 
+                       /*
+                        * There's a 32/64 bit type mismatch between the
+                        * allocation length request (which can be 64 bits in
+                        * length) and the bma length request, which is
+                        * xfs_extlen_t and therefore 32 bits. Hence we have to
+                        * check for 32-bit overflows and handle them here.
+                        */
+                       if (len > (xfs_filblks_t)MAXEXTLEN)
+                               bma.length = MAXEXTLEN;
+                       else
+                               bma.length = len;
+
+                       ASSERT(len > 0);
+                       ASSERT(bma.length > 0);
                        error = xfs_bmapi_allocate(&bma, flags);
                        if (error)
                                goto error0;
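The new block above works around the type mismatch it describes: the caller's 64-bit xfs_filblks_t length is clamped to MAXEXTLEN before being stored in the 32-bit xfs_extlen_t allocation length, so each pass allocates at most one maximum-sized extent. A standalone sketch of the clamp; the 2^21 - 1 value used for MAXEXTLEN is my reading of the XFS limit and should be treated as an assumption:

#include <stdio.h>
#include <stdint.h>

#define MAXEXTLEN ((1U << 21) - 1)      /* assumed largest extent length, in blocks */

int main(void)
{
	uint64_t len = 1ULL << 32;      /* request far larger than 32 bits can hold */
	uint32_t alloc_len;

	if (len > (uint64_t)MAXEXTLEN)
		alloc_len = MAXEXTLEN;  /* allocate in MAXEXTLEN-sized chunks */
	else
		alloc_len = (uint32_t)len;

	printf("requested %llu blocks, allocating %u this pass\n",
	       (unsigned long long)len, alloc_len);
	return 0;
}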
index da108977b21f8582c7af92e2d4edcdbca8ba07b7..558910f5e3c06a3451558a486a7b982b0f8ee19d 100644 (file)
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
-               fid->i32.ino = inode->i_ino;
+               fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                spin_lock(&dentry->d_lock);
-               fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
-               fid64->ino = inode->i_ino;
+               fid64->ino = XFS_I(inode)->i_ino;
                fid64->gen = inode->i_generation;
                break;
        }
index c0237c602f11deb92fa5f533f74a647005fbf1b8..755ee8164880fe4122bac9de94119f1c7086a9b7 100644 (file)
@@ -2835,6 +2835,27 @@ corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
 }
 
+void
+xfs_promote_inode(
+       struct xfs_inode        *ip)
+{
+       struct xfs_buf          *bp;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+       bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+                       ip->i_imap.im_len, XBF_TRYLOCK);
+       if (!bp)
+               return;
+
+       if (XFS_BUF_ISDELAYWRITE(bp)) {
+               xfs_buf_delwri_promote(bp);
+               wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+       }
+
+       xfs_buf_relse(bp);
+}
+
 /*
  * Return a pointer to the extent record at file index idx.
  */
index 760140d1dd661f42e653576a7c947b11eb8b6fbd..b4cd4739f98e74b2e256295b48fe64e285b320b0 100644 (file)
@@ -498,6 +498,7 @@ int         xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 void           xfs_iunpin_wait(xfs_inode_t *);
 int            xfs_iflush(xfs_inode_t *, uint);
+void           xfs_promote_inode(struct xfs_inode *);
 void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 void           xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
index a14cd89fe4655e2647d92d2191c1ffb8e6588787..34817adf4b9ed837da47d6f9ccfa977829fdd33c 100644 (file)
@@ -150,6 +150,117 @@ xlog_grant_add_space(
        } while (head_val != old);
 }
 
+STATIC bool
+xlog_reserveq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+               if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+                       need_bytes = tic->t_unit_res * tic->t_cnt;
+               else
+                       need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_grant_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_writeq, t_queue) {
+               ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+               need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_regrant_write_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_grant_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+               trace_xfs_log_grant_wake(log, tic);
+
+               spin_lock(&log->l_grant_reserve_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_writeq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_regrant_write_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+               trace_xfs_log_regrant_write_wake(log, tic);
+
+               spin_lock(&log->l_grant_write_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
 static void
 xlog_tic_reset_res(xlog_ticket_t *tic)
 {
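xlog_reserveq_wake() and xlog_writeq_wake() above walk the queue in order and wake each waiter only while the remaining free space still covers its requirement, stopping at the first waiter that does not fit so earlier waiters keep their place. A compact standalone model of that policy, with the queue reduced to a plain array of byte requirements:

#include <stdio.h>
#include <stdbool.h>

static bool wake_waiters(const int *need, int n, int *free_bytes)
{
	for (int i = 0; i < n; i++) {
		if (*free_bytes < need[i])
			return false;           /* stop: preserve queue order */
		*free_bytes -= need[i];
		printf("wake waiter %d (needs %d)\n", i, need[i]);
	}
	return true;                            /* everyone queued was woken */
}

int main(void)
{
	int need[] = { 100, 250, 400 };
	int free_bytes = 500;

	bool all = wake_waiters(need, 3, &free_bytes);
	printf("all woken: %s, %d bytes left\n", all ? "yes" : "no", free_bytes);
	return 0;
}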
@@ -350,8 +461,19 @@ xfs_log_reserve(
                retval = xlog_grant_log_space(log, internal_ticket);
        }
 
+       if (unlikely(retval)) {
+               /*
+                * If we are failing, make sure the ticket doesn't have any
+                * current reservations.  We don't want to add this back
+                * when the ticket/transaction gets cancelled.
+                */
+               internal_ticket->t_curr_res = 0;
+               /* ungrant will give back unit_res * t_cnt. */
+               internal_ticket->t_cnt = 0;
+       }
+
        return retval;
-}      /* xfs_log_reserve */
+}
 
 
 /*
@@ -2481,8 +2603,8 @@ restart:
 /*
  * Atomically get the log space required for a log ticket.
  *
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
  *
  * This function is structured so that it has a lock free fast path. This is
  * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
  * every pass.
  *
  * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
  */
 STATIC int
-xlog_grant_log_space(xlog_t       *log,
-                    xlog_ticket_t *tic)
+xlog_grant_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int              free_bytes;
-       int              need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("grant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_grant_enter(log, tic);
 
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters, if we do not wake
+        * up all the waiters then go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
+        */
        need_bytes = tic->t_unit_res;
        if (tic->t_flags & XFS_LOG_PERM_RESERV)
                need_bytes *= tic->t_ocnt;
-
-       /* something is already sleeping; insert new transaction at end */
-       if (!list_empty_careful(&log->l_reserveq)) {
-               spin_lock(&log->l_grant_reserve_lock);
-               /* recheck the queue now we are locked */
-               if (list_empty(&log->l_reserveq)) {
-                       spin_unlock(&log->l_grant_reserve_lock);
-                       goto redo;
-               }
-               list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep1(log, tic);
-
-               /*
-                * Gotta check this before going to sleep, while we're
-                * holding the grant lock.
-                */
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               /*
-                * If we got an error, and the filesystem is shutting down,
-                * we'll catch it down below. So just continue...
-                */
-               trace_xfs_log_grant_wake1(log, tic);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_reserveq)) {
                spin_lock(&log->l_grant_reserve_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep2(log, tic);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               trace_xfs_log_grant_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_reserveq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_reserveq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_reserve_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_reserve_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_reserveq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_reserve_lock);
        }
+       if (error)
+               return error;
 
-       /* we've got enough space */
        xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_grant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-error_return_unlocked:
-       spin_lock(&log->l_grant_reserve_lock);
-error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_reserve_lock);
-       trace_xfs_log_grant_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_grant_log_space */
-
+}
 
 /*
  * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
  * free fast path.
  */
 STATIC int
-xlog_regrant_write_log_space(xlog_t       *log,
-                            xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int             free_bytes, need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t       *log,
        if (tic->t_cnt > 0)
                return 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("regrant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_regrant_write_enter(log, tic);
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
 
-       /* If there are other waiters on the queue then give them a
-        * chance at logspace before us. Wake up the first waiters,
-        * if we do not wake up all the waiters then go to sleep waiting
-        * for more free space, otherwise try to get some space for
-        * this transaction.
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters, if we do not wake
+        * up all the waiters then go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
         */
        need_bytes = tic->t_unit_res;
-       if (!list_empty_careful(&log->l_writeq)) {
-               struct xlog_ticket *ntic;
-
-               spin_lock(&log->l_grant_write_lock);
-               free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-               list_for_each_entry(ntic, &log->l_writeq, t_queue) {
-                       ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
-                       if (free_bytes < ntic->t_unit_res)
-                               break;
-                       free_bytes -= ntic->t_unit_res;
-                       wake_up(&ntic->t_wait);
-               }
-
-               if (ntic != list_first_entry(&log->l_writeq,
-                                               struct xlog_ticket, t_queue)) {
-                       if (list_empty(&tic->t_queue))
-                               list_add_tail(&tic->t_queue, &log->l_writeq);
-                       trace_xfs_log_regrant_write_sleep1(log, tic);
-
-                       xlog_grant_push_ail(log, need_bytes);
-
-                       XFS_STATS_INC(xs_sleep_logspace);
-                       xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-                       trace_xfs_log_regrant_write_wake1(log, tic);
-               } else
-                       spin_unlock(&log->l_grant_write_lock);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_writeq)) {
                spin_lock(&log->l_grant_write_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_writeq);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               trace_xfs_log_regrant_write_sleep2(log, tic);
-               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
-               trace_xfs_log_regrant_write_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_writeq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_writeq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_write_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_write_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_writeq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_write_lock);
        }
 
-       /* we've got enough space */
+       if (error)
+               return error;
+
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_regrant_write_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-
- error_return_unlocked:
-       spin_lock(&log->l_grant_write_lock);
- error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_write_lock);
-       trace_xfs_log_regrant_write_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_regrant_write_log_space */
-
+}
 
 /* The first cnt-1 times through here we don't need to
  * move the grant write head because the permanent
index aa3dc1a4d53d4f85f97a38f0db217bfcc4e3c953..be5c51d8f7572d715fc51f0c86563b46aba380c8 100644 (file)
@@ -770,6 +770,17 @@ restart:
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
+
+               /*
+                * If we only have a single dirty inode in a cluster there is
+                * a fair chance that the AIL push may have pushed it into
+                * the buffer, but xfsbufd won't touch it until 30 seconds
+                * from now, and thus we will lock up here.
+                *
+                * Promote the inode buffer to the front of the delwri list
+                * and wake up xfsbufd now.
+                */
+               xfs_promote_inode(ip);
                xfs_iflock(ip);
        }
 
index f1d2802b2f0782130954248f11237c10cf09e92f..49403579887324b87c821bc7eb62b0114314ce20 100644 (file)
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
index f4c38d8c6674a3dd71ea08a47dc68e89e81e0d92..2292d1af9d705f129ae523ce00a6b7794fb1648c 100644 (file)
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
 __SYSCALL(__NR_setns, sys_setns)
 #define __NR_sendmmsg 269
 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+          compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+          compat_sys_process_vm_writev)
 
 #undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
 
 /*
  * All syscalls below here should go away really,
index f81676f1b3105636bde40ebd0c99d5e7265f7600..4e4fbb820e204156729017b1488b77fe679ee30a 100644 (file)
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
index 1d161cb3aca5c098b76dcc1fdd47bdd8de579814..12050434d57a248909d695a80445e958dea81e32 100644 (file)
 /**
  * User-desired buffer creation information structure.
  *
- * @size: requested size for the object.
+ * @size: user-desired memory allocation size.
  *     - this size value would be page-aligned internally.
  * @flags: user request for setting memory type or cache attributes.
- * @handle: returned handle for the object.
- * @pad: just padding to be 64-bit aligned.
+ * @handle: a returned handle to the created gem object.
+ *     - this handle will be set by the gem module on the kernel side.
  */
 struct drm_exynos_gem_create {
-       unsigned int size;
+       uint64_t size;
        unsigned int flags;
        unsigned int handle;
-       unsigned int pad;
 };
 
 /**
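With the 64-bit size member first, the ioctl argument structure above packs to 16 bytes with natural 8-byte alignment on common 64-bit ABIs, which is why the explicit pad field could be dropped. A quick check of that layout; the ABI assumption is mine and is not stated in the patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gem_create {                     /* mirrors the new layout in the diff */
	uint64_t     size;
	unsigned int flags;
	unsigned int handle;
};

int main(void)
{
	printf("sizeof=%zu flags@%zu handle@%zu\n",
	       sizeof(struct gem_create),
	       offsetof(struct gem_create, flags),
	       offsetof(struct gem_create, handle));
	/* Expect 16 / 8 / 12 on x86-64, i.e. no trailing padding needed. */
	return 0;
}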
index 139c4db55f1736eebc35cc4601ee98d0d10a40c7..c86c940d1de3a58b73fd4027b80524daca997478 100644 (file)
@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:              cycle to nanosecond multiplier
  * @shift:             cycle to nanosecond divisor (power of two)
  * @max_idle_ns:       max idle time permitted by the clocksource (nsecs)
+ * @maxadj:            maximum adjustment value to mult (~11%)
  * @flags:             flags describing special properties
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
@@ -172,7 +173,7 @@ struct clocksource {
        u32 mult;
        u32 shift;
        u64 max_idle_ns;
-
+       u32 maxadj;
 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
        struct arch_clocksource_data archdata;
 #endif
index 154bf56830156876d56c1ced7d4c1e6c96973805..66ed067fb7291e89f1548718e581d1ed348c4120 100644 (file)
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 4df926199369622bffed05e3e60eeff8c42e4532..ed9f74f6c519a1f071348d691d69c7ed5795e938 100644 (file)
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
index e3130220ce3e3ddf7837c64b5fdadb0342f9f690..e0bc4ffb8e7f0ec42a916219ab02f43a112609d1 100644 (file)
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
 
 extern int current_umask(void);
 
index 96efa6794ea5293a59f40638d344005b2497f0bc..c3da42dd22baf17cbf853e6defaed7796bfba950 100644 (file)
@@ -172,6 +172,7 @@ enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_RECORDED_CMD_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
+       TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 };
 
 enum {
@@ -179,6 +180,7 @@ enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_RECORDED_CMD     = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+       TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 };
 
 struct ftrace_event_call {
index 94b1e356c02ab4fa808b5dd43d1ed9f57afbe50f..32574eef93941bab73a9b43138cd8a67511101ec 100644 (file)
@@ -126,6 +126,8 @@ extern struct cred init_cred;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#define INIT_TASK_COMM "swapper"
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
        .group_leader   = &tsk,                                         \
        RCU_INIT_POINTER(.real_cred, &init_cred),                       \
        RCU_INIT_POINTER(.cred, &init_cred),                            \
-       .comm           = "swapper",                                    \
+       .comm           = INIT_TASK_COMM,                               \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
        .files          = &init_files,                                  \
index 3dc3a8c2c4858a1d3400aa2d5fd029d36a1177c6..4baadd18f4ad3402f47fbd2ac919bafba519bed4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
index cbeb5867cff79d7d70952cc6285063c3feb92e7c..a82ad4dd306a657565f5c9987f4b888468325ba2 100644 (file)
@@ -2536,6 +2536,8 @@ extern void               net_disable_timestamp(void);
 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+                           const struct seq_operations *ops);
 #endif
 
 extern int netdev_class_create_file(struct class_attribute *class_attr);
index 172ba70306d1e77a4591a0474ad58c1df9460796..2aaee0ca9da847ec447abc59df56c916f18bc6ac 100644 (file)
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM  0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0    0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1    0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2    0x1602
 #define PCI_DEVICE_ID_AMD_15H_NB_F3    0x1603
 #define PCI_DEVICE_ID_AMD_15H_NB_F4    0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5    0x1605
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 1e9ebe5e0091e7fa1b1fabb72f72af979f35104c..b1f89122bf6a820102f43714fd42246d1dadd207 100644 (file)
@@ -822,6 +822,7 @@ struct perf_event {
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct ring_buffer              *rb;
+       struct list_head                rb_entry;
 
        /* poll related */
        wait_queue_head_t               waitq;
index c5336705921fdae6a3feb8ea99dfa252ae012c52..7281d5acf2f971a2fbae880b7935ac37cdea01df 100644 (file)
@@ -30,7 +30,7 @@
  */
 
 struct tc_stats {
-       __u64   bytes;                  /* NUmber of enqueues bytes */
+       __u64   bytes;                  /* Number of enqueued bytes */
        __u32   packets;                /* Number of enqueued packets   */
        __u32   drops;                  /* Packets dropped because of lack of resources */
        __u32   overlimits;             /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
        __u32 debug;            /* debug flags */
 
        /* stats */
-       __u32 direct_pkts; /* count of non shapped packets */
+       __u32 direct_pkts; /* count of non shaped packets */
 };
 enum {
        TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
 };
 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
 
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
 struct tc_netem_gimodel {
        __u32   p13;
        __u32   p31;
index 5c4c8b18c8b7809832d1be52808256f5a803a426..3f3ed83a9aa52cf69df6fd7a925d521cbca96f6d 100644 (file)
@@ -54,118 +54,145 @@ typedef struct pm_message {
 /**
  * struct dev_pm_ops - device PM callbacks
  *
- * Several driver power state transitions are externally visible, affecting
+ * Several device power state transitions are externally visible, affecting
  * the state of pending I/O queues and (for drivers that touch hardware)
  * interrupts, wakeups, DMA, and other hardware state.  There may also be
- * internal transitions to various low power modes, which are transparent
+ * internal transitions to various low-power modes which are transparent
  * to the rest of the driver stack (such as a driver that's ON gating off
  * clocks which are not in active use).
  *
- * The externally visible transitions are handled with the help of the following
- * callbacks included in this structure:
- *
- * @prepare: Prepare the device for the upcoming transition, but do NOT change
- *     its hardware state.  Prevent new children of the device from being
- *     registered after @prepare() returns (the driver's subsystem and
- *     generally the rest of the kernel is supposed to prevent new calls to the
- *     probe method from being made too once @prepare() has succeeded).  If
- *     @prepare() detects a situation it cannot handle (e.g. registration of a
- *     child already in progress), it may return -EAGAIN, so that the PM core
- *     can execute it once again (e.g. after the new child has been registered)
- *     to recover from the race condition.  This method is executed for all
- *     kinds of suspend transitions and is followed by one of the suspend
- *     callbacks: @suspend(), @freeze(), or @poweroff().
- *     The PM core executes @prepare() for all devices before starting to
- *     execute suspend callbacks for any of them, so drivers may assume all of
- *     the other devices to be present and functional while @prepare() is being
- *     executed.  In particular, it is safe to make GFP_KERNEL memory
- *     allocations from within @prepare().  However, drivers may NOT assume
- *     anything about the availability of the user space at that time and it
- *     is not correct to request firmware from within @prepare() (it's too
- *     late to do that).  [To work around this limitation, drivers may
- *     register suspend and hibernation notifiers that are executed before the
- *     freezing of tasks.]
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved.  First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types.  They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that.  If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ *     the device from being registered after it has returned (the driver's
+ *     subsystem and generally the rest of the kernel is supposed to prevent
+ *     new calls to the probe method from being made too once @prepare() has
+ *     succeeded).  If @prepare() detects a situation it cannot handle (e.g.
+ *     registration of a child already in progress), it may return -EAGAIN, so
+ *     that the PM core can execute it once again (e.g. after a new child has
+ *     been registered) to recover from the race condition.
+ *     This method is executed for all kinds of suspend transitions and is
+ *     followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ *     @poweroff().  The PM core executes subsystem-level @prepare() for all
+ *     devices before starting to invoke suspend callbacks for any of them, so
+ *     generally devices may be assumed to be functional or to respond to
+ *     runtime resume requests while @prepare() is being executed.  However,
+ *     device drivers may NOT assume anything about the availability of user
+ *     space at that time and it is NOT valid to request firmware from within
+ *     @prepare() (it's too late to do that).  It also is NOT valid to allocate
+ *     substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ *     [To work around these limitations, drivers may register suspend and
+ *     hibernation notifiers to be executed before the freezing of tasks.]
  *
  * @complete: Undo the changes made by @prepare().  This method is executed for
  *     all kinds of resume transitions, following one of the resume callbacks:
  *     @resume(), @thaw(), @restore().  Also called if the state transition
- *     fails before the driver's suspend callback (@suspend(), @freeze(),
- *     @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ *     fails before the driver's suspend callback: @suspend(), @freeze() or
+ *     @poweroff(), can be executed (e.g. if the suspend callback fails for one
  *     of the other devices that the PM core has unsuccessfully attempted to
  *     suspend earlier).
- *     The PM core executes @complete() after it has executed the appropriate
- *     resume callback for all devices.
+ *     The PM core executes subsystem-level @complete() after it has executed
+ *     the appropriate resume callbacks for all devices.
  *
  * @suspend: Executed before putting the system into a sleep state in which the
- *     contents of main memory are preserved.  Quiesce the device, put it into
- *     a low power state appropriate for the upcoming system state (such as
- *     PCI_D3hot), and enable wakeup events as appropriate.
+ *     contents of main memory are preserved.  The exact action to perform
+ *     depends on the device's subsystem (PM domain, device type, class or bus
+ *     type), but generally the device must be quiescent after subsystem-level
+ *     @suspend() has returned, so that it doesn't do any I/O or DMA.
+ *     Subsystem-level @suspend() is executed for all devices after invoking
+ *     subsystem-level @prepare() for all of them.
  *
  * @resume: Executed after waking the system up from a sleep state in which the
- *     contents of main memory were preserved.  Put the device into the
- *     appropriate state, according to the information saved in memory by the
- *     preceding @suspend().  The driver starts working again, responding to
- *     hardware events and software requests.  The hardware may have gone
- *     through a power-off reset, or it may have maintained state from the
- *     previous suspend() which the driver may rely on while resuming.  On most
- *     platforms, there are no restrictions on availability of resources like
- *     clocks during @resume().
+ *     contents of main memory were preserved.  The exact action to perform
+ *     depends on the device's subsystem, but generally the driver is expected
+ *     to start working again, responding to hardware events and software
+ *     requests (the device itself may be left in a low-power state, waiting
+ *     for a runtime resume to occur).  The state of the device at the time its
+ *     driver's @resume() callback is run depends on the platform and subsystem
+ *     the device belongs to.  On most platforms, there are no restrictions on
+ *     availability of resources like clocks during @resume().
+ *     Subsystem-level @resume() is executed for all devices after invoking
+ *     subsystem-level @resume_noirq() for all of them.
  *
  * @freeze: Hibernation-specific, executed before creating a hibernation image.
- *     Quiesce operations so that a consistent image can be created, but do NOT
- *     otherwise put the device into a low power device state and do NOT emit
- *     system wakeup events.  Save in main memory the device settings to be
- *     used by @restore() during the subsequent resume from hibernation or by
- *     the subsequent @thaw(), if the creation of the image or the restoration
- *     of main memory contents from it fails.
+ *     Analogous to @suspend(), but it should not enable the device to signal
+ *     wakeup events or change its power state.  The majority of subsystems
+ *     (with the notable exception of the PCI bus type) expect the driver-level
+ *     @freeze() to save the device settings in memory to be used by @restore()
+ *     during the subsequent resume from hibernation.
+ *     Subsystem-level @freeze() is executed for all devices after invoking
+ *     subsystem-level @prepare() for all of them.
  *
  * @thaw: Hibernation-specific, executed after creating a hibernation image OR
- *     if the creation of the image fails.  Also executed after a failing
+ *     if the creation of an image has failed.  Also executed after a failing
  *     attempt to restore the contents of main memory from such an image.
  *     Undo the changes made by the preceding @freeze(), so the device can be
  *     operated in the same way as immediately before the call to @freeze().
+ *     Subsystem-level @thaw() is executed for all devices after invoking
+ *     subsystem-level @thaw_noirq() for all of them.  It also may be executed
+ *     directly after @freeze() in case of a transition error.
  *
  * @poweroff: Hibernation-specific, executed after saving a hibernation image.
- *     Quiesce the device, put it into a low power state appropriate for the
- *     upcoming system state (such as PCI_D3hot), and enable wakeup events as
- *     appropriate.
+ *     Analogous to @suspend(), but it need not save the device's settings in
+ *     memory.
+ *     Subsystem-level @poweroff() is executed for all devices after invoking
+ *     subsystem-level @prepare() for all of them.
  *
  * @restore: Hibernation-specific, executed after restoring the contents of main
- *     memory from a hibernation image.  Driver starts working again,
- *     responding to hardware events and software requests.  Drivers may NOT
- *     make ANY assumptions about the hardware state right prior to @restore().
- *     On most platforms, there are no restrictions on availability of
- *     resources like clocks during @restore().
- *
- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
- *     actions required for suspending the device that need interrupts to be
- *     disabled
- *
- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
- *     actions required for resuming the device that need interrupts to be
- *     disabled
- *
- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
- *     actions required for freezing the device that need interrupts to be
- *     disabled
- *
- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
- *     actions required for thawing the device that need interrupts to be
- *     disabled
- *
- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
- *     actions required for handling the device that need interrupts to be
- *     disabled
- *
- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
- *     actions required for restoring the operations of the device that need
- *     interrupts to be disabled
+ *     memory from a hibernation image, analogous to @resume().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
+ *     additional operations required for suspending the device that might be
+ *     racing with its driver's interrupt handler, which is guaranteed not to
+ *     run while @suspend_noirq() is being executed.
+ *     It generally is expected that the device will be in a low-power state
+ *     (appropriate for the target system sleep state) after subsystem-level
+ *     @suspend_noirq() has returned successfully.  If the device can generate
+ *     system wakeup signals and is enabled to wake up the system, it should be
+ *     configured to do so at that time.  However, depending on the platform
+ *     and device's subsystem, @suspend() may be allowed to put the device into
+ *     the low-power state and configure it to generate wakeup signals, in
+ *     which case it generally is not necessary to define @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ *     operations required for resuming the device that might be racing with
+ *     its driver's interrupt handler, which is guaranteed not to run while
+ *     @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
+ *     additional operations required for freezing the device that might be
+ *     racing with its driver's interrupt handler, which is guaranteed not to
+ *     run while @freeze_noirq() is being executed.
+ *     The power state of the device should not be changed by either @freeze()
+ *     or @freeze_noirq() and it should not be configured to signal system
+ *     wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ *     operations required for thawing the device that might be racing with its
+ *     driver's interrupt handler, which is guaranteed not to run while
+ *     @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
+ *     @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ *     operations required for restoring the device that might be racing with its
+ *     driver's interrupt handler, which is guaranteed not to run while
+ *     @restore_noirq() is being executed.  Analogous to @resume_noirq().
  *
  * All of the above callbacks, except for @complete(), return error codes.
  * However, the error codes returned by the resume operations, @resume(),
- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
  * not cause the PM core to abort the resume transition during which they are
- * returned.  The error codes returned in that cases are only printed by the PM
+ * returned.  The error codes returned in those cases are only printed by the PM
  * core to the system logs for debugging purposes.  Still, it is recommended
  * that drivers only return error codes from their resume methods in case of an
  * unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
  * their children.
  *
  * It is allowed to unregister devices while the above callbacks are being
- * executed.  However, it is not allowed to unregister a device from within any
- * of its own callbacks.
+ * executed.  However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
+ *
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
  *
- * There also are the following callbacks related to run-time power management
- * of devices:
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
  *
  * @runtime_suspend: Prepare the device for a condition in which it won't be
  *     able to communicate with the CPU(s) and RAM due to power management.
- *     This need not mean that the device should be put into a low power state.
+ *     This need not mean that the device should be put into a low-power state.
  *     For example, if the device is behind a link which is about to be turned
  *     off, the device may remain at full power.  If the device does go to low
- *     power and is capable of generating run-time wake-up events, remote
- *     wake-up (i.e., a hardware mechanism allowing the device to request a
- *     change of its power state via a wake-up event, such as PCI PME) should
- *     be enabled for it.
+ *     power and is capable of generating runtime wakeup events, remote wakeup
+ *     (i.e., a hardware mechanism allowing the device to request a change of
+ *     its power state via an interrupt) should be enabled for it.
  *
  * @runtime_resume: Put the device into the fully active state in response to a
- *     wake-up event generated by hardware or at the request of software.  If
- *     necessary, put the device into the full power state and restore its
+ *     wakeup event generated by hardware or at the request of software.  If
+ *     necessary, put the device into the full-power state and restore its
  *     registers, so that it is fully operational.
  *
- * @runtime_idle: Device appears to be inactive and it might be put into a low
- *     power state if all of the necessary conditions are satisfied.  Check
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ *     low-power state if all of the necessary conditions are satisfied.  Check
  *     these conditions and handle the device as appropriate, possibly queueing
  *     a suspend request for it.  The return value is ignored by the PM core.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
  */
 
 struct dev_pm_ops {
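
For reference, the callbacks documented above are normally supplied through a
struct dev_pm_ops instance pointed to from the driver's .pm field.  A minimal
sketch follows; the "foo" driver, its callbacks and their bodies are
hypothetical and only illustrate the shape of the API (assumes <linux/pm.h>
and <linux/device.h>):

static int foo_suspend(struct device *dev)
{
	/* Quiesce the device.  Whether it may already enter a low-power
	 * state here depends on the platform and subsystem, per the
	 * @suspend/@suspend_noirq notes above. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Undo foo_suspend(); make no assumptions about the hardware state
	 * beyond what the subsystem guarantees. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
	/* .freeze/.thaw/.poweroff/.restore and the _noirq variants would be
	 * filled in for hibernation and interrupts-off work;
	 * .runtime_suspend/.runtime_resume/.runtime_idle for runtime PM. */
};

/* Typically wired up as:  .driver = { .name = "foo", .pm = &foo_pm_ops }, */
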
index ea567321ae3c3b2ac02967680f99de1687f99982..2ca8cde5459d3445b2897c02d58ac2d12dba5bba 100644 (file)
@@ -35,10 +35,12 @@ struct pstore_info {
        spinlock_t      buf_lock;       /* serialize access to 'buf' */
        char            *buf;
        size_t          bufsize;
+       struct mutex    read_mutex;     /* serialize open/read/close */
        int             (*open)(struct pstore_info *psi);
        int             (*close)(struct pstore_info *psi);
        ssize_t         (*read)(u64 *id, enum pstore_type_id *type,
-                       struct timespec *time, struct pstore_info *psi);
+                       struct timespec *time, char **buf,
+                       struct pstore_info *psi);
        int             (*write)(enum pstore_type_id type, u64 *id,
                        unsigned int part, size_t size, struct pstore_info *psi);
        int             (*erase)(enum pstore_type_id type, u64 id,
index a83833a1f7a26f589a7584252ff413333286f5ea..07ceb97d53facc505bae4b489d3cae96bd7d4021 100644 (file)
@@ -35,7 +35,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-       long nr;        /* objs pending delete */
+       atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
index e2accb3164d8d969245e4567b0f308d4a35cb152..d0de882c0d96d5277f23306bf1d2884c40807292 100644 (file)
@@ -24,7 +24,7 @@ struct sigma_firmware {
 struct sigma_firmware_header {
        unsigned char magic[7];
        u8 version;
-       u32 crc;
+       __le32 crc;
 };
 
 enum {
@@ -40,19 +40,14 @@ enum {
 struct sigma_action {
        u8 instr;
        u8 len_hi;
-       u16 len;
-       u16 addr;
+       __le16 len;
+       __be16 addr;
        unsigned char payload[];
 };
 
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
-       return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
-       return sizeof(*sa) + payload_len + (payload_len % 2);
+       return (sa->len_hi << 16) | le16_to_cpu(sa->len);
 }
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
index 4fb6c43817918992f8334c49022d5184ea45c8e4..6faec1a6021629d73896fb425b49f7820720c151 100644 (file)
@@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
 
 static inline u32 dst_mtu(const struct dst_entry *dst)
 {
-       u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       if (!mtu)
-               mtu = dst->ops->default_mtu(dst);
-
-       return mtu;
+       return dst->ops->mtu(dst);
 }
 
 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
index 9adb99845a5695b8159cc6c208bc6004ebf7b25c..e1c2ee0eef47506020f743230e923d840e64e7a9 100644 (file)
@@ -17,7 +17,7 @@ struct dst_ops {
        int                     (*gc)(struct dst_ops *ops);
        struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
        unsigned int            (*default_advmss)(const struct dst_entry *);
-       unsigned int            (*default_mtu)(const struct dst_entry *);
+       unsigned int            (*mtu)(const struct dst_entry *);
        u32 *                   (*cow_metrics)(struct dst_entry *, unsigned long);
        void                    (*destroy)(struct dst_entry *);
        void                    (*ifdown)(struct dst_entry *,
index b897d6e6d0a5a35f6a19b5403bbd7b5cefa47a39..f941964a9931978002b7ab4a137408b0d69c09d9 100644 (file)
@@ -31,6 +31,7 @@
 /** struct ip_options - IP Options
  *
  * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
  * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
  * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
  */
 struct ip_options {
        __be32          faddr;
+       __be32          nexthop;
        unsigned char   optlen;
        unsigned char   srr;
        unsigned char   rr;
index 78c83e62218fbfff60506e4323f72343aed2b83a..e9ff3fc5e688615b6b4df99b0aef82af0621ab57 100644 (file)
@@ -35,6 +35,7 @@ struct inet_peer {
 
        u32                     metrics[RTAX_MAX];
        u32                     rate_tokens;    /* rate limiting for ICMP */
+       int                     redirect_genid;
        unsigned long           rate_last;
        unsigned long           pmtu_expires;
        u32                     pmtu_orig;
index 4283508b3e185882bff18fe267f205df4eda39cc..a88fb6939387f228ac5826949f68151e0fceaf16 100644 (file)
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
 
 extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
-       if (nf_conntrack_event_cb == NULL)
+       if (net->ct.nf_conntrack_event_cb == NULL)
                return;
 
        e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
                              int report)
 {
        int ret = 0;
+       struct net *net = nf_ct_net(ct);
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_conntrack_event_cb);
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
                          u32 pid,
                          int report)
 {
+       struct net *net = nf_ct_exp_net(exp);
        struct nf_exp_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_expect_event_cb);
+       notify = rcu_dereference(net->ct.nf_expect_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
index 0249399e51a773608814e6cabaeb36915d7c6533..7a911eca0f18b4a751b3410b75223080d5da4dbe 100644 (file)
@@ -18,6 +18,8 @@ struct netns_ct {
        struct hlist_nulls_head unconfirmed;
        struct hlist_nulls_head dying;
        struct ip_conntrack_stat __percpu *stat;
+       struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+       struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
        int                     sysctl_events;
        unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_acct;
index 3319f16b3beb899727c7a434e75fb1010d7ee139..b72a3b83393604b29845514942a5cb8ad8729970 100644 (file)
@@ -116,7 +116,7 @@ struct red_parms {
        u32             qR;             /* Cached random number */
 
        unsigned long   qavg;           /* Average queue length: A scaled */
-       psched_time_t   qidlestart;     /* Start of current idle period */
+       ktime_t         qidlestart;     /* Start of current idle period */
 };
 
 static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(struct red_parms *p)
 {
-       return p->qidlestart != PSCHED_PASTPERFECT;
+       return p->qidlestart.tv64 != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-       p->qidlestart = psched_get_time();
+       p->qidlestart = ktime_get();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-       p->qidlestart = PSCHED_PASTPERFECT;
+       p->qidlestart.tv64 = 0;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
 
 static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
 {
-       psched_time_t now;
-       long us_idle;
+       s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+       long us_idle = min_t(s64, delta, p->Scell_max);
        int  shift;
 
-       now = psched_get_time();
-       us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
        /*
         * The problem: ideally, average queue length recalculation should
         * be done over constant clock intervals. This is too expensive, so
index db7b3432f07c41ce124c9d2a792035cead8b048d..91855d185b537f96fc0ea09134c96a93b63aa3d3 100644 (file)
@@ -71,12 +71,12 @@ struct rtable {
        struct fib_info         *fi; /* for client ref to shared metrics */
 };
 
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
 {
        return rt->rt_route_iif != 0;
 }
 
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
 {
        return rt->rt_route_iif == 0;
 }
index 7f5fed3c89e1808f6f2535ea06bb20cdd772b7f6..6873c7dd9145d2a23f682d9e8d2c695a7571d656 100644 (file)
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
        SCF_SCSI_NON_DATA_CDB           = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
-       SCF_SE_CMD_FAILED               = 0x00000400,
+       SCF_FUA                         = 0x00000200,
        SCF_SE_LUN_CMD                  = 0x00000800,
        SCF_SE_ALLOW_EOO                = 0x00001000,
+       SCF_BIDI                        = 0x00002000,
        SCF_SENT_CHECK_CONDITION        = 0x00004000,
        SCF_OVERFLOW_BIT                = 0x00008000,
        SCF_UNDERFLOW_BIT               = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
        TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
+       TCM_RESERVATION_CONFLICT                = 0x10,
 };
 
 struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
        u16     lu_gp_id;
        int     lu_gp_valid_id;
        u32     lu_gp_members;
-       atomic_t lu_gp_shutdown;
        atomic_t lu_gp_ref_cnt;
        spinlock_t lu_gp_lock;
        struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
        int                     sam_task_attr;
        /* Transport protocol dependent state, see transport_state_table */
        enum transport_state_table t_state;
-       /* Transport specific error status */
-       int                     transport_error_status;
        /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
-       int                     check_release:1;
-       int                     cmd_wait_set:1;
+       unsigned                check_release:1;
+       unsigned                cmd_wait_set:1;
        /* See se_cmd_flags_table */
        u32                     se_cmd_flags;
        u32                     se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
        /* Used for sense data */
        void                    *sense_buffer;
        struct list_head        se_delayed_node;
-       struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
        struct list_head        se_qf_node;
        struct se_device      *se_dev;
        struct se_dev_entry   *se_deve;
-       struct se_device        *se_obj_ptr;
-       struct se_device        *se_orig_obj_ptr;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
        int                     t_tasks_failed;
-       int                     t_tasks_fua;
-       bool                    t_tasks_bidi;
        u32                     t_tasks_sg_chained_no;
        atomic_t                t_fe_count;
        atomic_t                t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
 
        struct work_struct      work;
 
-       /*
-        * Used for pre-registered fabric SGL passthrough WRITE and READ
-        * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-        * and other HW target mode fabric modules.
-        */
-       struct scatterlist      *t_task_pt_sgl;
-       u32                     t_task_pt_sgl_num;
-
        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
        struct scatterlist      *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       int                     sess_tearing_down:1;
+       unsigned                sess_tearing_down:1;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
        struct t10_reservation t10_pr;
        spinlock_t      se_dev_lock;
        void            *se_dev_su_ptr;
-       struct list_head se_dev_node;
        struct config_group se_dev_group;
        /* For T10 Reservations */
        struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
 } ____cacheline_aligned;
 
 struct se_device {
-       /* Set to 1 if thread is NOT sleeping on thread_sem */
-       u8                      thread_active;
-       u8                      dev_status_timer_flags;
        /* RELATIVE TARGET PORT IDENTIFER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
-       atomic_t                active_cmds;
        atomic_t                simple_cmds;
        atomic_t                depth_left;
        atomic_t                dev_ordered_id;
-       atomic_t                dev_tur_active;
        atomic_t                execute_tasks;
-       atomic_t                dev_status_thr_count;
-       atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
        struct se_obj           dev_export_obj;
        struct se_queue_obj     dev_queue_obj;
        spinlock_t              delayed_cmd_lock;
-       spinlock_t              ordered_cmd_lock;
        spinlock_t              execute_task_lock;
-       spinlock_t              state_task_lock;
-       spinlock_t              dev_alua_lock;
        spinlock_t              dev_reservation_lock;
-       spinlock_t              dev_state_lock;
        spinlock_t              dev_status_lock;
-       spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
        struct t10_pr_registration *dev_pr_res_holder;
        struct list_head        dev_sep_list;
        struct list_head        dev_tmr_list;
-       struct timer_list       dev_status_timer;
        /* Pointer to descriptor for processing thread */
        struct task_struct      *process_thread;
-       pid_t                   process_thread_pid;
-       struct task_struct              *dev_mgmt_thread;
        struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
-       struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
        struct list_head        qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
-       /* Linked list for struct se_global->g_se_dev_list */
-       struct list_head        g_se_dev_list;
 }  ____cacheline_aligned;
 
 struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
        u32             sep_index;
        struct scsi_port_stats sep_stats;
        /* Used for ALUA Target Port Groups membership */
-       atomic_t        sep_tg_pt_gp_active;
        atomic_t        sep_tg_pt_secondary_offline;
        /* Used for PR ALL_TG_PT=1 */
        atomic_t        sep_tg_pt_ref_cnt;
index c16e9431dd01bb40d748fc315d04d103d77a351d..dac4f2d859fd72734fbc54ffda839216fe554079 100644 (file)
 
 #define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
 
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT                0
-#define PYX_TRANSPORT_WRITE_PENDING            1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE       -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL           -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS     -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES  -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD                -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST   -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE          -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE                -8
-#define PYX_TRANSPORT_WRITE_PROTECTED          -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT     -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST          -11
-#define PYX_TRANSPORT_USE_SENSE_REASON         -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT          0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE                  0
-#define TRANSPORT_PLUGIN_REGISTERED            1
-
 #define TRANSPORT_PLUGIN_PHBA_PDEV             1
 #define TRANSPORT_PLUGIN_VHBA_PDEV             2
 #define TRANSPORT_PLUGIN_VHBA_VDEV             3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
 extern bool target_stop_task(struct se_task *task, unsigned long *flags);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
index b66ebb2032c6d6b87c928876cab69c235d2425f3..378c7ed6760be0563b501971df2a60e03a672ea7 100644 (file)
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
        void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
 };
 
-#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
 /* Init with the board info */
 extern int omap_display_init(struct omap_dss_board_info *board_data);
-#else
-static inline int omap_display_init(struct omap_dss_board_info *board_data)
-{
-       return 0;
-}
-#endif
 
 struct omap_display_platform_data {
        struct omap_dss_board_info *board_data;
index 5e828a2ca8e64641749da837fe093cefcfc958dc..213c0351dad8fc8315d6d79a5bf3706a59c823bd 100644 (file)
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+       return frozen(task) ||
+               (task_is_stopped_or_traced(task) && freezing(task));
+}
+
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        nfrozen++;
        }
 
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
index 0e8457da6f9551c3eae667a6ac09a735a341b4bf..d3b9df5962c25bdbd3ca324756474366ff8c6d68 100644 (file)
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, ctx, task);
+       if (ctx->nr_events)
+               cpuctx->task_ctx = ctx;
 
-       cpuctx->task_ctx = ctx;
+       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
 
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        struct ring_buffer *rb;
        unsigned int events = POLL_HUP;
 
+       /*
+        * Race between perf_event_set_output() and perf_poll(): perf_poll()
+        * grabs the rb reference but perf_event_set_output() overrides it.
+        * Here is the timeline for two threads T1, T2:
+        * t0: T1, rb = rcu_dereference(event->rb)
+        * t1: T2, old_rb = event->rb
+        * t2: T2, event->rb = new rb
+        * t3: T2, ring_buffer_detach(old_rb)
+        * t4: T1, ring_buffer_attach(rb)
+        * t5: T1, poll_wait(event->waitq)
+        *
+        * To avoid this problem, we grab mmap_mutex in perf_poll(),
+        * thereby ensuring that the assignment of the new ring buffer
+        * and the detachment of the old buffer appear atomic to perf_poll().
+        */
+       mutex_lock(&event->mmap_mutex);
+
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (rb)
+       if (rb) {
+               ring_buffer_attach(event, rb);
                events = atomic_xchg(&rb->poll, 0);
+       }
        rcu_read_unlock();
 
+       mutex_unlock(&event->mmap_mutex);
+
        poll_wait(file, &event->waitq, wait);
 
        return events;
@@ -3496,6 +3521,49 @@ unlock:
        return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (!list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       if (!list_empty(&event->rb_entry))
+               goto unlock;
+
+       list_add(&event->rb_entry, &rb->event_list);
+unlock:
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_del_init(&event->rb_entry);
+       wake_up_all(&event->waitq);
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+               wake_up_all(&event->waitq);
+       }
+       rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
        struct ring_buffer *rb;
@@ -3521,9 +3589,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+       struct perf_event *event, *n;
+       unsigned long flags;
+
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+               list_del_init(&event->rb_entry);
+               wake_up_all(&event->waitq);
+       }
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
@@ -3546,6 +3624,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
+               ring_buffer_detach(event, rb);
                mutex_unlock(&event->mmap_mutex);
 
                ring_buffer_put(rb);
@@ -3700,7 +3779,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-       wake_up_all(&event->waitq);
+       ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5901,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
+       INIT_LIST_HEAD(&event->rb_entry);
+
        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);
 
@@ -6028,6 +6109,8 @@ set:
 
        old_rb = event->rb;
        rcu_assign_pointer(event->rb, rb);
+       if (old_rb)
+               ring_buffer_detach(event, old_rb);
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
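
The race documented in the perf_poll() comment earlier in this file comes down
to a general rule: a reader that samples a published buffer pointer and then
registers itself on that buffer must hold the same lock the writer takes while
swapping buffers, so the swap and the detach of the old buffer appear atomic
to the reader.  A self-contained sketch of that rule, using generic names and
plain pthreads rather than perf internals:

#include <pthread.h>

struct buffer {
	int refs;
};

struct event {
	pthread_mutex_t swap_lock;	/* plays the role of event->mmap_mutex */
	struct buffer *buf;		/* plays the role of event->rb */
};

/* Writer: install a new buffer and drop the old one.  Both steps happen
 * under swap_lock, so a reader can never observe the window between them. */
static void set_output(struct event *ev, struct buffer *newbuf)
{
	struct buffer *old;

	pthread_mutex_lock(&ev->swap_lock);
	old = ev->buf;
	ev->buf = newbuf;		/* "event->rb = new rb" */
	if (old)
		old->refs--;		/* "ring_buffer_detach(old_rb)" */
	pthread_mutex_unlock(&ev->swap_lock);
}

/* Reader: take the same lock before sampling ev->buf and attaching to it,
 * so the buffer cannot be swapped out between those two operations. */
static struct buffer *poll_attach(struct event *ev)
{
	struct buffer *b;

	pthread_mutex_lock(&ev->swap_lock);
	b = ev->buf;
	if (b)
		b->refs++;		/* "ring_buffer_attach(rb)" */
	pthread_mutex_unlock(&ev->swap_lock);
	return b;
}
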
index 09097dd8116c0e0bf5120d4da26c5f539a7f600a..64568a699375f105232eb588963da8707f926295 100644 (file)
@@ -22,6 +22,9 @@ struct ring_buffer {
        local_t                         lost;           /* nr records lost   */
 
        long                            watermark;      /* wakeup watermark  */
+       /* poll crap */
+       spinlock_t                      event_lock;
+       struct list_head                event_list;
 
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
index a2a29205cc0fc10913277132162e0a515a944552..7f3011c6b57fa3288c7e4c46a555736763765a84 100644 (file)
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
                rb->writable = 1;
 
        atomic_set(&rb->refcount, 1);
+
+       INIT_LIST_HEAD(&rb->event_list);
+       spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
index 422e567eecf63636a7c349b6319e6d541c6f3818..ae34bf51682b4a204de93f62943055350cd5c4d0 100644 (file)
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
+       struct timerqueue_node *next_timer;
        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
                goto out;
 
-       if (&timer->node == timerqueue_getnext(&base->active)) {
+       next_timer = timerqueue_getnext(&base->active);
+       timerqueue_del(&base->active, &timer->node);
+       if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
                /* Reprogram the clock event device. if enabled */
                if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
                }
 #endif
        }
-       timerqueue_del(&base->active, &timer->node);
        if (!timerqueue_getnext(&base->active))
                base->cpu_base->active_bases &= ~(1 << base->index);
 out:
index 67ce837ae52cdd70115a8ce436f923e96a26adc4..1da999f5e746caedacb8b40d0015e7c3f273ad04 100644 (file)
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+       set_current_state(TASK_INTERRUPTIBLE);
+
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
        return -1;
 }
 
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
                return -ENOMEM;
 
        action->handler = handler;
-       action->flags = IRQF_PERCPU;
+       action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
        action->name = devname;
        action->percpu_dev_id = dev_id;
 
index b5f4742693c01f2d0c286af36de1434a0436ff41..dc813a948be2379fe20a0218e1d270212b8408d3 100644 (file)
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
-           (action->flags & __IRQF_TIMER) || !action->next)
+           (action->flags & __IRQF_TIMER) ||
+           (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+           !action->next)
                goto out;
 
        /* Already running on another processor */
index bbdfe2a462a088b210d5792c674b215274b1b39b..66ff7109f6970ca63cb4aa9bf6b4800d69ef2f3a 100644 (file)
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
                return;
 
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
 }
 
index e69434b070da3f922909ece9417627e11234dcd6..b2e08c932d91c6507f07d637f61f0df1205778b8 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       memset(lock, 0, sizeof(*lock));
+       int i;
+
+       kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
index 196c01268ebd73d50c0a5f4d52d913c9aff8bd8b..a6b0503574ee714b2cbe6c871741d298ad9ce5f7 100644 (file)
@@ -347,7 +347,7 @@ int hibernation_snapshot(int platform_mode)
 
        error = freeze_kernel_threads();
        if (error)
-               goto Close;
+               goto Cleanup;
 
        if (hibernation_test(TEST_FREEZER) ||
                hibernation_testmode(HIBERNATION_TESTPROC)) {
@@ -357,12 +357,14 @@ int hibernation_snapshot(int platform_mode)
                 * successful freezer test.
                 */
                freezer_test_done = true;
-               goto Close;
+               goto Cleanup;
        }
 
        error = dpm_prepare(PMSG_FREEZE);
-       if (error)
-               goto Complete_devices;
+       if (error) {
+               dpm_complete(msg);
+               goto Cleanup;
+       }
 
        suspend_console();
        pm_restrict_gfp_mask();
@@ -391,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
                pm_restore_gfp_mask();
 
        resume_console();
-
- Complete_devices:
        dpm_complete(msg);
 
  Close:
@@ -402,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
  Recover_platform:
        platform_recover(platform_mode);
        goto Resume_devices;
+
+ Cleanup:
+       swsusp_free();
+       goto Close;
 }
 
 /**
index 1455a0d4eedd4b386c759d689f939ba5d7a9007a..7982a0a841eaf082fe929e24a1f4bd5aefd4f015 100644 (file)
@@ -1293,10 +1293,11 @@ again:
        raw_spin_lock(&logbuf_lock);
        if (con_start != log_end)
                retry = 1;
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
        if (retry && console_trylock())
                goto again;
 
-       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
        if (wake_klogd)
                wake_up_klogd();
 }
index 0e9344a71be33f6335bf55fd3a7bfe0f52418162..d6b149ccf925c320841e8a42f31fd23b6ee64dc6 100644 (file)
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/init_task.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
  *
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  *
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  *
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * This waits for either a completion of a specific task to be
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
 }
 
 /*
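
The return-value rules documented for the completion helpers above translate
into a simple calling pattern.  A short sketch with a hypothetical
foo_wait_for_irq() helper, assuming a completion that an interrupt handler
signals with complete():

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Wait up to 100 ms for the IRQ handler to signal 'done'. */
static int foo_wait_for_irq(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* 0: the timeout elapsed first */

	return 0;			/* > 0: completed with 'left' jiffies to spare */
}

The interruptible and killable timeout variants follow the same pattern, with
the additional case that a negative return (-ERESTARTSYS) must be handled
before testing for timeout.
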
index 5c9e67923b7cfd7826903c17322c3f0c55de5d74..a78ed2736ba79f02a201d8256bd9e0a56d57981e 100644 (file)
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+       long tg_weight;
+
+       /*
+        * Use this CPU's actual weight instead of the last load_contribution
+        * to gain a more accurate current total weight. See
+        * update_cfs_rq_load_contribution().
+        */
+       tg_weight = atomic_read(&tg->load_weight);
+       tg_weight -= cfs_rq->load_contribution;
+       tg_weight += cfs_rq->load.weight;
+
+       return tg_weight;
+}
+
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long load_weight, load, shares;
+       long tg_weight, load, shares;
 
+       tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;
 
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight += load;
-       load_weight -= cfs_rq->load_contribution;
-
        shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
+       if (tg_weight)
+               shares /= tg_weight;
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+       if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
        __return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
  * Adding load to a group doesn't make a group heavier, but can cause movement
  * of group shares between cpus. Assuming the shares were perfectly aligned one
  * can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ *   s_i = rw_i / \Sum rw_j                                            (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ *   rw_i = {   2,   4,   1,   0 }
+ *   s_i  = { 2/7, 4/7, 1/7,   0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wg we can compute the new shares distribution (s'_i) using:
+ *
+ *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                           (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ *   rw'_i = {   3,   4,   1,   0 }
+ *   s'_i  = { 3/8, 4/8, 1/8,   0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ *   dw_i = S * (s'_i - s_i)                                           (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
  */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
 
-       if (!tg->parent)
+       if (!tg->parent)        /* the trivial, non-cgroup case */
                return wl;
 
        for_each_sched_entity(se) {
-               long lw, w;
+               long w, W;
 
                tg = se->my_q->tg;
-               w = se->my_q->load.weight;
 
-               /* use this cpu's instantaneous contribution */
-               lw = atomic_read(&tg->load_weight);
-               lw -= se->my_q->load_contribution;
-               lw += w + wg;
+               /*
+                * W = @wg + \Sum rw_j
+                */
+               W = wg + calc_tg_weight(tg, se->my_q);
 
-               wl += w;
+               /*
+                * w = rw_i + @wl
+                */
+               w = se->my_q->load.weight + wl;
 
-               if (lw > 0 && wl < lw)
-                       wl = (wl * tg->shares) / lw;
+               /*
+                * wl = S * s'_i; see (2)
+                */
+               if (W > 0 && w < W)
+                       wl = (w * tg->shares) / W;
                else
                        wl = tg->shares;
 
-               /* zero point is MIN_SHARES */
+               /*
+                * Per the above, wl is the new se->load.weight value; since
+                * those are clipped to [MIN_SHARES, ...) do so now. See
+                * calc_cfs_shares().
+                */
                if (wl < MIN_SHARES)
                        wl = MIN_SHARES;
+
+               /*
+                * wl = dw_i = S * (s'_i - s_i); see (3)
+                */
                wl -= se->load.weight;
+
+               /*
+                * Recursively apply this logic to all parent groups to compute
+                * the final effective load change on the root group. Since
+                * only the @tg group gets extra weight, all parent groups can
+                * only redistribute existing shares. @wl is the shift in shares
+                * resulting from this level per the above.
+                */
                wg = 0;
        }
 
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an elegible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
-               if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-                       break;
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+                       if (!smt) {
+                               smt = 1;
+                               goto again;
                        }
+                       break;
                }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
+                       }
+
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
        }
+done:
        rcu_read_unlock();
 
        return target;
@@ -3511,7 +3602,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 }
 
 /**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
index efa0a7b75dde7408e89bd07e5b1a490c4f68ea95..84802245abd2562acad3c0eb734fe48cc5213b8e 100644 (file)
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
 SCHED_FEAT(TTWU_QUEUE, 1)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
index 056cbd2e2a27fea8cb15e76bfc711fe32de03303..583a1368afe6ed7d96879d762f73553b2068e27c 100644 (file)
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 {
        int more = 0;
 
+       if (!sched_feat(RT_RUNTIME_SHARE))
+               return more;
+
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
index c436e790b21bf7cd89878eb9d92146e1b1c511d6..8a46f5d64504f15dcaf31ec4f5fcee7ea15a8bdf 100644 (file)
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
index 1ecd6ba36d6c6d3d0ce404450878ea5da047e01d..c4eb71c8b2ea3290c6b83f2f6d72b3c8c62a7787 100644 (file)
@@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
         * released list and do a notify add later.
         */
        if (old) {
+               old->event_handler = clockevents_handle_noop;
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
index cf52fda2e0966d005e152ff8747d5d44db5fbd7a..da2f760e780c9cdf7d1e2b4b02cbf9fffa900cea 100644 (file)
@@ -491,6 +491,22 @@ void clocksource_touch_watchdog(void)
        clocksource_resume_watchdog();
 }
 
+/**
+ * clocksource_max_adjustment - Returns max adjustment amount
+ * @cs:         Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+       u64 ret;
+       /*
+        * We won't try to correct for more than 11% adjustments (110,000 ppm).
+        */
+       ret = (u64)cs->mult * 11;
+       do_div(ret,100);
+       return (u32)ret;
+}
+
 /**
  * clocksource_max_deferment - Returns max time the clocksource can be deferred
  * @cs:         Pointer to clocksource
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
        /*
         * Calculate the maximum number of cycles that we can pass to the
         * cyc2ns function without overflowing a 64-bit signed result. The
-        * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
-        * is equivalent to the below.
-        * max_cycles < (2^63)/cs->mult
-        * max_cycles < 2^(log2((2^63)/cs->mult))
-        * max_cycles < 2^(log2(2^63) - log2(cs->mult))
-        * max_cycles < 2^(63 - log2(cs->mult))
-        * max_cycles < 1 << (63 - log2(cs->mult))
+        * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+        * which is equivalent to the below.
+        * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+        * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+        * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+        * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+        * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
         * Please note that we add 1 to the result of the log2 to account for
         * any rounding errors, ensure the above inequality is satisfied and
         * no overflow will occur.
         */
-       max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+       max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
 
        /*
         * The actual maximum number of cycles we can defer the clocksource is
         * determined by the minimum of max_cycles and cs->mask.
+        * Note: Here we subtract the maxadj to make sure we don't sleep for
+        * too long if there's a large negative adjustment.
         */
        max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-       max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+       max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+                                       cs->shift);
 
        /*
         * To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         * note a margin of 12.5% is used because this can be computed with
         * a shift, versus say 10% which would require division.
         */
-       return max_nsecs - (max_nsecs >> 5);
+       return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
        u64 sec;
-
        /*
         * Calc the maximum number of seconds which we can run before
         * wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
         * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
         * margin as we do in clocksource_max_deferment()
         */
-       sec = (cs->mask - (cs->mask >> 5));
+       sec = (cs->mask - (cs->mask >> 3));
        do_div(sec, freq);
        do_div(sec, scale);
        if (!sec)
@@ -661,6 +679,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 
        clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
                               NSEC_PER_SEC / scale, sec * scale);
+
+       /*
+        * For clocksources that have large mults, reduce mult and shift to
+        * avoid overflow.  Since mult may be adjusted by NTP, add a safety
+        * margin.
+        */
+       cs->maxadj = clocksource_max_adjustment(cs);
+       while ((cs->mult + cs->maxadj < cs->mult)
+               || (cs->mult - cs->maxadj > cs->mult)) {
+               cs->mult >>= 1;
+               cs->shift--;
+               cs->maxadj = clocksource_max_adjustment(cs);
+       }
+
        cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
@@ -701,6 +733,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
  */
 int clocksource_register(struct clocksource *cs)
 {
+       /* calculate max adjustment for given mult/shift */
+       cs->maxadj = clocksource_max_adjustment(cs);
+       WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+               "Clocksource %s might overflow on 11%% adjustment\n",
+               cs->name);
+
        /* calculate max idle time permitted for this clocksource */
        cs->max_idle_ns = clocksource_max_deferment(cs);
 
index f954282d9a82758acf951960392e37b5b07c6014..fd4a7b1625a20b35fad917817e136fdbd2d48e62 100644 (file)
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
 
-       clockevents_exchange_device(NULL, dev);
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
index 2b021b0e8507e7e4f9951780c88877bb2d4d56bd..237841378c031ef0f2fa6c492559e695a111b74f 100644 (file)
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
                secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
                nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
                nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&xtime_lock, seq));
        /*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
                *ts = xtime;
                tomono = wall_to_monotonic;
                nsecs = timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&xtime_lock, seq));
 
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
        s64 error, interval = timekeeper.cycle_interval;
        int adj;
 
+       /*
+        * The point of this is to check if the error is greater than half
+        * an interval.
+        *
+        * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+        *
+        * Note we subtract one in the shift, so that error is really error*2.
+        * This "saves" dividing (shifting) interval twice, but keeps the
+        * (error > interval) comparison as still measuring if error is
+        * larger than half an interval.
+        *
+        * Note: It does not "save" on aggravation when reading the code.
+        */
        error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
        if (error > interval) {
+               /*
+                * We now divide error by 4 (via shift), which checks if
+                * the error is greater than twice the interval.
+                * If it is greater, we need a bigadjust; if it is smaller,
+                * we can adjust by 1.
+                */
                error >>= 2;
+               /*
+                * XXX - In update_wall_time, we round up to the next
+                * nanosecond, and store the amount rounded up into
+                * the error. This causes the likely below to be unlikely.
+                *
+                * The proper fix is to avoid rounding up by using
+                * the high precision timekeeper.xtime_nsec instead of
+                * xtime.tv_nsec everywhere. Fixing this will take some
+                * time.
+                */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
+               /* See comment above, this is just switched for the negative */
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
                        offset = -offset;
                } else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
-       } else
+       } else /* No adjustment needed */
                return;
 
+       WARN_ONCE(timekeeper.clock->maxadj &&
+                       (timekeeper.mult + adj > timekeeper.clock->mult +
+                                               timekeeper.clock->maxadj),
+                       "Adjusting %s more than 11%% (%ld vs %ld)\n",
+                       timekeeper.clock->name, (long)timekeeper.mult + adj,
+                       (long)timekeeper.clock->mult +
+                               timekeeper.clock->maxadj);
+       /*
+        * So the following can be confusing.
+        *
+        * To keep things simple, let's assume adj == 1 for now.
+        *
+        * When adj != 1, remember that the interval and offset values
+        * have been appropriately scaled so the math is the same.
+        *
+        * The basic idea here is that we're increasing the multiplier
+        * by one; this causes the xtime_interval to be incremented by
+        * one cycle_interval. This is because:
+        *      xtime_interval = cycle_interval * mult
+        * So if mult is being incremented by one:
+        *      xtime_interval = cycle_interval * (mult + 1)
+        * It's the same as:
+        *      xtime_interval = (cycle_interval * mult) + cycle_interval
+        * Which can be shortened to:
+        *      xtime_interval += cycle_interval
+        *
+        * So offset stores the non-accumulated cycles. Thus the current
+        * time (in shifted nanoseconds) is:
+        *      now = (offset * adj) + xtime_nsec
+        * Now, even though we're adjusting the clock frequency, we have
+        * to keep time consistent. In other words, we can't jump back
+        * in time, and we also want to avoid jumping forward in time.
+        *
+        * So given the same offset value, we need the time to be the same
+        * both before and after the freq adjustment.
+        *      now = (offset * adj_1) + xtime_nsec_1
+        *      now = (offset * adj_2) + xtime_nsec_2
+        * So:
+        *      (offset * adj_1) + xtime_nsec_1 =
+        *              (offset * adj_2) + xtime_nsec_2
+        * And we know:
+        *      adj_2 = adj_1 + 1
+        * So:
+        *      (offset * adj_1) + xtime_nsec_1 =
+        *              (offset * (adj_1+1)) + xtime_nsec_2
+        *      (offset * adj_1) + xtime_nsec_1 =
+        *              (offset * adj_1) + offset + xtime_nsec_2
+        * Canceling the sides:
+        *      xtime_nsec_1 = offset + xtime_nsec_2
+        * Which gives us:
+        *      xtime_nsec_2 = xtime_nsec_1 - offset
+        * Which simplifies to:
+        *      xtime_nsec -= offset
+        *
+        * XXX - TODO: Doc ntp_error calculation.
+        */
        timekeeper.mult += adj;
        timekeeper.xtime_interval += interval;
        timekeeper.xtime_nsec -= offset;
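
The invariant derived in the block comment above -- the reported time is unchanged when the multiplier grows by adj while xtime_nsec shrinks by offset -- can be checked numerically. A toy model with invented values (the comment writes now = (offset * adj) + xtime_nsec; here mult plays the role of the multiplier), not the real struct timekeeper:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values, all in "shifted nanoseconds". */
		uint64_t offset = 12345;	/* non-accumulated cycles */
		uint64_t mult = 1000;		/* current multiplier */
		uint64_t xtime_nsec = 987654;

		uint64_t before = offset * mult + xtime_nsec;

		/* What timekeeping_adjust() does for adj == 1. */
		mult += 1;
		xtime_nsec -= offset;

		uint64_t after = offset * mult + xtime_nsec;

		assert(before == after);	/* no jump forward or back */
		printf("now before=%llu after=%llu\n",
		       (unsigned long long)before, (unsigned long long)after);
		return 0;
	}
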
index dbaa62422b13c057754252d986f97440ec7dd3af..9c3c62b0c4bc89ebd307ff79950039021d157861 100644 (file)
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_vnr(current->real_parent);
+       pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();
 
        return pid;
index 900b409543db10cfc46b9da703f463a1cee9b78e..b1e8943fed1d3a9fd61916527c59c70d57af7d2c 100644 (file)
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
        ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
-               return 0;
+               /* still need to update the function records */
+               ret = 0;
+               goto out;
        }
 
        /*
index 581876f9f3872e9103a0110893396652599a1d6a..c212a7f934ec4841d8c9887ecf93f6a0b7526b27 100644 (file)
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
-                       __get_system(system);
                        system->nr_events++;
                        return system->entry;
                }
index 816d3d074979306713836d9447382cb641aecb54..95dc31efd6dd503dbd169a159dac4c5048f5a568 100644 (file)
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
-                       goto fail;
+                       call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+               else
+                       call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
        }
 
        list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
+               if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+                       continue;
+
                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
                 * replace the filter for the call.
                 */
                filter = call->filter;
-               call->filter = filter_item->filter;
+               rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
-               call->filter = NULL;
+               RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
         * string
         */
        tmp = call->filter;
-       call->filter = filter;
+       rcu_assign_pointer(call->filter, filter);
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();
index 74c6c7fce74900ec9870a15313268c87f9e1ecc3..fea790a2b17659e9b701987101db7929bdfbe8df 100644 (file)
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
 {
-       return ((a->dev_addr == a->dev_addr) &&
+       return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
 }
 
index 4298abaae153033caafe1f8ac641fdef1dee097e..36b3d988b4ef6ac8c263ee0732c1d08513afb04f 100644 (file)
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
 
 static void khugepaged_alloc_sleep(void)
 {
-       DEFINE_WAIT(wait);
-       add_wait_queue(&khugepaged_wait, &wait);
-       schedule_timeout_interruptible(
-               msecs_to_jiffies(
-                       khugepaged_alloc_sleep_millisecs));
-       remove_wait_queue(&khugepaged_wait, &wait);
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
 #ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
-                       DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
-                       add_wait_queue(&khugepaged_wait, &wait);
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(
-                                       khugepaged_scan_sleep_millisecs));
-                       remove_wait_queue(&khugepaged_wait, &wait);
+                       wait_event_freezable_timeout(khugepaged_wait, false,
+                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());
index bb28a5f9db8ddbf2f65391fe9206ca99b46a256b..73f17c0293c0a0e57a62f65f11c969b9319532f5 100644 (file)
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
index 578e29174fa6a0b84e8cbb10f8bf22ffd9a37b57..177aca424a069ac1ae1b44d48a8e6d992cd42a4d 100644 (file)
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (anon_vma)
                put_anon_vma(anon_vma);
-out:
        unlock_page(hpage);
 
+out:
        if (rc != -EAGAIN) {
                list_del(&hpage->lru);
                put_page(hpage);
index 9dd443d89d8be665813bbeb4e17e54fafde46428..2b8ba3aebf6e2c6b46b0d12dfea058ee3ab022fe 100644 (file)
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        unsigned long block_migratetype;
        int reserve;
 
-       /* Get the start pfn, end pfn and the number of blocks to reserve */
+       /*
+        * Get the start pfn, end pfn and the number of blocks to reserve.
+        * We have to be careful to be aligned to pageblock_nr_pages to
+        * make sure that we always check pfn_valid for the first page in
+        * the block.
+        */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
+       start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
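
The new roundup above matters because the reserve walk only calls pfn_valid() on the first page of each pageblock, so start_pfn must sit on a pageblock boundary. The same arithmetic in isolation, with an invented block size (the kernel's roundup() macro computes the next multiple the same way):

	#include <stdio.h>

	/* Next multiple of 'align' that is >= x. */
	static unsigned long roundup_to(unsigned long x, unsigned long align)
	{
		return ((x + align - 1) / align) * align;
	}

	int main(void)
	{
		unsigned long pageblock_nr_pages = 512;	/* hypothetical */
		unsigned long zone_start_pfn = 1000;	/* not block aligned */

		printf("start_pfn %lu -> %lu\n", zone_start_pfn,
		       roundup_to(zone_start_pfn, pageblock_nr_pages));
		return 0;
	}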
 
index ea534960a04bcda7e87a18cacf67bd2eb1fc5fe0..12a48a88c0d8cb00dd55b2adcea3e536493dee1a 100644 (file)
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
 
        if (!pages || !bitmap) {
                if (may_alloc && !pages)
-                       pages = pcpu_mem_alloc(pages_size);
+                       pages = pcpu_mem_zalloc(pages_size);
                if (may_alloc && !bitmap)
-                       bitmap = pcpu_mem_alloc(bitmap_size);
+                       bitmap = pcpu_mem_zalloc(bitmap_size);
                if (!pages || !bitmap)
                        return NULL;
        }
 
-       memset(pages, 0, pages_size);
        bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
 
        *bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
 {
        flush_cache_vunmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
 {
        flush_tlb_kernel_range(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
 {
        flush_cache_vmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 /**
index bf80e55dbed7e66fcd28f6dbc46c86fbd222a008..3bb810a72006cd65e345e16fd124b61e029964db 100644 (file)
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
 static int pcpu_nr_slots __read_mostly;
 static size_t pcpu_chunk_struct_size __read_mostly;
 
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
 void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
             (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
 
 /**
- * pcpu_mem_alloc - allocate memory
+ * pcpu_mem_zalloc - allocate memory
  * @size: bytes to allocate
  *
  * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
- * kzalloc() is used; otherwise, vmalloc() is used.  The returned
+ * kzalloc() is used; otherwise, vzalloc() is used.  The returned
  * memory is always zeroed.
  *
  * CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_mem_alloc(size_t size)
+static void *pcpu_mem_zalloc(size_t size)
 {
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
  * @ptr: memory to free
  * @size: size of the area
  *
- * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
+ * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
  */
 static void pcpu_mem_free(void *ptr, size_t size)
 {
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
        unsigned long flags;
 
-       new = pcpu_mem_alloc(new_size);
+       new = pcpu_mem_zalloc(new_size);
        if (!new)
                return -ENOMEM;
 
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 {
        struct pcpu_chunk *chunk;
 
-       chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
+       chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
        if (!chunk)
                return NULL;
 
-       chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+       chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
+                                               sizeof(chunk->map[0]));
        if (!chunk->map) {
                kfree(chunk);
                return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
  * address.  The caller is responsible for ensuring @addr stays valid
  * until this function finishes.
  *
+ * The percpu allocator has special setup for the first chunk, which currently
+ * supports either embedding in the linear address space or vmalloc mapping,
+ * and, from the second chunk on, the backing allocator (currently either vm
+ * or km) provides translation.
+ *
+ * The addr can be translated simply without checking if it falls into the
+ * first chunk. But the current code better reflects how the percpu allocator
+ * actually works, and the verification can discover both bugs in the percpu
+ * allocator itself and in per_cpu_ptr_to_phys() callers. So we keep the
+ * current code.
+ *
  * RETURNS:
  * The physical address for @addr.
  */
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        bool in_first_chunk = false;
-       unsigned long first_start, first_end;
+       unsigned long first_low, first_high;
        unsigned int cpu;
 
        /*
-        * The following test on first_start/end isn't strictly
+        * The following test on unit_low/high isn't strictly
         * necessary but will speed up lookups of addresses which
         * aren't in the first chunk.
         */
-       first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
-       first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
-                                   pcpu_unit_pages);
-       if ((unsigned long)addr >= first_start &&
-           (unsigned long)addr < first_end) {
+       first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+       first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+                                    pcpu_unit_pages);
+       if ((unsigned long)addr >= first_low &&
+           (unsigned long)addr < first_high) {
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(base, cpu);
 
@@ -1233,7 +1245,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;
-       pcpu_first_unit_cpu = NR_CPUS;
+
+       pcpu_low_unit_cpu = NR_CPUS;
+       pcpu_high_unit_cpu = NR_CPUS;
 
        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
                const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1267,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                        unit_map[cpu] = unit + i;
                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;
 
-                       if (pcpu_first_unit_cpu == NR_CPUS)
-                               pcpu_first_unit_cpu = cpu;
-                       pcpu_last_unit_cpu = cpu;
+                       /* determine low/high unit_cpu */
+                       if (pcpu_low_unit_cpu == NR_CPUS ||
+                           unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+                               pcpu_low_unit_cpu = cpu;
+                       if (pcpu_high_unit_cpu == NR_CPUS ||
+                           unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+                               pcpu_high_unit_cpu = cpu;
                }
        }
        pcpu_nr_units = unit;
@@ -1889,7 +1907,7 @@ void __init percpu_init_late(void)
 
                BUILD_BUG_ON(size > PAGE_SIZE);
 
-               map = pcpu_mem_alloc(size);
+               map = pcpu_mem_zalloc(size);
                BUG_ON(!map);
 
                spin_lock_irqsave(&pcpu_lock, flags);
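
The pcpu_low_unit_cpu/pcpu_high_unit_cpu bookkeeping above is a running min/max over unit_off[], replacing the old first/last-seen tracking, which assumed the first and last cpus encountered also had the lowest and highest unit offsets. The scan in isolation, with invented offsets:

	#include <stdio.h>

	#define NR_CPUS 4
	#define CPU_NONE NR_CPUS

	int main(void)
	{
		/* Hypothetical per-cpu unit offsets; note they are not sorted. */
		unsigned long unit_off[NR_CPUS] = { 4096, 0, 12288, 8192 };
		unsigned int low = CPU_NONE, high = CPU_NONE;

		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (low == CPU_NONE || unit_off[cpu] < unit_off[low])
				low = cpu;
			if (high == CPU_NONE || unit_off[cpu] > unit_off[high])
				high = cpu;
		}
		printf("low unit cpu=%u high unit cpu=%u\n", low, high);
		return 0;
	}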
index 708efe886154da626cfe01f4a2b6eaf055ada914..83311c9aaf9de0ad8494f04703c248f155cfc2c1 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
        PARTIAL_AC,
        PARTIAL_L3,
        EARLY,
+       LATE,
        FULL
 } g_cpucache_up;
 
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
 {
        struct cache_sizes *s = malloc_sizes;
 
-       if (g_cpucache_up != FULL)
+       if (g_cpucache_up < LATE)
                return;
 
        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
 {
        struct kmem_cache *cachep;
 
+       g_cpucache_up = LATE;
+
        /* Annotate slab for lockdep -- annotate the malloc caches */
        init_lock_keys();
 
index 7d2a996c307e4306bd233f4ae340a02d6915ffb1..ed3334d9b6da77f64796f1933f7672284b159fc2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
 {
        struct kmem_cache_node *n = NULL;
        struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
-       struct page *page;
+       struct page *page, *discard_page = NULL;
 
        while ((page = c->partial)) {
                enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
                                if (l == M_PARTIAL)
                                        remove_partial(n, page);
                                else
-                                       add_partial(n, page, 1);
+                                       add_partial(n, page,
+                                               DEACTIVATE_TO_TAIL);
 
                                l = m;
                        }
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
                                "unfreezing slab"));
 
                if (m == M_FREE) {
-                       stat(s, DEACTIVATE_EMPTY);
-                       discard_slab(s, page);
-                       stat(s, FREE_SLAB);
+                       page->next = discard_page;
+                       discard_page = page;
                }
        }
 
        if (n)
                spin_unlock(&n->list_lock);
+
+       while (discard_page) {
+               page = discard_page;
+               discard_page = discard_page->next;
+
+               stat(s, DEACTIVATE_EMPTY);
+               discard_slab(s, page);
+               stat(s, FREE_SLAB);
+       }
 }
 
 /*
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                page->pobjects = pobjects;
                page->next = oldpage;
 
-       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+       } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
        stat(s, CPU_PARTIAL_FREE);
        return pobjects;
 }
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                       int node = ACCESS_ONCE(c->node);
                        struct page *page;
 
-                       if (!c || c->node < 0)
+                       if (node < 0)
                                continue;
-
-                       if (c->page) {
-                                       if (flags & SO_TOTAL)
-                                               x = c->page->objects;
+                       page = ACCESS_ONCE(c->page);
+                       if (page) {
+                               if (flags & SO_TOTAL)
+                                       x = page->objects;
                                else if (flags & SO_OBJECTS)
-                                       x = c->page->inuse;
+                                       x = page->inuse;
                                else
                                        x = 1;
 
                                total += x;
-                               nodes[c->node] += x;
+                               nodes[node] += x;
                        }
                        page = c->partial;
 
                        if (page) {
                                x = page->pobjects;
-                                total += x;
-                                nodes[c->node] += x;
+                               total += x;
+                               nodes[node] += x;
                        }
-                       per_cpu[c->node]++;
+                       per_cpu[node]++;
                }
        }
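
The unfreeze_partials() rework above is a common locking pattern: while the node's list_lock is held, empty slabs are only unlinked and chained onto a private discard list through page->next; the expensive discard_slab() calls then run after the lock is dropped. The same shape with an ordinary singly linked list (all names invented for the sketch):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int id; struct node *next; };

	int main(void)
	{
		struct node *partial = NULL, *discard = NULL, *kept = NULL, *p;

		/* Build a small list standing in for the per-cpu partial list. */
		for (int i = 0; i < 5; i++) {
			p = malloc(sizeof(*p));
			p->id = i;
			p->next = partial;
			partial = p;
		}

		/* "Under the lock": only unlink; chain empties onto discard. */
		while ((p = partial)) {
			partial = p->next;
			if (p->id % 2 == 0) {	/* pretend these slabs are empty */
				p->next = discard;
				discard = p;
			} else {		/* others stay on a kept list */
				p->next = kept;
				kept = p;
			}
		}

		/* "After unlocking": the expensive frees happen lock-free. */
		while ((p = discard)) {
			discard = p->next;
			printf("discarding slab %d\n", p->id);
			free(p);
		}
		while ((p = kept)) {		/* cleanup for the sketch */
			kept = p->next;
			free(p);
		}
		return 0;
	}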
 
index 3231bf3328781b023c9e3b0b772aa1685c940d31..1d8b32f0713977ad2e3aeb6ba3d2554bb1389b53 100644 (file)
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
 
        /*
         * In this function, newly allocated vm_struct is not added
index a1893c050795b6e92f36a71cd0f5dd985f334a2c..f54a05b7a61d9eb658562b191996beb9d4cea397 100644 (file)
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-       shrinker->nr = 0;
+       atomic_long_set(&shrinker->nr_in_batch, 0);
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
-               unsigned long total_scan;
-               unsigned long max_pass;
+               long total_scan;
+               long max_pass;
                int shrink_ret = 0;
                long nr;
                long new_nr;
                long batch_size = shrinker->batch ? shrinker->batch
                                                  : SHRINK_BATCH;
 
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               if (max_pass <= 0)
+                       continue;
+
                /*
                 * copy the current shrinker scan count into a local variable
                 * and zero it so that other concurrent shrinker invocations
                 * don't also do this scanning work.
                 */
-               do {
-                       nr = shrinker->nr;
-               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+               nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
                total_scan = nr;
-               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * manner that handles concurrent updates. If we exhausted the
                 * scan, there is no need to do an update.
                 */
-               do {
-                       nr = shrinker->nr;
-                       new_nr = total_scan + nr;
-                       if (total_scan <= 0)
-                               break;
-               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+               if (total_scan > 0)
+                       new_nr = atomic_long_add_return(total_scan,
+                                       &shrinker->nr_in_batch);
+               else
+                       new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
                trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
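
The shrinker accounting above replaces an open-coded cmpxchg loop with a single atomic exchange: claim the whole deferred count and zero it in one step, then add back whatever was not scanned. A user-space sketch of that pattern with C11 atomics (the kernel uses atomic_long_xchg()/atomic_long_add_return()):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_long nr_in_batch;
		atomic_init(&nr_in_batch, 0);

		atomic_fetch_add(&nr_in_batch, 1000);	/* deferred work piles up */

		/* Claim the accumulated count and zero it atomically. */
		long total_scan = atomic_exchange(&nr_in_batch, 0);

		long scanned = 700;			/* hypothetical progress */
		long leftover = total_scan - scanned;
		if (leftover > 0)
			atomic_fetch_add(&nr_in_batch, leftover);

		printf("claimed=%ld leftover=%ld now=%ld\n", total_scan,
		       leftover, (long)atomic_load(&nr_in_batch));
		return 0;
	}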
index e5f9ece3c9a0f9637c8ad98382b88705b27e1d30..a1daf8227ed11c1a1853a8fb5a246919e9deb5f5 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/sock.h>
 
 #include "br_private.h"
+#include "br_private_stp.h"
 
 static inline size_t br_nlmsg_size(void)
 {
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 
        p->state = new_state;
        br_log_state(p);
+
+       spin_lock_bh(&p->br->lock);
+       br_port_state_selection(p->br);
+       spin_unlock_bh(&p->br->lock);
+
        br_ifinfo_notify(RTM_NEWLINK, p);
 
        return 0;
index ad0a3f7cf6cc73081ca600b241060ce5ef867e44..dd147d78a5889ab6c2139712199c3672f9e087c6 100644 (file)
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
        struct net_bridge_port *p;
        unsigned int liveports = 0;
 
-       /* Don't change port states if userspace is handling STP */
-       if (br->stp_enabled == BR_USER_STP)
-               return;
-
        list_for_each_entry(p, &br->port_list, list) {
                if (p->state == BR_STATE_DISABLED)
                        continue;
 
-               if (p->port_no == br->root_port) {
-                       p->config_pending = 0;
-                       p->topology_change_ack = 0;
-                       br_make_forwarding(p);
-               } else if (br_is_designated_port(p)) {
-                       del_timer(&p->message_age_timer);
-                       br_make_forwarding(p);
-               } else {
-                       p->config_pending = 0;
-                       p->topology_change_ack = 0;
-                       br_make_blocking(p);
+               /* Don't change port states if userspace is handling STP */
+               if (br->stp_enabled != BR_USER_STP) {
+                       if (p->port_no == br->root_port) {
+                               p->config_pending = 0;
+                               p->topology_change_ack = 0;
+                               br_make_forwarding(p);
+                       } else if (br_is_designated_port(p)) {
+                               del_timer(&p->message_age_timer);
+                               br_make_forwarding(p);
+                       } else {
+                               p->config_pending = 0;
+                               p->topology_change_ack = 0;
+                               br_make_blocking(p);
+                       }
                }
 
                if (p->state == BR_STATE_FORWARDING)
index f39921171d0d94e51e1270891a6703be598d01e9..d3ca87bf23b7ff952a428b367f6ad525382e6740 100644 (file)
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-       int tmp;
        u16 chks;
        u16 len;
+       __le16 data;
+
        struct cffrml *this = container_obj(layr);
        if (this->dofcs) {
                chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
-               tmp = cpu_to_le16(chks);
-               cfpkt_add_trail(pkt, &tmp, 2);
+               data = cpu_to_le16(chks);
+               cfpkt_add_trail(pkt, &data, 2);
        } else {
                cfpkt_pad_trail(pkt, 2);
        }
        len = cfpkt_getlen(pkt);
-       tmp = cpu_to_le16(len);
-       cfpkt_add_head(pkt, &tmp, 2);
+       data = cpu_to_le16(len);
+       cfpkt_add_head(pkt, &data, 2);
        cfpkt_info(pkt)->hdr_len += 2;
        if (cfpkt_erroneous(pkt)) {
                pr_err("Packet is erroneous!\n");
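
The caif fix above is an endianness bug: the cpu_to_le16() result was stored in an int, so cfpkt_add_trail()/cfpkt_add_head() copied two bytes out of a four-byte variable and picked the wrong half on big-endian machines. A user-space illustration of the trap, with a hypothetical to_le16() helper standing in for the kernel macro:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for cpu_to_le16(): always emit little-endian. */
	static uint16_t to_le16(uint16_t v)
	{
		uint8_t b[2] = { v & 0xff, v >> 8 };
		uint16_t out;
		memcpy(&out, b, 2);
		return out;
	}

	int main(void)
	{
		uint16_t len = 0x1234;

		uint16_t data = to_le16(len);	/* correct: exactly 2 bytes */
		int tmp = to_le16(len);		/* risky: 4 bytes, layout-dependent */

		uint8_t trailer[2];
		memcpy(trailer, &data, 2);	/* always the intended 2 LE bytes */
		printf("%02x %02x\n", trailer[0], trailer[1]);

		memcpy(trailer, &tmp, 2);	/* on big-endian this copies zeros */
		printf("%02x %02x\n", trailer[0], trailer[1]);
		return 0;
	}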
index 6ba50a1e404c4bac04cc7d56718865d5a1749c2f..5a13edfc9f73411f688920b51400489eb3e89ee2 100644 (file)
@@ -1396,7 +1396,7 @@ rollback:
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
-                               break;
+                               goto outroll;
 
                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
                }
        }
 
+outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
 }
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
                            sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+                    const struct seq_operations *ops)
+{
+       return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = dev_seq_open,
index 277faef9148d0053bc6987ae67e73571d49a4d99..febba516db6274c83d56577e7dc4d7fd61a15b72 100644 (file)
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open_net(inode, file, &dev_mc_seq_ops,
-                           sizeof(struct seq_net_private));
+       return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }
 
 static const struct file_operations dev_mc_seq_fops = {
index 039d51e6c284e7ab655319d399b9d40060357dcf..5ac07d31fbc9e395abf71ae46c897b5db3c7d6f8 100644 (file)
@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct neigh_table *tbl = state->tbl;
 
-       pn = pn->next;
+       do {
+               pn = pn->next;
+       } while (pn && !net_eq(pneigh_net(pn), net));
+
        while (!pn) {
                if (++state->bucket > PNEIGH_HASHMASK)
                        break;
index 182236b2510aeb16e6e0e2026264d683edf93e7e..9b570a6a33c5d8c52d777e160742dc31ec350c16 100644 (file)
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * Its minimum value is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low-memory machines,
+ * and it will increase in proportion to the machine's memory.
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
index 025233de25f969cb67e075d6ea5c4a9bba746bd9..925991ae6f52e67e2221f1ed7490a4583bf26b9d 100644 (file)
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
        /*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
         */
        return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
index 18a3cebb753d39d57b2962f991f32fb048d76c29..3c30ee4a57105a2746b3e1b2bfab68af8ef0c917 100644 (file)
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
  * It's up to caller to free skb if everything was shifted.
  *
  * If @tgt runs out of frags, the whole operation is aborted.
index 90a919afbed79ee5998f510badfaaa199a441086..3f4e5414c8e5200711839e8847437809047cb778 100644 (file)
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
index a77d16158eb6fa2eec5b1890b81ce12b92d88851..94f4ec036669cc152af1a9ff9639889016235228 100644 (file)
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
        .gc =                   dn_dst_gc,
        .check =                dn_dst_check,
        .default_advmss =       dn_dst_default_advmss,
-       .default_mtu =          dn_dst_default_mtu,
+       .mtu =                  dn_dst_mtu,
        .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              dn_dst_destroy,
        .negative_advice =      dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
        return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
 }
 
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 {
-       return dst->dev->mtu;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
index 67f691bd4acfee77b2b3402e2e9e4e5092bab8c0..d9c150cc59a952ac86585b6c600acc398b80bc14 100644 (file)
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-       sk->sk_timer.expires    = jiffies + SLOW_INTERVAL;
-       sk->sk_timer.function   = dn_slow_timer;
-       sk->sk_timer.data       = (unsigned long)sk;
-
-       add_timer(&sk->sk_timer);
+       setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
 void dn_stop_slow_timer(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
        struct sock *sk = (struct sock *)arg;
        struct dn_scp *scp = DN_SK(sk);
 
-       sock_hold(sk);
        bh_lock_sock(sk);
 
        if (sock_owned_by_user(sk)) {
-               sk->sk_timer.expires = jiffies + HZ / 10;
-               add_timer(&sk->sk_timer);
+               sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                goto out;
        }
 
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
                        scp->keepalive_fxn(sk);
        }
 
-       sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
-       add_timer(&sk->sk_timer);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 out:
        bh_unlock_sock(sk);
        sock_put(sk);
index c6b5092f29a15511bb7c69f37a86dca2c538a164..65f01dc47565bcc26282d4472bbd9c3fb2d3712c 100644 (file)
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
+       int old_value = *(int *)ctl->data;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       int new_value = *(int *)ctl->data;
 
        if (write) {
                struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
                if (cnf == net->ipv4.devconf_dflt)
                        devinet_copy_dflt_conf(net, i);
+               if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+                       if ((new_value == 0) && (old_value != 0))
+                               rt_cache_flush(net, 0);
        }
 
        return ret;
index c7472eff2d514b475579d1a3e5a89269d04fce0e..b2ca095cb9dab36a4eb6e8da7308c4e55a6437cc 100644 (file)
@@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
        if (err) {
                int j;
 
-               pmc->sfcount[sfmode]--;
+               if (!delta)
+                       pmc->sfcount[sfmode]--;
                for (j=0; j<i; j++)
                        (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
        } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
index 68e8ac5143834b80e09cc4bbbbdaecf05a3a2710..ccee270a9b6587efe63e74abcd298bae34fea779 100644 (file)
@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
                       icsk->icsk_ca_ops->name);
        }
 
-       if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
-               RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
        r->idiag_family = sk->sk_family;
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
@@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk,
        r->id.idiag_src[0] = inet->inet_rcv_saddr;
        r->id.idiag_dst[0] = inet->inet_daddr;
 
+       /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+        * hence this needs to be included regardless of socket family.
+        */
+       if (ext & (1 << (INET_DIAG_TOS - 1)))
+               RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
        if (r->idiag_family == AF_INET6) {
                const struct ipv6_pinfo *np = inet6_sk(sk);
 
+               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+                       RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+
                ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
                               &np->rcv_saddr);
                ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
                               &np->daddr);
-               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-                       RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
        }
 #endif
 
index 3b34d1c862709e7bde3cd3665fad1e191692506d..29a07b6c7168f7369b13e25d8c96011c6118ec56 100644 (file)
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
        rt = skb_rtable(skb);
 
-       if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+       if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
                goto sr_failed;
 
        if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
index 05d20cca9d66efd2500268a3d0ba4a31117739e7..1e60f7679075b660f489bcd1a9f462c832c5e77e 100644 (file)
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
                     ) {
                        if (srrptr + 3 > srrspace)
                                break;
-                       if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+                       if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
                                break;
                }
                if (srrptr + 3 <= srrspace) {
                        opt->is_changed = 1;
                        ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+                       ip_hdr(skb)->daddr = opt->nexthop;
                        optptr[2] = srrptr+4;
                } else if (net_ratelimit())
                        printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
        }
        if (srrptr <= srrspace) {
                opt->srr_is_hit = 1;
-               iph->daddr = nexthop;
+               opt->nexthop = nexthop;
                opt->is_changed = 1;
        }
        return 0;
index 9899619ab9b8db0f9d8d02c8005c0e6bb01fda94..4f47e064e262c2f24e7cb13eacfcebff0fad86a3 100644 (file)
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
-           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+           pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                               0, GFP_ATOMIC))
                return -1;
 
        return 0;
index 1dfc18a03fd4219fe9dd419011877d0a7ce3dfa7..f19f2182894c1c1549d27eb79cb409914e5b5bf0 100644 (file)
@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
 # raw + specific targets
 config IP_NF_RAW
        tristate  'raw table support (required for NOTRACK/TRACE)'
-       depends on NETFILTER_ADVANCED
        help
          This option adds a `raw' table to iptables. This table is the very
          first in the netfilter framework and hooks in at the PREROUTING
index 0c74da8a04732ce9702e9c1c8d80496f1c8df423..46af62363b8c1e9ef452b6d750886f21e56f08b1 100644 (file)
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
-    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+       ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define IP_MAX_MTU     0xFFF0
 
@@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly  = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly                = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 static int rt_chain_length_max __read_mostly   = 20;
+static int redirect_genid;
 
 /*
  *     Interface to generic destination cache.
@@ -138,7 +139,7 @@ static int rt_chain_length_max __read_mostly        = 20;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int     ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int     ipv4_mtu(const struct dst_entry *dst);
 static void             ipv4_dst_destroy(struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +194,7 @@ static struct dst_ops ipv4_dst_ops = {
        .gc =                   rt_garbage_collect,
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
-       .default_mtu =          ipv4_default_mtu,
+       .mtu =                  ipv4_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .ifdown =               ipv4_dst_ifdown,
@@ -416,9 +417,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
        else {
                struct rtable *r = v;
                struct neighbour *n;
-               int len;
+               int len, HHUptod;
 
+               rcu_read_lock();
                n = dst_get_neighbour(&r->dst);
+               HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+               rcu_read_unlock();
+
                seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                              "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                        r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                              dst_metric(&r->dst, RTAX_RTTVAR)),
                        r->rt_key_tos,
                        -1,
-                       (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+                       HHUptod,
                        r->rt_spec_dst, &len);
 
                seq_printf(seq, "%*s\n", 127 - len, "");
@@ -837,6 +842,7 @@ static void rt_cache_invalidate(struct net *net)
 
        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+       redirect_genid++;
 }
 
 /*
@@ -1304,7 +1310,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
        spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 {
        struct rtable *rt = (struct rtable *) dst;
        __be32 orig_gw = rt->rt_gateway;
@@ -1315,21 +1321,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
        rt->rt_gateway = peer->redirect_learned.a4;
 
        n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-       if (IS_ERR(n))
-               return PTR_ERR(n);
+       if (IS_ERR(n)) {
+               rt->rt_gateway = orig_gw;
+               return;
+       }
        old_n = xchg(&rt->dst._neighbour, n);
        if (old_n)
                neigh_release(old_n);
-       if (!n || !(n->nud_state & NUD_VALID)) {
-               if (n)
-                       neigh_event_send(n, NULL);
-               rt->rt_gateway = orig_gw;
-               return -EAGAIN;
+       if (!(n->nud_state & NUD_VALID)) {
+               neigh_event_send(n, NULL);
        } else {
                rt->rt_flags |= RTCF_REDIRECTED;
                call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
        }
-       return 0;
 }
 
 /* called in rcu_read_lock() section */
@@ -1391,8 +1395,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
                                peer = rt->peer;
                                if (peer) {
-                                       if (peer->redirect_learned.a4 != new_gw) {
+                                       if (peer->redirect_learned.a4 != new_gw ||
+                                           peer->redirect_genid != redirect_genid) {
                                                peer->redirect_learned.a4 = new_gw;
+                                               peer->redirect_genid = redirect_genid;
                                                atomic_inc(&__rt_peer_genid);
                                        }
                                        check_peer_redir(&rt->dst, peer);
@@ -1685,12 +1691,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }
 
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
 {
-       struct rtable *rt = (struct rtable *) dst;
-
-       if (rt_is_expired(rt))
-               return NULL;
        if (rt->rt_peer_genid != rt_peer_genid()) {
                struct inet_peer *peer;
 
@@ -1699,17 +1701,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
                peer = rt->peer;
                if (peer) {
-                       check_peer_pmtu(dst, peer);
+                       check_peer_pmtu(&rt->dst, peer);
 
+                       if (peer->redirect_genid != redirect_genid)
+                               peer->redirect_learned.a4 = 0;
                        if (peer->redirect_learned.a4 &&
-                           peer->redirect_learned.a4 != rt->rt_gateway) {
-                               if (check_peer_redir(dst, peer))
-                                       return NULL;
-                       }
+                           peer->redirect_learned.a4 != rt->rt_gateway)
+                               check_peer_redir(&rt->dst, peer);
                }
 
                rt->rt_peer_genid = rt_peer_genid();
        }
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+       struct rtable *rt = (struct rtable *) dst;
+
+       if (rt_is_expired(rt))
+               return NULL;
+       ipv4_validate_peer(rt);
        return dst;
 }
 
@@ -1814,12 +1825,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
        return advmss;
 }
 
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
-       unsigned int mtu = dst->dev->mtu;
+       const struct rtable *rt = (const struct rtable *) dst;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       if (mtu && rt_is_output_route(rt))
+               return mtu;
+
+       mtu = dst->dev->mtu;
 
        if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-               const struct rtable *rt = (const struct rtable *) dst;
 
                if (rt->rt_gateway != rt->rt_dst && mtu > 576)
                        mtu = 576;
@@ -1852,6 +1868,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
                dst_init_metrics(&rt->dst, peer->metrics, false);
 
                check_peer_pmtu(&rt->dst, peer);
+               if (peer->redirect_genid != redirect_genid)
+                       peer->redirect_learned.a4 = 0;
                if (peer->redirect_learned.a4 &&
                    peer->redirect_learned.a4 != rt->rt_gateway) {
                        rt->rt_gateway = peer->redirect_learned.a4;
@@ -2357,6 +2375,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                    rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       ipv4_validate_peer(rth);
                        if (noref) {
                                dst_use_noref(&rth->dst, jiffies);
                                skb_dst_set_noref(skb, &rth->dst);
@@ -2415,11 +2434,11 @@ EXPORT_SYMBOL(ip_route_input_common);
 static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4,
                                       __be32 orig_daddr, __be32 orig_saddr,
-                                      int orig_oif, struct net_device *dev_out,
+                                      int orig_oif, __u8 orig_rtos,
+                                      struct net_device *dev_out,
                                       unsigned int flags)
 {
        struct fib_info *fi = res->fi;
-       u32 tos = RT_FL_TOS(fl4);
        struct in_device *in_dev;
        u16 type = res->type;
        struct rtable *rth;
@@ -2470,7 +2489,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        rth->rt_genid = rt_genid(dev_net(dev_out));
        rth->rt_flags   = flags;
        rth->rt_type    = type;
-       rth->rt_key_tos = tos;
+       rth->rt_key_tos = orig_rtos;
        rth->rt_dst     = fl4->daddr;
        rth->rt_src     = fl4->saddr;
        rth->rt_route_iif = 0;
@@ -2520,7 +2539,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 {
        struct net_device *dev_out = NULL;
-       u32 tos = RT_FL_TOS(fl4);
+       __u8 tos = RT_FL_TOS(fl4);
        unsigned int flags = 0;
        struct fib_result res;
        struct rtable *rth;
@@ -2696,7 +2715,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 
 make_route:
        rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
-                              dev_out, flags);
+                              tos, dev_out, flags);
        if (!IS_ERR(rth)) {
                unsigned int hash;
 
@@ -2732,6 +2751,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       ipv4_validate_peer(rth);
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
@@ -2755,9 +2775,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
        return NULL;
 }
 
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 {
-       return 0;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2797,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
        .check                  =       ipv4_blackhole_dst_check,
-       .default_mtu            =       ipv4_blackhole_default_mtu,
+       .mtu                    =       ipv4_blackhole_mtu,
        .default_advmss         =       ipv4_default_advmss,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
        .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
index ab0966df1e2a8aec9e4ecb40e77332cbebbde466..5a65eeac1d29b8f4035314fb1db1dc403af1a428 100644 (file)
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
                goto out;
 
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
 
        /*
@@ -1197,14 +1198,14 @@ try_again:
         * coverage checksum (UDP-Lite), do it before the copy.
         */
 
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
 
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov, len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb,
                                                       sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
 
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
 
index fee46d5a2f125f54451f5115dbddd050138af3a7..1567fb120392a0231dcf47f5c72ac950f952ae48 100644 (file)
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-                          const u32 rnd, const u16 synq_hsize)
+                          const u32 rnd, const u32 synq_hsize)
 {
        u32 c;
 
index c99e3ee9781f246f82185e8a0e246e02a258cf82..26cb08c84b7488d533eafcccf44b3c134b8b9b2d 100644 (file)
@@ -503,7 +503,7 @@ done:
                        goto e_inval;
                if (val > 255 || val < -1)
                        goto e_inval;
-               np->mcast_hops = val;
+               np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
                retv = 0;
                break;
 
index 44e5b7f2a6c1badcbf4dbb5ed2a844ae2daa199c..0cb78d7ddaf5f74237bd6f9e871ae255e9175072 100644 (file)
@@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        }
        if (!rt->rt6i_peer)
                rt6_bind_peer(rt, 1);
-       if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+       if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                goto release;
 
        if (dev->addr_len) {
index 448464844a253474fed5624f75c68c9bb33cd98e..f792b34cbe9cb2710e072c33e4127b4c0d416210 100644 (file)
@@ -186,7 +186,6 @@ config IP6_NF_MANGLE
 
 config IP6_NF_RAW
        tristate  'raw table support (required for TRACE)'
-       depends on NETFILTER_ADVANCED
        help
          This option adds a `raw' table to ip6tables. This table is the very
          first in the netfilter framework and hooks in at the PREROUTING
index 8473016bba4a8cd6ae4dfc420c7827524fc3a84b..3399dd326287e1504b9c5bd72701143574dda4b4 100644 (file)
@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                                    const struct in6_addr *dest);
 static struct dst_entry        *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int     ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int     ip6_mtu(const struct dst_entry *dst);
 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
 static void            ip6_dst_destroy(struct dst_entry *);
 static void            ip6_dst_ifdown(struct dst_entry *,
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = {
        .gc_thresh              =       1024,
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
-       .default_mtu            =       ip6_default_mtu,
+       .mtu                    =       ip6_mtu,
        .cow_metrics            =       ipv6_cow_metrics,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = {
        .neigh_lookup           =       ip6_neigh_lookup,
 };
 
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 {
-       return 0;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
-       .default_mtu            =       ip6_blackhole_default_mtu,
+       .mtu                    =       ip6_blackhole_mtu,
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
        return mtu;
 }
 
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
-       unsigned int mtu = IPV6_MIN_MTU;
        struct inet6_dev *idev;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       if (mtu)
+               return mtu;
+
+       mtu = IPV6_MIN_MTU;
 
        rcu_read_lock();
        idev = __in6_dev_get(dst->dev);
index 36131d122a6f3f9007776ff343a9197bd0430b0a..2dea4bb7b54a3381a7c50e60c2ade383eabe477b 100644 (file)
@@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, tcp_hdr(skb));
 
+       treq->iif = sk->sk_bound_dev_if;
+
+       /* So that link locals have meaning */
+       if (!sk->sk_bound_dev_if &&
+           ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               treq->iif = inet6_iif(skb);
+
        if (!isn) {
                struct inet_peer *peer = NULL;
 
@@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        atomic_inc(&skb->users);
                        treq->pktopts = skb;
                }
-               treq->iif = sk->sk_bound_dev_if;
-
-               /* So that link locals have meaning */
-               if (!sk->sk_bound_dev_if &&
-                   ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-                       treq->iif = inet6_iif(skb);
 
                if (want_cookie) {
                        isn = cookie_v6_init_sequence(sk, skb, &req->mss);
index 846f4757eb8d46394a604595be0698d485ae1ab0..8c25419151839cc2e7a8be6940de19d62a505690 100644 (file)
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +363,10 @@ try_again:
                goto out;
 
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
 
        is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +377,14 @@ try_again:
         * coverage checksum (UDP-Lite), do it before the copy.
         */
 
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
 
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov,len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
                if (err == -EINVAL)
@@ -432,7 +433,7 @@ try_again:
                        datagram_recv_ctl(sk, msg, skb);
        }
 
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
 
index cf0f308abf5e7324aa05a40cfffd4eee166b0d51..89ff8c67943e8af26efe1bac80b306e297921ebd 100644 (file)
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
        /* Get routing info from the tunnel socket */
        skb_dst_drop(skb);
-       skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+       skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
 
        inet = inet_sk(sk);
        fl = &inet->cork.fl;
index b3f65520e7a716312adc51d48d8fa9d8e2993c3d..b064e4df12c6d03a0534f5a8b026b9785befc190 100644 (file)
@@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                return -ENOENT;
        }
 
+       /* if we're already stopping ignore any new requests to stop */
+       if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+               spin_unlock_bh(&sta->lock);
+               return -EALREADY;
+       }
+
        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /* not even started yet! */
                ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                return 0;
        }
 
+       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
        spin_unlock_bh(&sta->lock);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
               sta->sta.addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
        del_timer_sync(&tid_tx->addba_resp_timer);
 
        /*
@@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
         */
        clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
 
+       /*
+        * There might be a few packets being processed right now (on
+        * another CPU) that have already gotten past the aggregation
+        * check when it was still OPERATIONAL and consequently have
+        * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+        * call into the driver at the same time or even before the
+        * TX paths calls into it, which could confuse the driver.
+        * TX paths call into it, which could confuse the driver.
+        * Wait for all currently running TX paths to finish before
+        * telling the driver. New packets will not go through since
+        * the aggregation session is no longer OPERATIONAL.
+        */
+       synchronize_net();
+
        tid_tx->stop_initiator = initiator;
        tid_tx->tx_stop = tx;
 
@@ -757,11 +777,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                goto out;
        }
 
-       del_timer(&tid_tx->addba_resp_timer);
+       del_timer_sync(&tid_tx->addba_resp_timer);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
 #endif
+
+       /*
+        * addba_resp_timer may have fired before we got here, and
+        * caused WANT_STOP to be set. If the stop then was already
+        * processed further, STOPPING might be set.
+        */
+       if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+           test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+               printk(KERN_DEBUG
+                      "got addBA resp for tid %d but we already gave up\n",
+                      tid);
+#endif
+               goto out;
+       }
+
        /*
         * IEEE 802.11-2007 7.3.1.14:
         * In an ADDBA Response frame, when the Status Code field
index c5f341798c16769a1e5c5b7f48937e1a15d47f80..3110cbdc501b83d3650c978d26c51c2ef7c47568 100644 (file)
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
 
                PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
 
-               PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
-                            "3839 bytes");
                PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+                            "3839 bytes");
+               PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
                             "7935 bytes");
 
                /*
index d999bf3b84e1b27d0d79c91d44b7771756d92a3e..cae443563ec9d98a8230de9fae394b960fa66621 100644 (file)
@@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        if (!local->int_scan_req)
                return -ENOMEM;
 
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!local->hw.wiphy->bands[band])
+                       continue;
+               local->int_scan_req->rates[band] = (u32) -1;
+       }
+
        /* if low-level driver supports AP, we also support VLAN */
        if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
                hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
index 80de436eae20eed9aa58f2a99fc7366b4f840649..16518f386117ad8f35dc075bd7ee4f244ab298c4 100644 (file)
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_radiotap_header *rthdr;
        unsigned char *pos;
-       __le16 txflags;
+       u16 txflags;
 
        rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
 
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
        txflags = 0;
        if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
            !is_multicast_ether_addr(hdr->addr1))
-               txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+               txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
        if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
            (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
-               txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+               txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
        else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
-               txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+               txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
        pos += 2;
index eca0fad09709518266d9aed37c61f75e2f05afcf..d5230ecc784d2702feb580da4d80e9ef1d3ec0bd 100644 (file)
@@ -1039,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                             struct ieee80211_sub_if_data,
                                             u.ap);
 
-                       memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
                        WARN_ON(drv_sta_add(local, sdata, &sta->sta));
                }
        }
index 8260b13d93c926cf9964d7a4f4e225be0123a68c..d5597b759ba39724e7bc3fe5376b7b5bb02eb13c 100644 (file)
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST
 
 config NF_CONNTRACK_NETBIOS_NS
        tristate "NetBIOS name service protocol support"
-       depends on NETFILTER_ADVANCED
        select NF_CONNTRACK_BROADCAST
        help
          NetBIOS name service requests are sent as broadcast messages from an
@@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK
        tristate  '"NOTRACK" target support'
        depends on IP_NF_RAW || IP6_NF_RAW
        depends on NF_CONNTRACK
-       depends on NETFILTER_ADVANCED
        help
          The NOTRACK target allows a select rule to specify
          which packets *not* to enter the conntrack/NAT
index 6ee10f5d59bd8a4fde8ad54447c5552d11863303..37d667e3f6f82d82e442b66d4c38dd592cba06d3 100644 (file)
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem data = { };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
index fb90e344e90709f205bef4c2a61b3855be1c7f56..e69e2718fbe162343eaf97defd30153155ee2ef1 100644 (file)
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem data = { };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
index deb3e3dfa5fcb13ba9f125f0a9dbcc7042d0735a..64199b4e93c952e24ca8c5508af1d3788b1286f8 100644 (file)
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 ip2_from = 0, ip2_to, ip2_last, ip2;
        u32 timeout = h->timeout;
        bool with_ports = false;
index 6b368be937c615610a7fa7b90d26b58a2fa041d5..b62c4148b92131444f6e132cb55a058991d68379 100644 (file)
 
 static DEFINE_MUTEX(nf_ct_ecache_mutex);
 
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        unsigned long events;
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_conntrack_event_cb);
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
@@ -83,19 +78,20 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+                                  struct nf_ct_event_notifier *new)
 {
        int ret = 0;
        struct nf_ct_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_conntrack_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+       RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
 
@@ -105,32 +101,34 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
 
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+                                     struct nf_ct_event_notifier *new)
 {
        struct nf_ct_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_conntrack_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
-       RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+       RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+                                  struct nf_exp_event_notifier *new)
 {
        int ret = 0;
        struct nf_exp_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_expect_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       RCU_INIT_POINTER(nf_expect_event_cb, new);
+       RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
 
@@ -140,15 +138,16 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
 
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+                                     struct nf_exp_event_notifier *new)
 {
        struct nf_exp_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_expect_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
-       RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+       RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
index e58aa9b1fe8a043226c4bc7ee90f5145a7adc316..ef21b221f0363a8700fbbc3aadd316423c4f9d66 100644 (file)
@@ -4,7 +4,7 @@
  * (C) 2001 by Jay Schulist <jschlst@samba.org>
  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial connection tracking via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       int ret;
+
+       ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+       if (ret < 0) {
+               pr_err("ctnetlink_init: cannot register notifier.\n");
+               goto err_out;
+       }
+
+       ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+       if (ret < 0) {
+               pr_err("ctnetlink_init: cannot expect register notifier.\n");
+               goto err_unreg_notifier;
+       }
+#endif
+       return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+       nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+       return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+       nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+       struct net *net;
+
+       list_for_each_entry(net, net_exit_list, exit_list)
+               ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+       .init           = ctnetlink_net_init,
+       .exit_batch     = ctnetlink_net_exit_batch,
+};
+
 static int __init ctnetlink_init(void)
 {
        int ret;
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void)
                goto err_unreg_subsys;
        }
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       ret = nf_conntrack_register_notifier(&ctnl_notifier);
-       if (ret < 0) {
-               pr_err("ctnetlink_init: cannot register notifier.\n");
+       if (register_pernet_subsys(&ctnetlink_net_ops)) {
+               pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
 
-       ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
-       if (ret < 0) {
-               pr_err("ctnetlink_init: cannot expect register notifier.\n");
-               goto err_unreg_notifier;
-       }
-#endif
-
        return 0;
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
-       nf_conntrack_unregister_notifier(&ctnl_notifier);
 err_unreg_exp_subsys:
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
 err_unreg_subsys:
        nfnetlink_subsys_unregister(&ctnl_subsys);
 err_out:
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void)
        pr_info("ctnetlink: unregistering from nfnetlink.\n");
 
        nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
-       nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+       unregister_pernet_subsys(&ctnetlink_net_ops);
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
        nfnetlink_subsys_unregister(&ctnl_subsys);
 }
index 9c24de10a6579b78e452e47912c56e5d894e58ea..824f184f7a9bbecbfd9217e86fd474485b135ade 100644 (file)
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
        struct netlbl_domaddr_map *addrmap = NULL;
        struct netlbl_domaddr4_map *map4 = NULL;
        struct netlbl_domaddr6_map *map6 = NULL;
-       const struct in_addr *addr4, *mask4;
-       const struct in6_addr *addr6, *mask6;
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                INIT_LIST_HEAD(&addrmap->list6);
 
                switch (family) {
-               case AF_INET:
-                       addr4 = addr;
-                       mask4 = mask;
+               case AF_INET: {
+                       const struct in_addr *addr4 = addr;
+                       const struct in_addr *mask4 = mask;
                        map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
                        if (map4 == NULL)
                                goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
-               case AF_INET6:
-                       addr6 = addr;
-                       mask6 = mask;
+                       }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+               case AF_INET6: {
+                       const struct in6_addr *addr6 = addr;
+                       const struct in6_addr *mask6 = mask;
                        map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
                        if (map6 == NULL)
                                goto cfg_unlbl_map_add_failure;
@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                        map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
                        ipv6_addr_copy(&map6->list.mask, mask6);
                        map6->list.valid = 1;
-                       ret_val = netlbl_af4list_add(&map4->list,
-                                                    &addrmap->list4);
+                       ret_val = netlbl_af6list_add(&map6->list,
+                                                    &addrmap->list6);
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
+                       }
+#endif /* IPv6 */
                default:
                        goto cfg_unlbl_map_add_failure;
                        break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+#endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+#endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
index 6649463da1b68e6e59e6709c7e1d0b527a60ab63..d617161f8dd3904ddda7ac23da1178ccd03a79c4 100644 (file)
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
                                 ctl->Plog, ctl->Scell_log,
                                 nla_data(tb[TCA_RED_STAB]));
 
-       if (skb_queue_empty(&sch->q))
-               red_end_of_idle_period(&q->parms);
+       if (!q->qdisc->q.qlen)
+               red_start_of_idle_period(&q->parms);
 
        sch_tree_unlock(sch);
        return 0;
index a3b7120fcc74c45cb642a7edc3813bde893240f1..4f4c52c0eeb3b28459d1abad498531a777c598bc 100644 (file)
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
 
 static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+              struct net_device *dev, struct netdev_queue *txq,
+              struct neighbour *mn)
 {
-       struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-       struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-       struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+       struct teql_sched_data *q = qdisc_priv(txq->qdisc);
        struct neighbour *n = q->ncache;
 
        if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 }
 
 static inline int teql_resolve(struct sk_buff *skb,
-                              struct sk_buff *skb_res, struct net_device *dev)
+                              struct sk_buff *skb_res,
+                              struct net_device *dev,
+                              struct netdev_queue *txq)
 {
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+       struct dst_entry *dst = skb_dst(skb);
+       struct neighbour *mn;
+       int res;
+
        if (txq->qdisc == &noop_qdisc)
                return -ENODEV;
 
-       if (dev->header_ops == NULL ||
-           skb_dst(skb) == NULL ||
-           dst_get_neighbour(skb_dst(skb)) == NULL)
+       if (!dev->header_ops || !dst)
                return 0;
-       return __teql_resolve(skb, skb_res, dev);
+
+       rcu_read_lock();
+       mn = dst_get_neighbour(dst);
+       res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+       rcu_read_unlock();
+
+       return res;
 }
 
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
                        continue;
                }
 
-               switch (teql_resolve(skb, skb_res, slave)) {
+               switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
                case 0:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
index 865e68fef21c326c631183c7c7d5ded4ad842647..bf812048cf6f7a244c547e0cd31a731351abfab3 100644 (file)
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
        struct sctp_auth_bytes *key;
 
        /* Verify that we are not going to overflow INT_MAX */
-       if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+       if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
                return NULL;
 
        /* Allocate the shared key */
index 2d78d95955ab5474172ac406a853bc12fff0bea8..55472c48825e6fd43c3357a2f58b398231a2767e 100644 (file)
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-       int ret = 0;
+       int ret = -EAGAIN;
 
        dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
        /* Don't race with disconnect */
        if (xprt_connected(xprt)) {
                if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
-                       ret = -EAGAIN;
                        /*
                         * Notify TCP that we're limited by the application
                         * window size
index 466fbcc5cf77a92ef491be50eb836f652165da16..b595a3d8679f016a7b0936683170a69cc86c7905 100644 (file)
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
                            (UNIXCB(skb).cred != siocb->scm->cred)) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
+                               sk->sk_data_ready(sk, skb->len);
                                break;
                        }
                } else {
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
+                       sk->sk_data_ready(sk, skb->len);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        /* put the skb back if we didn't use it up.. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
+                               sk->sk_data_ready(sk, skb->len);
                                break;
                        }
 
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
+                       sk->sk_data_ready(sk, skb->len);
                        break;
                }
        } while (size);
index b3a476fe82725f738f5215b32cdf2296970451d2..ffafda5022c2b72d8f45714458e5fdd16203c4ee 100644 (file)
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
        [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
 
-       [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
-       [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+       [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+       [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
 
        [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
        [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
index e71f5a66574e31cb1d4e44429402ddcb5a61a529..3302c56f60d1511d292a2e9b4f71ad87b6d6f722 100644 (file)
 #define REG_DBG_PRINT(args...)
 #endif
 
+static struct regulatory_request core_request_world = {
+       .initiator = NL80211_REGDOM_SET_BY_CORE,
+       .alpha2[0] = '0',
+       .alpha2[1] = '0',
+       .intersect = false,
+       .processed = true,
+       .country_ie_env = ENVIRON_ANY,
+};
+
 /* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
 
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
 {
        /* avoid freeing static information or freeing something twice */
        if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
 
        cfg80211_world_regdom = &world_regdom;
        cfg80211_regdomain = NULL;
+
+       if (!full_reset)
+               return;
+
+       if (last_request != &core_request_world)
+               kfree(last_request);
+       last_request = &core_request_world;
 }
 
 /*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
 {
        BUG_ON(!last_request);
 
-       reset_regdomains();
+       reset_regdomains(false);
 
        cfg80211_world_regdom = rd;
        cfg80211_regdomain = rd;
@@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
        }
 
 new_request:
-       kfree(last_request);
+       if (last_request != &core_request_world)
+               kfree(last_request);
 
        last_request = pending_request;
        last_request->intersect = intersect;
@@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2)
 {
        struct regulatory_request *request;
 
-       kfree(last_request);
-       last_request = NULL;
-
        request = kzalloc(sizeof(struct regulatory_request),
                          GFP_KERNEL);
        if (!request)
@@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
+       reset_regdomains(true);
        restore_alpha2(alpha2, reset_user);
 
        /*
@@ -2037,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        }
 
        request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+       if (!request_wiphy &&
+           (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+            last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+               schedule_delayed_work(&reg_timeout, 0);
+               return -ENODEV;
+       }
 
        if (!last_request->intersect) {
                int r;
 
                if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
-                       reset_regdomains();
+                       reset_regdomains(false);
                        cfg80211_regdomain = rd;
                        return 0;
                }
@@ -2063,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                if (r)
                        return r;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = rd;
                return 0;
        }
@@ -2088,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 
                rd = NULL;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = intersected_rd;
 
                return 0;
@@ -2108,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        kfree(rd);
        rd = NULL;
 
-       reset_regdomains();
+       reset_regdomains(false);
        cfg80211_regdomain = intersected_rd;
 
        return 0;
@@ -2261,11 +2281,8 @@ void /* __init_or_exit */ regulatory_exit(void)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
-
-       kfree(last_request);
+       reset_regdomains(true);
 
-       last_request = NULL;
        dev_set_uevent_suppress(&reg_pdev->dev, true);
 
        platform_device_unregister(reg_pdev);
index 552df27dcf53d3388fb2eb9ab3ba7a31c7ffcd64..2118d6446630e3ef64ae3e65b0e7066daf5a85c2 100644 (file)
@@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
        return dst_metric_advmss(dst->path);
 }
 
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
 {
-       return dst_mtu(dst->path);
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst_mtu(dst->path);
 }
 
 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->check = xfrm_dst_check;
                if (likely(dst_ops->default_advmss == NULL))
                        dst_ops->default_advmss = xfrm_default_advmss;
-               if (likely(dst_ops->default_mtu == NULL))
-                       dst_ops->default_mtu = xfrm_default_mtu;
+               if (likely(dst_ops->mtu == NULL))
+                       dst_ops->mtu = xfrm_mtu;
                if (likely(dst_ops->negative_advice == NULL))
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
index 36cc0cc39e78e135630384e6500f4a789fe7aa2a..b566eba4a65cc16128b34152154c3ba26505b692 100644 (file)
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
 static int d_namespace_path(struct path *path, char *buf, int buflen,
                            char **name, int flags)
 {
-       struct path root, tmp;
        char *res;
-       int connected, error = 0;
+       int error = 0;
+       int connected = 1;
+
+       if (path->mnt->mnt_flags & MNT_INTERNAL) {
+               /* it's not mounted anywhere */
+               res = dentry_path(path->dentry, buf, buflen);
+               *name = res;
+               if (IS_ERR(res)) {
+                       *name = buf;
+                       return PTR_ERR(res);
+               }
+               if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+                   strncmp(*name, "/sys/", 5) == 0) {
+                       /* TODO: convert over to using a per namespace
+                        * control instead of hard coded /proc
+                        */
+                       return prepend(name, *name - buf, "/proc", 5);
+               }
+               return 0;
+       }
 
-       /* Get the root we want to resolve too, released below */
+       /* resolve paths relative to chroot?*/
        if (flags & PATH_CHROOT_REL) {
-               /* resolve paths relative to chroot */
+               struct path root;
                get_fs_root(current->fs, &root);
-       } else {
-               /* resolve paths relative to namespace */
-               root.mnt = current->nsproxy->mnt_ns->root;
-               root.dentry = root.mnt->mnt_root;
-               path_get(&root);
+               res = __d_path(path, &root, buf, buflen);
+               if (res && !IS_ERR(res)) {
+                       /* everything's fine */
+                       *name = res;
+                       path_put(&root);
+                       goto ok;
+               }
+               path_put(&root);
+               connected = 0;
        }
 
-       tmp = root;
-       res = __d_path(path, &tmp, buf, buflen);
+       res = d_absolute_path(path, buf, buflen);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
+       if (!our_mnt(path->mnt))
+               connected = 0;
 
+ok:
        /* Handle two cases:
         * 1. A deleted dentry && profile is not allowing mediation of deleted
         * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                        goto out;
        }
 
-       /* Determine if the path is connected to the expected root */
-       connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
-       /* If the path is not connected,
+       /* If the path is not connected to the expected root,
         * check if it is a sysctl and handle specially else remove any
         * leading / that __d_path may have returned.
         * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *     namespace root.
         */
        if (!connected) {
-               /* is the disconnect path a sysctl? */
-               if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
-                   strncmp(*name, "/sys/", 5) == 0) {
-                       /* TODO: convert over to using a per namespace
-                        * control instead of hard coded /proc
-                        */
-                       error = prepend(name, *name - buf, "/proc", 5);
-               } else if (!(flags & PATH_CONNECT_PATH) &&
+               if (!(flags & PATH_CONNECT_PATH) &&
                           !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
-                            (tmp.mnt == current->nsproxy->mnt_ns->root &&
-                             tmp.dentry == tmp.mnt->mnt_root))) {
+                            our_mnt(path->mnt))) {
                        /* disconnected path, don't return pathname starting
                         * with '/'
                         */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
 out:
-       path_put(&root);
-
        return error;
 }
 
index 738bbdf8d4c77ceba3ba3abfe733a90dc56cd4fb..d9f3ced8756ec4dc87492b8edecf172375843f5d 100644 (file)
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
 {
        char *pos = ERR_PTR(-ENOMEM);
        if (buflen >= 256) {
-               struct path ns_root = { };
                /* go to whatever namespace root we are under */
-               pos = __d_path(path, &ns_root, buffer, buflen - 1);
+               pos = d_absolute_path(path, buffer, buflen - 1);
                if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
                        struct inode *inode = path->dentry->d_inode;
                        if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
-               else
+               else {
                        pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+                       /*
+                        * Fall back to local name if absolute name is not
+                        * available.
+                        */
+                       if (pos == ERR_PTR(-EINVAL))
+                               pos = tomoyo_get_local_path(path->dentry, buf,
+                                                           buf_len - 1);
+               }
 encode:
                if (IS_ERR(pos))
                        continue;
index e083122ca55af550f6d4b72e6d5cea5c8c2e1a05..dbf94b189e7576b6e8a3194c5612b0397e6060ee 100644 (file)
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
                struct cs5535audio_dma_desc *desc =
                        &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
                desc->addr = cpu_to_le32(addr);
-               desc->size = cpu_to_le32(period_bytes);
+               desc->size = cpu_to_le16(period_bytes);
                desc->ctlreserved = cpu_to_le16(PRD_EOP);
                desc_addr += sizeof(struct cs5535audio_dma_desc);
                addr += period_bytes;
index e44b107fdc7594c47df5458b7acc0c6440454561..4562e9de6a1ab0dc7015e28d7be14c2eafa5a6b3 100644 (file)
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
 
        /* Search for codec ID */
        for (q = tbl; q->subvendor; q++) {
-               unsigned long vendorid = (q->subdevice) | (q->subvendor << 16);
-
-               if (vendorid == codec->subsystem_id)
+               unsigned int mask = 0xffff0000 | q->subdevice_mask;
+               unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
+               if ((codec->subsystem_id & mask) == id)
                        break;
        }
 
index 7ae7578bdcc038a576efce5d6f1110522641670d..c1da422e085a5230d8b7eb837925772b11ea4328 100644 (file)
@@ -347,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
 
        for (i = 0; i < size; i++) {
                unsigned int val = hdmi_get_eld_data(codec, nid, i);
+               /*
+                * Graphics driver might be writing to ELD buffer right now.
+                * Just abort. The caller will repoll after a while.
+                */
                if (!(val & AC_ELDD_ELD_VALID)) {
-                       if (!i) {
-                               snd_printd(KERN_INFO
-                                          "HDMI: invalid ELD data\n");
-                               ret = -EINVAL;
-                               goto error;
-                       }
                        snd_printd(KERN_INFO
                                  "HDMI: invalid ELD data byte %d\n", i);
-                       val = 0;
-               } else
-                       val &= AC_ELDD_ELD_DATA;
+                       ret = -EINVAL;
+                       goto error;
+               }
+               val &= AC_ELDD_ELD_DATA;
+               /*
+                * The first byte cannot be zero. This can happen on some DVI
+                * connections. Some Intel chips may also need some 250ms delay
+                * to return non-zero ELD data, even when the graphics driver
+                * correctly writes ELD content before setting ELD_valid bit.
+                */
+               if (!val && !i) {
+                       snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
+                       ret = -EINVAL;
+                       goto error;
+               }
                buf[i] = val;
        }
 
index 096507d2ca9a7323c8d8e674ff4da7d921e677e4..7d98240def0b768d4f35e5f5cd33136794559b7f 100644 (file)
@@ -2508,7 +2508,6 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
index 2fbab8e29576bb236ce51fd5f4050bc0283843b6..70a7abda7e225744bf93d90f795c86bfcd95dac6 100644 (file)
@@ -58,6 +58,8 @@ struct cs_spec {
        unsigned int gpio_mask;
        unsigned int gpio_dir;
        unsigned int gpio_data;
+       unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
+       unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
 
        struct hda_pcm pcm_rec[2];      /* PCM information */
 
@@ -76,6 +78,7 @@ enum {
        CS420X_MBP53,
        CS420X_MBP55,
        CS420X_IMAC27,
+       CS420X_APPLE,
        CS420X_AUTO,
        CS420X_MODELS
 };
@@ -928,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
                                        spdif_present ? 0 : PIN_OUT);
                }
        }
-       if (spec->board_config == CS420X_MBP53 ||
-           spec->board_config == CS420X_MBP55 ||
-           spec->board_config == CS420X_IMAC27) {
-               unsigned int gpio = hp_present ? 0x02 : 0x08;
+       if (spec->gpio_eapd_hp) {
+               unsigned int gpio = hp_present ?
+                       spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
                snd_hda_codec_write(codec, 0x01, 0,
                                    AC_VERB_SET_GPIO_DATA, gpio);
        }
@@ -1276,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
        [CS420X_MBP53] = "mbp53",
        [CS420X_MBP55] = "mbp55",
        [CS420X_IMAC27] = "imac27",
+       [CS420X_APPLE] = "apple",
        [CS420X_AUTO] = "auto",
 };
 
@@ -1285,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
        SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
        SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
        SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
-       SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
+       /* this conflicts with too many other models */
+       /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+       {} /* terminator */
+};
+
+static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
+       SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
        {} /* terminator */
 };
 
@@ -1367,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
        spec->board_config =
                snd_hda_check_board_config(codec, CS420X_MODELS,
                                           cs420x_models, cs420x_cfg_tbl);
+       if (spec->board_config < 0)
+               spec->board_config =
+                       snd_hda_check_board_codec_sid_config(codec,
+                               CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
        if (spec->board_config >= 0)
                fix_pincfg(codec, spec->board_config, cs_pincfgs);
 
@@ -1374,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
        case CS420X_IMAC27:
        case CS420X_MBP53:
        case CS420X_MBP55:
-               /* GPIO1 = headphones */
-               /* GPIO3 = speakers */
-               spec->gpio_mask = 0x0a;
-               spec->gpio_dir = 0x0a;
+       case CS420X_APPLE:
+               spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
+               spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
+               spec->gpio_mask = spec->gpio_dir =
+                       spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
                break;
        }
 
index 9850c5b481eae067af45fa88eb05a34dcfe23bdc..c505fd5d338cc91d1de2199143c8141f24890355 100644 (file)
@@ -69,6 +69,7 @@ struct hdmi_spec_per_pin {
        struct hda_codec *codec;
        struct hdmi_eld sink_eld;
        struct delayed_work work;
+       int repoll_count;
 };
 
 struct hdmi_spec {
@@ -748,7 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
  * Unsolicited events
  */
 
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry);
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
 
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
 {
@@ -766,7 +767,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
        if (pin_idx < 0)
                return;
 
-       hdmi_present_sense(&spec->pins[pin_idx], true);
+       hdmi_present_sense(&spec->pins[pin_idx], 1);
 }
 
 static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -960,7 +961,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
        return 0;
 }
 
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
        struct hda_codec *codec = per_pin->codec;
        struct hdmi_eld *eld = &per_pin->sink_eld;
@@ -989,7 +990,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
        if (eld_valid) {
                if (!snd_hdmi_get_eld(eld, codec, pin_nid))
                        snd_hdmi_show_eld(eld);
-               else if (retry) {
+               else if (repoll) {
                        queue_delayed_work(codec->bus->workq,
                                           &per_pin->work,
                                           msecs_to_jiffies(300));
@@ -1004,7 +1005,10 @@ static void hdmi_repoll_eld(struct work_struct *work)
        struct hdmi_spec_per_pin *per_pin =
        container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
 
-       hdmi_present_sense(per_pin, false);
+       if (per_pin->repoll_count++ > 6)
+               per_pin->repoll_count = 0;
+
+       hdmi_present_sense(per_pin, per_pin->repoll_count);
 }
 
 static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -1235,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
        if (err < 0)
                return err;
 
-       hdmi_present_sense(per_pin, false);
+       hdmi_present_sense(per_pin, 0);
        return 0;
 }
 
index 336d14eb72af875bcee54e9b3c5823cc2371ef19..1d07e8fa243360d25236a4942ab5e363d69d1558 100644 (file)
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
        return false;
 }
 
+static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
+{
+       return spec->capsrc_nids ?
+               spec->capsrc_nids[idx] : spec->adc_nids[idx];
+}
+
 /* select the given imux item; either unmute exclusively or select the route */
 static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
                          unsigned int idx, bool force)
@@ -291,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
        imux = &spec->input_mux[mux_idx];
        if (!imux->num_items && mux_idx > 0)
                imux = &spec->input_mux[0];
+       if (!imux->num_items)
+               return 0;
 
        if (idx >= imux->num_items)
                idx = imux->num_items - 1;
@@ -303,8 +311,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
                adc_idx = spec->dyn_adc_idx[idx];
        }
 
-       nid = spec->capsrc_nids ?
-               spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+       nid = get_capsrc(spec, adc_idx);
 
        /* no selection? */
        num_conns = snd_hda_get_conn_list(codec, nid, NULL);
@@ -1054,8 +1061,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
        spec->imux_pins[2] = spec->dock_mic_pin;
        for (i = 0; i < 3; i++) {
                strcpy(imux->items[i].label, texts[i]);
-               if (spec->imux_pins[i])
+               if (spec->imux_pins[i]) {
+                       hda_nid_t pin = spec->imux_pins[i];
+                       int c;
+                       for (c = 0; c < spec->num_adc_nids; c++) {
+                               hda_nid_t cap = get_capsrc(spec, c);
+                               int idx = get_connection_index(codec, cap, pin);
+                               if (idx >= 0) {
+                                       imux->items[i].index = idx;
+                                       break;
+                               }
+                       }
                        imux->num_items = i + 1;
+               }
        }
        spec->num_mux_defs = 1;
        spec->input_mux = imux;
@@ -1957,10 +1975,8 @@ static int alc_build_controls(struct hda_codec *codec)
                if (!kctl)
                        kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
                for (i = 0; kctl && i < kctl->count; i++) {
-                       const hda_nid_t *nids = spec->capsrc_nids;
-                       if (!nids)
-                               nids = spec->adc_nids;
-                       err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+                       err = snd_hda_add_nid(codec, kctl, i,
+                                             get_capsrc(spec, i));
                        if (err < 0)
                                return err;
                }
@@ -2615,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
        case AUTO_PIN_SPEAKER_OUT:
                if (cfg->line_outs == 1)
                        return "Speaker";
+               if (cfg->line_outs == 2)
+                       return ch ? "Bass Speaker" : "Speaker";
                break;
        case AUTO_PIN_HP_OUT:
                /* for multi-io case, only the primary out */
@@ -2747,8 +2765,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
                }
 
                for (c = 0; c < num_adcs; c++) {
-                       hda_nid_t cap = spec->capsrc_nids ?
-                               spec->capsrc_nids[c] : spec->adc_nids[c];
+                       hda_nid_t cap = get_capsrc(spec, c);
                        idx = get_connection_index(codec, cap, pin);
                        if (idx >= 0) {
                                spec->imux_pins[imux->num_items] = pin;
@@ -2889,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
                if (!nid)
                        continue;
                if (found_in_nid_list(nid, spec->multiout.dac_nids,
-                                     spec->multiout.num_dacs))
+                                     ARRAY_SIZE(spec->private_dac_nids)))
                        continue;
                if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
                                      ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2910,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
        return 0;
 }
 
+/* return 0 if no possible DAC is found, 1 if one or more found */
 static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                                    const hda_nid_t *pins, hda_nid_t *dacs)
 {
@@ -2927,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                if (!dacs[i])
                        dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
        }
-       return 0;
+       return 1;
 }
 
 static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2937,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
 static int alc_auto_fill_dac_nids(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       const struct auto_pin_cfg *cfg = &spec->autocfg;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
        bool redone = false;
        int i;
 
@@ -2948,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        spec->multiout.extra_out_nid[0] = 0;
        memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
        spec->multiout.dac_nids = spec->private_dac_nids;
+       spec->multi_ios = 0;
 
        /* fill hard-wired DACs first */
        if (!redone) {
@@ -2981,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        for (i = 0; i < cfg->line_outs; i++) {
                if (spec->private_dac_nids[i])
                        spec->multiout.num_dacs++;
-               else
+               else {
                        memmove(spec->private_dac_nids + i,
                                spec->private_dac_nids + i + 1,
                                sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+                       spec->private_dac_nids[cfg->line_outs - 1] = 0;
+               }
        }
 
        if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3006,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        if (cfg->line_out_type != AUTO_PIN_HP_OUT)
                alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
                                 spec->multiout.hp_out_nid);
-       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
-               alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
-                                spec->multiout.extra_out_nid);
+       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+               int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+                                       cfg->speaker_pins,
+                                       spec->multiout.extra_out_nid);
+               /* if no speaker volume is assigned, try again as the primary
+                * output
+                */
+               if (!err && cfg->speaker_outs > 0 &&
+                   cfg->line_out_type == AUTO_PIN_HP_OUT) {
+                       cfg->hp_outs = cfg->line_outs;
+                       memcpy(cfg->hp_pins, cfg->line_out_pins,
+                              sizeof(cfg->hp_pins));
+                       cfg->line_outs = cfg->speaker_outs;
+                       memcpy(cfg->line_out_pins, cfg->speaker_pins,
+                              sizeof(cfg->speaker_pins));
+                       cfg->speaker_outs = 0;
+                       memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+                       cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+                       redone = false;
+                       goto again;
+               }
+       }
 
        return 0;
 }
@@ -3158,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
 }
 
 static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
-                                    hda_nid_t dac, const char *pfx)
+                                    hda_nid_t dac, const char *pfx,
+                                    int cidx)
 {
        struct alc_spec *spec = codec->spec;
        hda_nid_t sw, vol;
@@ -3174,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
                if (is_ctl_used(spec->sw_ctls, val))
                        return 0; /* already created */
                mark_ctl_usage(spec->sw_ctls, val);
-               return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+               return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
        }
 
        sw = alc_look_for_out_mute_nid(codec, pin, dac);
        vol = alc_look_for_out_vol_nid(codec, pin, dac);
-       err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+       err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
        if (err < 0)
                return err;
-       err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+       err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
        if (err < 0)
                return err;
        return 0;
@@ -3223,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
                hda_nid_t dac = *dacs;
                if (!dac)
                        dac = spec->multiout.dac_nids[0];
-               return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+               return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
        }
 
        if (dacs[num_pins - 1]) {
                /* OK, we have a multi-output system with individual volumes */
                for (i = 0; i < num_pins; i++) {
-                       snprintf(name, sizeof(name), "%s %s",
-                                pfx, channel_name[i]);
-                       err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
-                                                       name);
+                       if (num_pins >= 3) {
+                               snprintf(name, sizeof(name), "%s %s",
+                                        pfx, channel_name[i]);
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               name, 0);
+                       } else {
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               pfx, i);
+                       }
                        if (err < 0)
                                return err;
                }
@@ -3694,8 +3740,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
        if (!pin)
                return 0;
        for (i = 0; i < spec->num_adc_nids; i++) {
-               hda_nid_t cap = spec->capsrc_nids ?
-                       spec->capsrc_nids[i] : spec->adc_nids[i];
+               hda_nid_t cap = get_capsrc(spec, i);
                int idx;
 
                idx = get_connection_index(codec, cap, pin);
index 470f6f286e8144f4f7a8282b2dc126239c69f765..eeb25d529e30b51bebbb42924a4733c6308dc2df 100644 (file)
@@ -215,6 +215,7 @@ struct sigmatel_spec {
        unsigned int gpio_mute;
        unsigned int gpio_led;
        unsigned int gpio_led_polarity;
+       unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
        unsigned int vref_led;
 
        /* stream */
@@ -1641,6 +1642,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
                      "Alienware M17x", STAC_ALIENWARE_M17X),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
                      "Alienware M17x", STAC_ALIENWARE_M17X),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+                     "Alienware M17x", STAC_ALIENWARE_M17X),
        {} /* terminator */
 };
 
@@ -4316,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
                spec->eapd_switch = val;
        get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
        if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
-               if (spec->gpio_led <= 8) {
-                       spec->gpio_mask |= spec->gpio_led;
-                       spec->gpio_dir |= spec->gpio_led;
-                       if (spec->gpio_led_polarity)
-                               spec->gpio_data |= spec->gpio_led;
-               }
+               spec->gpio_mask |= spec->gpio_led;
+               spec->gpio_dir |= spec->gpio_led;
+               if (spec->gpio_led_polarity)
+                       spec->gpio_data |= spec->gpio_led;
        }
 }
 
@@ -4439,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
                int pinctl, def_conf;
 
                /* power on when no jack detection is available */
-               if (!spec->hp_detect) {
+               /* or when the VREF is used for controlling LED */
+               if (!spec->hp_detect ||
+                   spec->vref_mute_led_nid == nid) {
                        stac_toggle_power_map(codec, nid, 1);
                        continue;
                }
@@ -4911,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                        if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
                                  &spec->gpio_led_polarity,
                                  &spec->gpio_led) == 2) {
-                               if (spec->gpio_led < 4)
+                               unsigned int max_gpio;
+                               max_gpio = snd_hda_param_read(codec, codec->afg,
+                                                             AC_PAR_GPIO_CAP);
+                               max_gpio &= AC_GPIO_IO_COUNT;
+                               if (spec->gpio_led < max_gpio)
                                        spec->gpio_led = 1 << spec->gpio_led;
+                               else
+                                       spec->vref_mute_led_nid = spec->gpio_led;
                                return 1;
                        }
                        if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -5041,29 +5050,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
        struct sigmatel_spec *spec = codec->spec;
 
        /* sync mute LED */
-       if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
-                       stac_gpio_set(codec, spec->gpio_mask,
-                                       spec->gpio_dir, spec->gpio_data);
-               } else {
-                       stac_vrefout_set(codec,
-                                       spec->gpio_led, spec->vref_led);
-               }
-       }
-       return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
-       struct sigmatel_spec *spec = codec->spec;
-       if (spec->gpio_led > 8) {
-               /* with vref-out pin used for mute led control
-                * codec AFG is prevented from D3 state, but on
-                * system suspend it can (and should) be used
-                */
-               snd_hda_codec_read(codec, codec->afg, 0,
-                               AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       }
+       if (spec->vref_mute_led_nid)
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
+       else if (spec->gpio_led)
+               stac_gpio_set(codec, spec->gpio_mask,
+                             spec->gpio_dir, spec->gpio_data);
        return 0;
 }
 
@@ -5074,7 +5066,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
        struct sigmatel_spec *spec = codec->spec;
 
        if (power_state == AC_PWRST_D3) {
-               if (spec->gpio_led > 8) {
+               if (spec->vref_mute_led_nid) {
                        /* with vref-out pin used for mute led control
                         * codec AFG is prevented from D3 state
                         */
@@ -5127,7 +5119,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                }
        }
        /*polarity defines *not* muted state level*/
-       if (spec->gpio_led <= 8) {
+       if (!spec->vref_mute_led_nid) {
                if (muted)
                        spec->gpio_data &= ~spec->gpio_led; /* orange */
                else
@@ -5145,7 +5137,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                muted_lvl = spec->gpio_led_polarity ?
                                AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
                spec->vref_led = muted ? muted_lvl : notmtd_lvl;
-               stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
        }
        return 0;
 }
@@ -5659,15 +5652,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
@@ -5974,15 +5965,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
index 431c0d417eeb61500c3332c687568a7851307201..b5137629f8e942a75a941e0e183cef98fb58db02 100644 (file)
@@ -208,6 +208,7 @@ struct via_spec {
        /* work to check hp jack state */
        struct hda_codec *codec;
        struct delayed_work vt1708_hp_work;
+       int hp_work_active;
        int vt1708_jack_detect;
        int vt1708_hp_present;
 
@@ -305,27 +306,35 @@ enum {
 static void analog_low_current_mode(struct hda_codec *codec);
 static bool is_aa_path_mute(struct hda_codec *codec);
 
-static void vt1708_start_hp_work(struct via_spec *spec)
+#define hp_detect_with_aa(codec) \
+       (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
+        !is_aa_path_mute(codec))
+
+static void vt1708_stop_hp_work(struct via_spec *spec)
 {
        if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
                return;
-       snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
-                           !spec->vt1708_jack_detect);
-       if (!delayed_work_pending(&spec->vt1708_hp_work))
-               schedule_delayed_work(&spec->vt1708_hp_work,
-                                     msecs_to_jiffies(100));
+       if (spec->hp_work_active) {
+               snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
+               cancel_delayed_work_sync(&spec->vt1708_hp_work);
+               spec->hp_work_active = 0;
+       }
 }
 
-static void vt1708_stop_hp_work(struct via_spec *spec)
+static void vt1708_update_hp_work(struct via_spec *spec)
 {
        if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
                return;
-       if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1
-           && !is_aa_path_mute(spec->codec))
-               return;
-       snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
-                           !spec->vt1708_jack_detect);
-       cancel_delayed_work_sync(&spec->vt1708_hp_work);
+       if (spec->vt1708_jack_detect &&
+           (spec->active_streams || hp_detect_with_aa(spec->codec))) {
+               if (!spec->hp_work_active) {
+                       snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
+                       schedule_delayed_work(&spec->vt1708_hp_work,
+                                             msecs_to_jiffies(100));
+                       spec->hp_work_active = 1;
+               }
+       } else if (!hp_detect_with_aa(spec->codec))
+               vt1708_stop_hp_work(spec);
 }
 
 static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
 
        set_widgets_power_state(codec);
        analog_low_current_mode(snd_kcontrol_chip(kcontrol));
-       if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) {
-               if (is_aa_path_mute(codec))
-                       vt1708_start_hp_work(codec->spec);
-               else
-                       vt1708_stop_hp_work(codec->spec);
-       }
+       vt1708_update_hp_work(codec->spec);
        return change;
 }
 
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
        spec->cur_dac_stream_tag = stream_tag;
        spec->cur_dac_format = format;
        mutex_unlock(&spec->config_mutex);
-       vt1708_start_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
        spec->cur_hp_stream_tag = stream_tag;
        spec->cur_hp_format = format;
        mutex_unlock(&spec->config_mutex);
-       vt1708_start_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
        snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
        spec->active_streams &= ~STREAM_MULTI_OUT;
        mutex_unlock(&spec->config_mutex);
-       vt1708_stop_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
                snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
        spec->active_streams &= ~STREAM_INDEP_HP;
        mutex_unlock(&spec->config_mutex);
-       vt1708_stop_hp_work(spec);
+       vt1708_update_hp_work(spec);
        return 0;
 }
 
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
        int nums;
        struct via_spec *spec = codec->spec;
 
-       if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0])
+       if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
+           (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
                present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
 
        if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
 
        if (spec->codec_type != VT1708)
                return 0;
-       spec->vt1708_jack_detect =
-               !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
        ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
        return 0;
 }
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        struct via_spec *spec = codec->spec;
-       int change;
+       int val;
 
        if (spec->codec_type != VT1708)
                return 0;
-       spec->vt1708_jack_detect = ucontrol->value.integer.value[0];
-       change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8))
-               == !spec->vt1708_jack_detect;
-       if (spec->vt1708_jack_detect) {
+       val = !!ucontrol->value.integer.value[0];
+       if (spec->vt1708_jack_detect == val)
+               return 0;
+       spec->vt1708_jack_detect = val;
+       if (spec->vt1708_jack_detect &&
+           snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
                mute_aa_path(codec, 1);
                notify_aa_path_ctls(codec);
        }
-       return change;
+       via_hp_automute(codec);
+       vt1708_update_hp_work(spec);
+       return 1;
 }
 
 static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
        via_auto_init_unsol_event(codec);
 
        via_hp_automute(codec);
+       vt1708_update_hp_work(spec);
 
        return 0;
 }
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
                spec->vt1708_hp_present ^= 1;
                via_hp_automute(spec->codec);
        }
-       vt1708_start_hp_work(spec);
+       if (spec->vt1708_jack_detect)
+               schedule_delayed_work(&spec->vt1708_hp_work,
+                                     msecs_to_jiffies(100));
 }
 
 static int get_mux_nids(struct hda_codec *codec)
index 5c8717e29eebdbc4983933dcccaee36492457e41..8c3e7fcefd99c91c38d39f92fd0cae8323cee210 100644 (file)
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
        return ioread32(address);
 }
 
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
+static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
+                              u32 len)
 {
-       void __iomem *address = lx_dsp_register(chip, port);
-       memcpy_fromio(data, address, len*sizeof(u32));
+       u32 __iomem *address = lx_dsp_register(chip, port);
+       int i;
+
+       /* we cannot use memcpy_fromio */
+       for (i = 0; i != len; ++i)
+               data[i] = ioread32(address + i);
 }
 
 
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
        iowrite32(data, address);
 }
 
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
-                        u32 len)
+static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
+                               const u32 *data, u32 len)
 {
-       void __iomem *address = lx_dsp_register(chip, port);
-       memcpy_toio(address, data, len*sizeof(u32));
+       u32 __iomem *address = lx_dsp_register(chip, port);
+       int i;
+
+       /* we cannot use memcpy_toio */
+       for (i = 0; i != len; ++i)
+               iowrite32(data[i], address + i);
 }
 
 
index 1dd562980b6c3595c233beffe49012c6352c605c..4d7ff797a6468abf5b5499cdfd92dab430d9948f 100644 (file)
@@ -72,10 +72,7 @@ enum {
 };
 
 unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
 void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
-                        u32 len);
 
 /* plx register access */
 enum {
index e760adad9523ebf82db4f07dbc259c73d8d44326..19ee2203cbb50fe7250bca2bad79c14e15a6069b 100644 (file)
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
                        hdspm->io_type = AES32;
                        hdspm->card_name = "RME AES32";
                        hdspm->midiPorts = 2;
-               } else if ((hdspm->firmware_rev == 0xd5) ||
+               } else if ((hdspm->firmware_rev == 0xd2) ||
                        ((hdspm->firmware_rev >= 0xc8)  &&
                                (hdspm->firmware_rev <= 0xcf))) {
                        hdspm->io_type = MADI;
index a391e622a19209f535eb0441e85348cda4d085aa..28dfafb56dd1d70a9d95e4ef53f6e7ba22fb4faf 100644 (file)
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
 static int enable = 1;
+static int codecs = 1;
 
 module_param(index, int, 0444);
 MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
 MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
 module_param(enable, bool, 0444);
 MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
        dma_addr_t silence_dma_addr;
 };
 
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
 #define SIS_PRIMARY_CODEC_PRESENT      0x0001
 #define SIS_SECONDARY_CODEC_PRESENT    0x0002
 #define SIS_TERTIARY_CODEC_PRESENT     0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
 {
        unsigned long io = sis->ioport;
        void __iomem *ioaddr = sis->ioaddr;
+       unsigned long timeout;
        u16 status;
        int count;
        int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
        while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
                udelay(1);
 
+       /* Command complete, we can let go of the semaphore now.
+        */
+       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+       if (!count)
+               return -EIO;
+
        /* Now that we've finished the reset, find out what's attached.
+        * There are some codec/board combinations that take an extremely
+        * long time to come up. 350+ ms has been observed in the field,
+        * so we'll give them up to 500ms.
         */
-       status = inl(io + SIS_AC97_STATUS);
-       if (status & SIS_AC97_STATUS_CODEC_READY)
-               sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC2_READY)
-               sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC3_READY)
-               sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
-       /* All done, let go of the semaphore, and check for errors
+       sis->codecs_present = 0;
+       timeout = msecs_to_jiffies(500) + jiffies;
+       while (time_before_eq(jiffies, timeout)) {
+               status = inl(io + SIS_AC97_STATUS);
+               if (status & SIS_AC97_STATUS_CODEC_READY)
+                       sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC2_READY)
+                       sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC3_READY)
+                       sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+               if (sis->codecs_present == codecs)
+                       break;
+
+               msleep(1);
+       }
+
+       /* All done, check for errors.
         */
-       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
-       if (!sis->codecs_present || !count)
+       if (!sis->codecs_present) {
+               printk(KERN_ERR "sis7019: could not find any codecs\n");
                return -EIO;
+       }
+
+       if (sis->codecs_present != codecs) {
+               printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+                      sis->codecs_present, codecs);
+       }
 
        /* Let the hardware know that the audio driver is alive,
         * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
        if (!enable)
                goto error_out;
 
+       /* The user can specify which codecs should be present so that we
+        * can wait for them to show up if they are slow to recover from
+        * the AC97 cold reset. We default to a single codec, the primary.
+        *
+        * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+        */
+       codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+                 SIS_TERTIARY_CODEC_PRESENT;
+       if (!codecs)
+               codecs = SIS_PRIMARY_CODEC_PRESENT;
+
        rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
        if (rc < 0)
                goto error_out;
index bee3c94f58b0736c57f361141e0ed32e58640317..d1fcc816ce9705c5aca82f68eb327d65618301cd 100644 (file)
@@ -1,6 +1,6 @@
 config SND_ATMEL_SOC
        tristate "SoC Audio for the Atmel System-on-Chip"
-       depends on ARCH_AT91 || AVR32
+       depends on ARCH_AT91
        help
          Say Y or M if you want to add support for codecs attached to
          the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
          Say Y if you want to add support for SoC audio on WM8731-based
          AT91sam9g20 evaluation board.
 
-config SND_AT32_SOC_PLAYPAQ
-        tristate "SoC Audio support for PlayPaq with WM8510"
-        depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
-        select SND_ATMEL_SOC_SSC
-        select SND_SOC_WM8510
-        help
-          Say Y or M here if you want to add support for SoC audio
-          on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
-        bool "Run CODEC on PlayPaq in slave mode"
-        depends on SND_AT32_SOC_PLAYPAQ
-        default n
-        help
-          Say Y if you want to run with the AT32 SSC generating the BCLK
-          and FRAME signals on the PlayPaq.  Unless you want to play
-          with the AT32 as the SSC master, you probably want to say N here,
-          as this will give you better sound quality.
-
 config SND_AT91_SOC_AFEB9260
        tristate "SoC Audio support for AFEB9260 board"
        depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
index e7ea56bd5f82a94de94d4169c464e809d34dfb5e..a5c0bf19da78f01e823fc614c61528c30272a67c 100644 (file)
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
 # AT91 Machine Support
 snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
 
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
 obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
 obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644 (file)
index 73ae99a..0000000
+++ /dev/null
@@ -1,473 +0,0 @@
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- *    Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code.  Something like:
- *     at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN               GPIO_PIN_PA(30)
-#define MCLK_PERIPH            GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
-       /* CMR div */
-       unsigned int cmr_div;
-
-       /* Frame period (as needed by xCMR.PERIOD) */
-       unsigned int period;
-
-       /* The SSC clock rate these settings where calculated for */
-       unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
-       struct snd_pcm_hw_params *params,
-       struct snd_soc_dai *cpu_dai)
-{
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       struct ssc_clock_data cd;
-       unsigned int rate, width_bits, channels;
-       unsigned int bitrate, ssc_div;
-       unsigned actual_rate;
-
-
-       /*
-        * Figure out required bitrate
-        */
-       rate = params_rate(params);
-       channels = params_channels(params);
-       width_bits = snd_pcm_format_physical_width(params_format(params));
-       bitrate = rate * width_bits * channels;
-
-
-       /*
-        * Figure out required SSC divider and period for required bitrate
-        */
-       cd.ssc_rate = clk_get_rate(ssc->clk);
-       ssc_div = cd.ssc_rate / bitrate;
-       cd.cmr_div = ssc_div / 2;
-       if (ssc_div & 1) {
-               /* round cmr_div up */
-               cd.cmr_div++;
-       }
-       cd.period = width_bits - 1;
-
-
-       /*
-        * Find actual rate, compare to requested rate
-        */
-       actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
-       pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
-                rate, actual_rate);
-
-
-       return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
-                                   struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
-       int ret;
-
-
-       /* Due to difficulties with getting the correct clocks from the AT32's
-        * PLL0, we're going to let the CODEC be in charge of all the clocks
-        */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBM_CFM);
-#else
-       struct ssc_clock_data cd;
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
-       if (ssc == NULL) {
-               pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
-               return -EINVAL;
-       }
-
-
-       /*
-        * Figure out PLL and BCLK dividers for WM8510
-        */
-       switch (params_rate(params)) {
-       case 48000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 44100:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 22050:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_4;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 16000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_6;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 11025:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_8;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 8000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_12;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       default:
-               pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
-                          params_rate(params));
-               return -EINVAL;
-       }
-
-
-       /*
-        * set CPU and CODEC DAI configuration
-        */
-       ret = snd_soc_dai_set_fmt(codec_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CODEC DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       /*
-        * Set CPU clock configuration
-        */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
-       pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
-                cd.cmr_div, cd.period);
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
-                                         cd.period);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU transmit period (%d)\n",
-                          ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       /*
-        * Set CODEC clock configuration
-        */
-       pr_debug("playpaq_wm8510: "
-                "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
-                clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
-       if (ret < 0) {
-               pr_warning
-                   ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
-                    ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
-                                        clk_get_rate(CODEC_CLK), pll_out);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
-       .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
-       SND_SOC_DAPM_MIC("Int Mic", NULL),
-       SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
-       /* speaker connected to SPKOUT */
-       {"Ext Spk", NULL, "SPKOUTP"},
-       {"Ext Spk", NULL, "SPKOUTN"},
-
-       {"Mic Bias", NULL, "Int Mic"},
-       {"MICN", NULL, "Mic Bias"},
-       {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
-       struct snd_soc_codec *codec = rtd->codec;
-       struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int i;
-
-       /*
-        * Add DAPM widgets
-        */
-       for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-               snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
-       /*
-        * Setup audio path interconnects
-        */
-       snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
-       /* always connected pins */
-       snd_soc_dapm_enable_pin(dapm, "Int Mic");
-       snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
-       /* Make CSB show PLL rate */
-       snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
-                                      WM8510_OPCLKDIV_1 | 4);
-
-       return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
-       .name = "WM8510",
-       .stream_name = "WM8510 PCM",
-       .cpu_dai_name= "atmel-ssc-dai.0",
-       .platform_name = "atmel-pcm-audio",
-       .codec_name = "wm8510-codec.0-0x1a",
-       .codec_dai_name = "wm8510-hifi",
-       .init = playpaq_wm8510_init,
-       .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
-       .name = "LRS_PlayPaq_WM8510",
-       .dai_link = &playpaq_wm8510_dai,
-       .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
-       int ret = 0;
-
-       /*
-        * Configure MCLK for WM8510
-        */
-       _gclk0 = clk_get(NULL, "gclk0");
-       if (IS_ERR(_gclk0)) {
-               _gclk0 = NULL;
-               ret = PTR_ERR(_gclk0);
-               goto err_gclk0;
-       }
-       _pll0 = clk_get(NULL, "pll0");
-       if (IS_ERR(_pll0)) {
-               _pll0 = NULL;
-               ret = PTR_ERR(_pll0);
-               goto err_pll0;
-       }
-       ret = clk_set_parent(_gclk0, _pll0);
-       if (ret) {
-               pr_warning("snd-soc-playpaq: "
-                          "Failed to set PLL0 as parent for DAC clock\n");
-               goto err_set_clk;
-       }
-       clk_set_rate(CODEC_CLK, 12000000);
-       clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
-       /*
-        * Create and register platform device
-        */
-       playpaq_snd_device = platform_device_alloc("soc-audio", 0);
-       if (playpaq_snd_device == NULL) {
-               ret = -ENOMEM;
-               goto err_device_alloc;
-       }
-
-       platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
-       ret = platform_device_add(playpaq_snd_device);
-       if (ret) {
-               pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
-                          ret);
-               goto err_device_add;
-       }
-
-       return 0;
-
-
-err_device_add:
-       if (playpaq_snd_device != NULL) {
-               platform_device_put(playpaq_snd_device);
-               playpaq_snd_device = NULL;
-       }
-err_device_alloc:
-err_set_clk:
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-err_pll0:
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_free_pin(MCLK_PIN);
-#endif
-
-       platform_device_unregister(playpaq_snd_device);
-       playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
index 444747f0db26615992e360b6a162eaf7b9f9ac44..dd7be0dbbc58189ff153b1a7b5724967bd4caadd 100644 (file)
@@ -34,7 +34,7 @@
 
 #define AD1836_ADC_CTRL2               13
 #define AD1836_ADC_WORD_LEN_MASK       0x30
-#define AD1836_ADC_WORD_OFFSET         5
+#define AD1836_ADC_WORD_OFFSET         4
 #define AD1836_ADC_SERFMT_MASK         (7 << 6)
 #define AD1836_ADC_SERFMT_PCK256       (0x4 << 6)
 #define AD1836_ADC_SERFMT_PCK128       (0x5 << 6)
index 1ccf8dd47576ce4c746ffb54461c19f196acff43..45c63028b40d1636b56f6aa3e5f13e0d110b9a5d 100644 (file)
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
 };
 
 static const unsigned int adau1373_bass_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(3),
        0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
        3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
        5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
index f1f237ecec2a6c43dcdf9ee31b72da7f0cdecff6..73f46eb459f15fa43c5aadc89c2d5a61346fb351 100644 (file)
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
 static int cs4270_soc_resume(struct snd_soc_codec *codec)
 {
        struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-       struct i2c_client *i2c_client = to_i2c_client(codec->dev);
        int reg;
 
        regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
        ndelay(500);
 
        /* first restore the entire register cache ... */
-       for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
-               u8 val = snd_soc_read(codec, reg);
-
-               if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
-                       dev_err(codec->dev, "i2c write failed\n");
-                       return -EIO;
-               }
-       }
+       snd_soc_cache_sync(codec);
 
        /* ... then disable the power-down bits */
        reg = snd_soc_read(codec, CS4270_PWRCTL);
index 23d1bd5dadda36185e2c56702e7fd17a739f9a1a..69fde1506fe1fde2fe312ff80176723171ccef07 100644 (file)
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
 {
        int ret;
        /* Set power-down bit */
-       ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN);
+       ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
+                                 CS4271_MODE2_PDN);
        if (ret < 0)
                return ret;
        return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
                return ret;
        }
 
-       ret = snd_soc_update_bits(codec, CS4271_MODE2, 0,
-               CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
+       ret = snd_soc_update_bits(codec, CS4271_MODE2,
+                                 CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
+                                 CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
        if (ret < 0)
                return ret;
        ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
index 8c3c8205d19e99016e47b1564aa58cbdf91b0bab..1ee66361f61b946e5738798daf03d61baf2f8ecb 100644 (file)
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
 
 static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
        .probe =        cs42l51_probe,
-       .reg_cache_size = CS42L51_NUMREGS,
+       .reg_cache_size = CS42L51_NUMREGS + 1,
        .reg_word_size = sizeof(u8),
 };
 
index 9e7e964a5fa3fd1a8824c17c9dad4bbc60923909..dcf6f2a1600ae5726a60d0a7e11ef6e53d57d466 100644 (file)
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
        unsigned int mask = mc->max;
        unsigned int val = (ucontrol->value.integer.value[0] & mask);
        unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
-       unsigned int change = 1;
+       unsigned int change = 0;
 
-       if (((max9877_regs[reg] >> shift) & mask) == val)
-               change = 0;
+       if (((max9877_regs[reg] >> shift) & mask) != val)
+               change = 1;
 
-       if (((max9877_regs[reg2] >> shift) & mask) == val2)
-               change = 0;
+       if (((max9877_regs[reg2] >> shift) & mask) != val2)
+               change = 1;
 
        if (change) {
                max9877_regs[reg] &= ~(mask << shift);
index 27a078cbb6eb2542cb6bc0fa0be372b5a77cabac..4646e808b90a334e0d59935a1e93244672d276be 100644 (file)
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
 static unsigned int mic_bst_tlv[] = {
-       TLV_DB_RANGE_HEAD(6),
+       TLV_DB_RANGE_HEAD(7),
        0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
        1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
        2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
index d15695d1c27397a7b3f2966c1806584e5e9a39bc..bbcf921166f7470fad24577d2aa52d1cf85258f4 100644 (file)
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
 
 /* tlv for mic gain, 0db 20db 30db 40db */
 static const unsigned int mic_gain_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(2),
        0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
        1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
 };
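
A number of hunks in this merge only correct TLV_DB_RANGE_HEAD() counts. The argument has to equal the number of dB ranges that follow it, because the macro derives the advertised TLV length from that count, so a wrong value makes user space see too many or too few ranges. A minimal, hypothetical table with two ranges (values in 0.01 dB) looks like:

static const unsigned int example_gain_tlv[] = {
	TLV_DB_RANGE_HEAD(2),			/* exactly two ranges below */
	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),	/* step 0:    0 dB */
	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),	/* steps 1-3: 20, 30, 40 dB */
};
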
index bb82408ab8e1bb93f187dac610e5a1141501bd72..d2f37152f940cebc67f93a5bff140e64a67a07e7 100644 (file)
@@ -76,6 +76,8 @@ struct sta32x_priv {
 
        unsigned int mclk;
        unsigned int format;
+
+       u32 coef_shadow[STA32X_COEF_COUNT];
 };
 
 static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
        int numcoef = kcontrol->private_value >> 16;
        int index = kcontrol->private_value & 0xffff;
        unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
        snd_soc_write(codec, STA32X_CFUD, cfud);
 
        snd_soc_write(codec, STA32X_CFADDR2, index);
+       for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
+               sta32x->coef_shadow[index + i] =
+                         (ucontrol->value.bytes.data[3 * i] << 16)
+                       | (ucontrol->value.bytes.data[3 * i + 1] << 8)
+                       | (ucontrol->value.bytes.data[3 * i + 2]);
        for (i = 0; i < 3 * numcoef; i++)
                snd_soc_write(codec, STA32X_B1CF1 + i,
                              ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
+{
+       struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+       unsigned int cfud;
+       int i;
+
+       /* preserve reserved bits in STA32X_CFUD */
+       cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
+
+       for (i = 0; i < STA32X_COEF_COUNT; i++) {
+               snd_soc_write(codec, STA32X_CFADDR2, i);
+               snd_soc_write(codec, STA32X_B1CF1,
+                             (sta32x->coef_shadow[i] >> 16) & 0xff);
+               snd_soc_write(codec, STA32X_B1CF2,
+                             (sta32x->coef_shadow[i] >> 8) & 0xff);
+               snd_soc_write(codec, STA32X_B1CF3,
+                             (sta32x->coef_shadow[i]) & 0xff);
+               /* chip documentation does not say if the bits are
+                * self-clearing, so do it explicitly */
+               snd_soc_write(codec, STA32X_CFUD, cfud);
+               snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+       }
+       return 0;
+}
+
+int sta32x_cache_sync(struct snd_soc_codec *codec)
+{
+       unsigned int mute;
+       int rc;
+
+       if (!codec->cache_sync)
+               return 0;
+
+       /* mute during register sync */
+       mute = snd_soc_read(codec, STA32X_MMUTE);
+       snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
+       sta32x_sync_coef_shadow(codec);
+       rc = snd_soc_cache_sync(codec);
+       snd_soc_write(codec, STA32X_MMUTE, mute);
+       return rc;
+}
+
 #define SINGLE_COEF(xname, index) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
                                return ret;
                        }
 
-                       snd_soc_cache_sync(codec);
+                       sta32x_cache_sync(codec);
                }
 
                /* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
                            STA32X_CxCFG_OM_MASK,
                            2 << STA32X_CxCFG_OM_SHIFT);
 
+       /* initialize coefficient shadow RAM with reset values */
+       for (i = 4; i <= 49; i += 5)
+               sta32x->coef_shadow[i] = 0x400000;
+       for (i = 50; i <= 54; i++)
+               sta32x->coef_shadow[i] = 0x7fffff;
+       sta32x->coef_shadow[55] = 0x5a9df7;
+       sta32x->coef_shadow[56] = 0x7fffff;
+       sta32x->coef_shadow[59] = 0x7fffff;
+       sta32x->coef_shadow[60] = 0x400000;
+       sta32x->coef_shadow[61] = 0x400000;
+
        sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
        /* Bias level configuration will have done an extra enable */
        regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
index b97ee5a75667399e77a5bcd7493432407d554cc2..d8e32a6262ee087ec13e73d24e57b936eefaa42b 100644 (file)
@@ -19,6 +19,7 @@
 /* STA326 register addresses */
 
 #define STA32X_REGISTER_COUNT  0x2d
+#define STA32X_COEF_COUNT 62
 
 #define STA32X_CONFA   0x00
 #define STA32X_CONFB    0x01
index c5ca8cfea60f80f8de27cc5d12ad55f69bd900f2..0441893e270ed2b5621833fecc997b6b20f85159 100644 (file)
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
 
 static int __init uda1380_modinit(void)
 {
-       int ret;
+       int ret = 0;
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        ret = i2c_add_driver(&uda1380_i2c_driver);
        if (ret != 0)
                pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
 #endif
-       return 0;
+       return ret;
 }
 module_init(uda1380_modinit);
 
index 7e5ec03f6f8dd579d1bd43413fd1d007a1989bcb..a7c9ae17fc7eb0e743a8dbfb27db88fea58a456e 100644 (file)
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_write(codec, WM8731_PWR, 0xffff);
                regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
                                       wm8731->supplies);
+               codec->cache_sync = 1;
                break;
        }
        codec->dapm.bias_level = level;
index a9504710bb692e806655e785e5f35121ecf4afc1..3a629d0d690ed1fbe8129f096e0edf492faf9eb2 100644 (file)
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
        struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
        u16 ioctl;
 
+       if (wm8753->dai_func == ucontrol->value.integer.value[0])
+               return 0;
+
        if (codec->active)
                return -EBUSY;
 
index 91d3c6dbeba3317758d747a6c6568cae625ea79e..53edd9a8c758f24943de1219b37cb45602fc11ef 100644 (file)
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
 static const unsigned int mixinpga_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(5),
        0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
        2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
        3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
 static const unsigned int classd_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(2),
        0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
        7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
 };
index eec8e143511665a538c6950a4f679ad217aad827..d1a142f48b09f03fb565a5c4a6419842a854c6f6 100644 (file)
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
 static const unsigned int drc_max_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(2),
        0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
        3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
 };
index 9c982e47eb99308b377e7143d8024c19863b14c5..d0c545b73d7865c04b9fefd286b85b7fe63fc5b2 100644 (file)
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
        bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
 
        lrclk = bclk_rate / params_rate(params);
+       if (!lrclk) {
+               dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+                       bclk_rate);
+               return -EINVAL;
+       }
        dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
                lrclk, bclk_rate / lrclk);
 
@@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
                switch (wm8994->revision) {
                case 0:
                case 1:
+               case 2:
+               case 3:
                        wm8994->hubs.dcs_codes_l = -9;
                        wm8994->hubs.dcs_codes_r = -5;
                        break;
index 3cd35a02c28c7164f525f00f7375ac5ef4d6bbb1..4a398c3bfe84aea9ef4f6c8540f09b8b2bf34282 100644 (file)
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
                        mdelay(100);
 
                        /* Normal bias enable & soft start off */
-                       reg |= WM9081_BIAS_ENA;
                        reg &= ~WM9081_VMID_RAMP;
                        snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
 
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
                }
 
                /* VMID 2*240k */
-               reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
+               reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
                reg &= ~WM9081_VMID_SEL_MASK;
                reg |= 0x04;
                snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
                break;
 
        case SND_SOC_BIAS_OFF:
-               /* Startup bias source */
+               /* Startup bias source and disable bias */
                reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
                reg |= WM9081_BIAS_SRC;
+               reg &= ~WM9081_BIAS_ENA;
                snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
 
-               /* Disable VMID and biases with soft ramping */
+               /* Disable VMID with soft ramping */
                reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
-               reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
+               reg &= ~WM9081_VMID_SEL_MASK;
                reg |= WM9081_VMID_RAMP;
                snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
 
index 2b5252c9e37774963a55626e284878e7ff777414..f94c06057c64c31ac6e4bd70fbf80799cc52837d 100644 (file)
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
 }
 
 static const unsigned int in_tlv[] = {
-       TLV_DB_RANGE_HEAD(6),
+       TLV_DB_RANGE_HEAD(3),
        0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
        1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
        4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
 };
 static const unsigned int mix_tlv[] = {
-       TLV_DB_RANGE_HEAD(4),
+       TLV_DB_RANGE_HEAD(2),
        0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
        3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
 };
 static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
 static const unsigned int spkboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(2),
        0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
        7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
 };
index 84f33d4ea2cd5ec5461d0f8518c2462c3a58d544..48e61e912400fb2d2cca4ad74d9faf17a3075444 100644 (file)
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
 static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
 static const unsigned int spkboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(7),
+       TLV_DB_RANGE_HEAD(2),
        0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
        7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
 };
index 0268cf989736f303a224fbd26b0db2e8bf97f2ed..83c4bd5b2dd76bbf3f401c5a6bf0159d337f29b4 100644 (file)
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
 
        /* Initialize the device_attribute structure */
        dev_attr = &ssi_private->dev_attr;
+       sysfs_attr_init(&dev_attr->attr);
        dev_attr->attr.name = "statistics";
        dev_attr->attr.mode = S_IRUGO;
        dev_attr->show = fsl_sysfs_ssi_show;
index 31af405bda843cc691e755cc6bb4a0afec78925f..ae49f1c78c6de797bd193946b1d350aa00bd8a40 100644 (file)
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
        }
 
        if (strcasecmp(sprop, "i2s-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
 
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
                }
                machine_data->clk_frequency = be32_to_cpup(iprop);
        } else if (strcasecmp(sprop, "i2s-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "lj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "lj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "rj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "rj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "ac97-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "ac97-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else {
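
The machine-driver hunk above ORs clock-mastering flags into the DAI format, so the "-slave"/"-master" mode strings parsed here now pick who drives the bit and frame clocks as well as the data framing. A hedged sketch of how such a combined format is normally handed to a DAI (codec_dai is hypothetical):

unsigned int fmt = SND_SOC_DAIFMT_I2S		/* data framing */
		 | SND_SOC_DAIFMT_NB_NF		/* normal clock polarity */
		 | SND_SOC_DAIFMT_CBM_CFM;	/* codec masters BCLK and LRCLK */
int ret = snd_soc_dai_set_fmt(codec_dai, fmt);
if (ret < 0)
	return ret;
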
index b133bfcc5848ea8f6ec3c7cab18772ac18bc07b9..738391757f2ccb1a5aa6a8883de0ba127fedea68 100644 (file)
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
 
 config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
-       depends on MACH_IMX27_VISSTRIM_M10
+       depends on MACH_IMX27_VISSTRIM_M10 && I2C
        select SND_SOC_TLV320AIC32X4
        select SND_MXC_SOC_MX2
        help
index 8f49e165f4d1dd40119143b3b971a2b2f964025c..c62d715235e29ac5fa20639271d79958a57fd853 100644 (file)
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
 config SND_KIRKWOOD_SOC_OPENRD
        tristate "SoC Audio support for Kirkwood Openrd Client"
        depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+       depends on I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_CS42L51
        help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
 
 config SND_KIRKWOOD_SOC_T5325
        tristate "SoC Audio support for HP t5325"
-       depends on SND_KIRKWOOD_SOC && MACH_T5325
+       depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_ALC5623
        help
index 9c0edad90d8b4b3591c7c3a23f4c03b9e42594c1..a4e3237956e26dc499602f9b7397cf196c7ee5ec 100644 (file)
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
        if (ret)
                goto out3;
 
-       mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/
+       /* enable ac97 multifunction pin */
+       mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
 
        return 0;
 
index ffd2242e305f0827fb742f2c2339041d1859785f..a0f7d3cfa470b0857586b10021290aec5b4fd20f 100644 (file)
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
 config SND_SOC_RAUMFELD
        tristate "SoC Audio support Raumfeld audio adapter"
        depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+       depends on I2C && SPI_MASTER
        select SND_PXA_SOC_SSP
        select SND_SOC_CS4270
        select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
 
 config SND_PXA2XX_SOC_HX4700
        tristate "SoC Audio support for HP iPAQ hx4700"
-       depends on SND_PXA2XX_SOC && MACH_H4700
+       depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_AK4641
        help
index f75e43997d5beb5b27a82f8d023eddd33311538e..ad9ac42522e2539faf84c6286cc1614573e441f2 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "../codecs/wm8994.h"
 #include <sound/pcm_params.h>
+#include <linux/module.h>
 
  /*
   * Default CFG switch settings to use this driver:
index 85bf541a771d05226b761d6d274e7affd8cba6e2..4b8e35410eb1962623cc31882967397747e5b3e7 100644 (file)
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
        snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
-       snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+       snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
 
index a5d3685a5d38049313391ddb8e174edd9c28a21b..a25fa63ce9a27501a4f2d4a6334911076200532e 100644 (file)
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
        struct snd_soc_card *card = dev_get_drvdata(dev);
        int i, ac97_control = 0;
 
+       /* If the initialization of this soc device failed, there is no codec
+        * associated with it. Just bail out in this case.
+        */
+       if (list_empty(&card->codec_dev_list))
+               return 0;
+
        /* AC97 devices might have other drivers hanging off them so
         * need to resume immediately.  Other drivers don't have that
         * problem and may take a substantial amount of time to resume
index 0c12b98484bdd8316418358b5cead696e8774c57..4220bb0f27301aa962964b9eb645fd0f5e51e17c 100644 (file)
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+       .formats                = 0xffffffff,
+       .channels_min           = 1,
+       .channels_max           = UINT_MAX,
+
+       /* Random values to keep userspace happy when checking constraints */
+       .info                   = SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_BLOCK_TRANSFER,
+       .buffer_bytes_max       = 128*1024,
+       .period_bytes_min       = PAGE_SIZE,
+       .period_bytes_max       = PAGE_SIZE*2,
+       .periods_min            = 2,
+       .periods_max            = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+       return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+       .open           = dummy_dma_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+       .ops = &dummy_dma_ops,
+};
 
 static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
 {
index b61945f3af9e594aa28b2d992239bd4f4a714c6b..32d2a21f2e3b5a401948d24b81c945e239e8fa4e 100644 (file)
@@ -1632,6 +1632,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /* Roland GAIA SH-01 */
+       USB_DEVICE(0x0582, 0x0111),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Roland",
+               .product_name = "GAIA",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                               .out_cables = 0x0003,
+                               .in_cables  = 0x0003
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 {
        USB_DEVICE(0x0582, 0x0113),
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
index 7d98676808d8722a39623de8983642f987cea7cc..955930e0a5c34cd0852f92aa39a7c73492c159b5 100644 (file)
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 
        list_for_each_entry(counter, &evsel_list->entries, node) {
                if (create_perf_stat_counter(counter, first) < 0) {
-                       if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+                       if (errno == EINVAL || errno == ENOSYS ||
+                           errno == ENOENT || errno == EOPNOTSUPP) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    event_name(counter));
index e42626422587851b9c1b6e755dfdb09858640124..d7915d4e77cb629e4560d499ac2c1c902ecce5db 100644 (file)
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
        return size;
 }
 
+static void hists__init(struct hists *hists)
+{
+       memset(hists, 0, sizeof(*hists));
+       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+       hists->entries_in = &hists->entries_in_array[0];
+       hists->entries_collapsed = RB_ROOT;
+       hists->entries = RB_ROOT;
+       pthread_mutex_init(&hists->lock, NULL);
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
 {
index bcd05d05b4f01969906efe3dbfcc899ccdea554a..33c17a2b2a81e739066991a9a9facd2b38ee0416 100644 (file)
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, attr->name);
+               ret = do_write_string(fd, event_name(attr));
                if (ret < 0)
                        return ret;
                /*
index a36a3fa81ffba45ea6602145530289c1c326acbc..abef2703cd242eb8b8e5f1763cadd50286bdc8be 100644 (file)
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
 
        return ret;
 }
-
-void hists__init(struct hists *hists)
-{
-       memset(hists, 0, sizeof(*hists));
-       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
-       hists->entries_in = &hists->entries_in_array[0];
-       hists->entries_collapsed = RB_ROOT;
-       hists->entries = RB_ROOT;
-       pthread_mutex_init(&hists->lock, NULL);
-}
index c86c1d27bd1eca09cef6c00949293a251b345ef9..89289c8e935e78973a906fb96edd164973d20ac9 100644 (file)
@@ -63,8 +63,6 @@ struct hists {
        struct callchain_cursor callchain_cursor;
 };
 
-void hists__init(struct hists *hists);
-
 struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *parent, u64 period);
index 85c1e6b76f0a4bbdd3d2c5b9e0d7359733dbfda7..0f4555ce90635a767f4a609b917905b5f58bdd0a 100644 (file)
@@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
        }
 
        map = cpu_map__new(cpu_list);
+       if (map == NULL) {
+               pr_err("Invalid cpu_list\n");
+               return -1;
+       }
 
        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];
index 0a7ed5b5e281c88b321de87ced66a3d29ebb003d..6c164dc9ee957dbf3df642f712b2fdc1485d2dc6 100644 (file)
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
        field = malloc_or_die(sizeof(*field));
 
        type = process_arg(event, field, &token);
+       while (type == EVENT_OP)
+               type = process_op(event, field, &token);
        if (test_type_token(type, token, EVENT_DELIM, ","))
                goto out_free;