author     David S. Miller <davem@davemloft.net>    2011-12-02 13:49:21 -0500
committer  David S. Miller <davem@davemloft.net>    2011-12-02 13:49:21 -0500
commit     b3613118eb30a589d971e4eccbbb2a1314f5dfd4
tree       868c1ee59e1b5c19a4f2e43716400d0001a994e5
parent     7505afe28c16a8d386624930a018d0052c75d687
parent     5983fe2b29df5885880d7fa3b91aca306c7564ef
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
-rw-r--r--Documentation/DocBook/uio-howto.tmpl7
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/filesystems/btrfs.txt4
-rw-r--r--Documentation/i2c/ten-bit-addresses36
-rw-r--r--Documentation/power/devices.txt111
-rw-r--r--Documentation/power/runtime_pm.txt40
-rw-r--r--Documentation/serial/serial-rs485.txt14
-rw-r--r--MAINTAINERS19
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig20
-rw-r--r--arch/arm/common/gic.c16
-rw-r--r--arch/arm/common/pl330.c12
-rw-r--r--arch/arm/configs/at91cap9_defconfig (renamed from arch/arm/configs/at91cap9adk_defconfig)7
-rw-r--r--arch/arm/configs/at91rm9200_defconfig47
-rw-r--r--arch/arm/configs/at91sam9260_defconfig (renamed from arch/arm/configs/at91sam9260ek_defconfig)16
-rw-r--r--arch/arm/configs/at91sam9g20_defconfig (renamed from arch/arm/configs/at91sam9g20ek_defconfig)23
-rw-r--r--arch/arm/configs/at91sam9g45_defconfig7
-rw-r--r--arch/arm/configs/at91sam9rl_defconfig (renamed from arch/arm/configs/at91sam9rlek_defconfig)5
-rw-r--r--arch/arm/configs/ezx_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig2
-rw-r--r--arch/arm/configs/magician_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/u300_defconfig13
-rw-r--r--arch/arm/configs/u8500_defconfig14
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/pmu.h10
-rw-r--r--arch/arm/include/asm/topology.h2
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/kprobes-arm.c4
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c27
-rw-r--r--arch/arm/kernel/kprobes-test-thumb.c16
-rw-r--r--arch/arm/kernel/kprobes-test.h100
-rw-r--r--arch/arm/kernel/perf_event.c11
-rw-r--r--arch/arm/kernel/pmu.c1
-rw-r--r--arch/arm/kernel/process.c3
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/lib/bitops.h26
-rw-r--r--arch/arm/lib/changebit.S4
-rw-r--r--arch/arm/lib/clearbit.S4
-rw-r--r--arch/arm/lib/setbit.S4
-rw-r--r--arch/arm/lib/testchangebit.S4
-rw-r--r--arch/arm/lib/testclearbit.S4
-rw-r--r--arch/arm/lib/testsetbit.S4
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-highbank/highbank.c4
-rw-r--r--arch/arm/mach-imx/Kconfig13
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c7
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c109
-rw-r--r--arch/arm/mach-imx/src.c7
-rw-r--r--arch/arm/mach-mmp/gplugd.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio-pxa.h2
-rw-r--r--arch/arm/mach-mx5/cpu.c5
-rw-r--r--arch/arm/mach-mx5/mm.c6
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c2
-rw-r--r--arch/arm/mach-omap1/Kconfig8
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c10
-rw-r--r--arch/arm/mach-omap1/clock.h3
-rw-r--r--arch/arm/mach-omap1/clock_data.c53
-rw-r--r--arch/arm/mach-omap1/devices.c3
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile5
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c1
-rw-r--r--arch/arm/mach-omap2/display.c159
-rw-r--r--arch/arm/mach-omap2/display.h29
-rw-r--r--arch/arm/mach-omap2/io.h0
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c37
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h4
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c2
-rw-r--r--arch/arm/mach-omap2/pm.c6
-rw-r--r--arch/arm/mach-omap2/smartreflex.c2
-rw-r--r--arch/arm/mach-omap2/twl-common.c11
-rw-r--r--arch/arm/mach-omap2/twl-common.h3
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c2
-rw-r--r--arch/arm/mach-pxa/gumstix.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/palm27x.h4
-rw-r--r--arch/arm/mach-pxa/palm27x.c4
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-pxa/vpac270.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c2
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/mmap.c23
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/system.h7
-rw-r--r--arch/arm/plat-mxc/system.c3
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h3
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq-debugfs.c2
-rw-r--r--arch/arm/plat-s5p/sysmmu.c1
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/pd.c2
-rw-r--r--arch/arm/plat-samsung/pwm.c2
-rw-r--r--arch/arm/tools/mach-types1
-rw-r--r--arch/microblaze/include/asm/namei.h22
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts17
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/p3060_qds.c2
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/x86/um/asm/processor.h2
-rw-r--r--drivers/acpi/apei/erst.c31
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firmware/efivars.c12
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c57
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h19
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h21
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c411
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c92
-rw-r--r--drivers/gpu/drm/radeon/r300.c94
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c202
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/vga/vgaarb.c18
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/gpio-fan.c13
-rw-r--r--drivers/hwmon/jz4740-hwmon.c12
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/i2c-core.c4
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/infiniband/core/addr.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/mouse/elantech.c26
-rw-r--r--drivers/input/serio/ams_delta_serio.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/misc/Kconfig15
-rw-r--r--drivers/misc/ad525x_dpot.h2
-rw-r--r--drivers/misc/pch_phub.c81
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/net/bonding/bond_main.c33
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/jme.c113
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c17
-rw-r--r--drivers/of/irq.c16
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c29
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c27
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c4
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c14
-rw-r--r--drivers/regulator/twl-regulator.c46
-rw-r--r--drivers/spi/spi-nuc900.c2
-rw-r--r--drivers/staging/et131x/Kconfig3
-rw-r--r--drivers/staging/et131x/et131x.c12
-rw-r--r--drivers/staging/iio/industrialio-core.c25
-rw-r--r--drivers/staging/slicoss/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_dcc.c2
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/crisv10.c10
-rw-r--r--drivers/tty/serial/mfd.c4
-rw-r--r--drivers/tty/serial/pch_uart.c19
-rw-r--r--drivers/tty/tty_ldisc.c30
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/quirks.c27
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c21
-rw-r--r--drivers/usb/gadget/f_mass_storage.c6
-rw-r--r--drivers/usb/gadget/f_midi.c138
-rw-r--r--drivers/usb/gadget/file_storage.c4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/inode.c5
-rw-r--r--drivers/usb/gadget/pch_udc.c10
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c30
-rw-r--r--drivers/usb/gadget/udc-core.c10
-rw-r--r--drivers/usb/host/ehci-sched.c15
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/ohci-at91.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/ohci-pci.c26
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/pci-quirks.c57
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c34
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/am35x.c1
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.h8
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c63
-rw-r--r--drivers/usb/serial/ark3116.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/option.c28
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/ene_ub6250.c3
-rw-r--r--drivers/usb/storage/protocol.c7
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci.c18
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c355
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c2
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h8
-rw-r--r--fs/btrfs/disk-io.c147
-rw-r--r--fs/btrfs/extent-tree.c153
-rw-r--r--fs/btrfs/extent_io.c36
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/free-space-cache.c65
-rw-r--r--fs/btrfs/inode.c8
-rw-r--r--fs/btrfs/ioctl.c17
-rw-r--r--fs/btrfs/scrub.c7
-rw-r--r--fs/btrfs/super.c6
-rw-r--r--fs/btrfs/transaction.c8
-rw-r--r--fs/btrfs/volumes.h6
-rw-r--r--fs/ceph/dir.c2
-rw-r--r--fs/ceph/inode.c9
-rw-r--r--fs/ceph/super.c6
-rw-r--r--fs/dcache.c11
-rw-r--r--fs/ecryptfs/crypto.c26
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h5
-rw-r--r--fs/ecryptfs/file.c23
-rw-r--r--fs/ecryptfs/inode.c52
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/inode.c3
-rw-r--r--fs/ext4/super.c6
-rw-r--r--fs/minix/bitmap.c55
-rw-r--r--fs/minix/inode.c25
-rw-r--r--fs/minix/minix.h11
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/file.c91
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c26
-rw-r--r--fs/nfs/proc.c1
-rw-r--r--fs/nfs/read.c14
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c69
-rw-r--r--fs/ocfs2/aops.h14
-rw-r--r--fs/ocfs2/cluster/heartbeat.c194
-rw-r--r--fs/ocfs2/cluster/netdebug.c102
-rw-r--r--fs/ocfs2/cluster/tcp.c138
-rw-r--r--fs/ocfs2/cluster/tcp.h2
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h56
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c44
-rw-r--r--fs/ocfs2/dlm/dlmlock.c54
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c175
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c164
-rw-r--r--fs/ocfs2/dlm/dlmthread.c16
-rw-r--r--fs/ocfs2/dlmglue.c21
-rw-r--r--fs/ocfs2/extent_map.c96
-rw-r--r--fs/ocfs2/extent_map.h2
-rw-r--r--fs/ocfs2/file.c96
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h3
-rw-r--r--fs/ocfs2/ioctl.c11
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/mmap.c53
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/ocfs2.h51
-rw-r--r--fs/ocfs2/quota_local.c23
-rw-r--r--fs/ocfs2/slot_map.c4
-rw-r--r--fs/ocfs2/stack_o2cb.c71
-rw-r--r--fs/ocfs2/super.c25
-rw-r--r--fs/ocfs2/xattr.c10
-rw-r--r--fs/pstore/platform.c13
-rw-r--r--include/drm/drm_mode.h2
-rw-r--r--include/drm/exynos_drm.h9
-rw-r--r--include/drm/radeon_drm.h4
-rw-r--r--include/linux/ceph/osd_client.h8
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/init_task.h1
-rw-r--r--include/linux/mfd/tps65910.h3
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/nfs_fs.h3
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci-ats.h6
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pm.h229
-rw-r--r--include/linux/pstore.h4
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/serial.h14
-rw-r--r--include/linux/virtio_config.h2
-rw-r--r--include/linux/virtio_mmio.h2
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h19
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--include/net/red.h15
-rw-r--r--include/video/omapdss.h7
-rw-r--r--kernel/cgroup_freezer.c11
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/spurious.c4
-rw-r--r--kernel/power/hibernate.c16
-rw-r--r--kernel/time/clocksource.c58
-rw-r--r--kernel/time/timekeeping.c92
-rw-r--r--mm/page-writeback.c23
-rw-r--r--mm/percpu-vm.c17
-rw-r--r--mm/percpu.c62
-rw-r--r--mm/slub.c42
-rw-r--r--net/bridge/br_netlink.c6
-rw-r--r--net/bridge/br_stp.c29
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/dev.c9
-rw-r--r--net/core/dev_addr_lists.c3
-rw-r--r--net/decnet/dn_timer.c17
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/route.c44
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/udp.c15
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/mac80211/agg-tx.c42
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c2
-rw-r--r--net/netfilter/nf_conntrack_ecache.c37
-rw-r--r--net/netfilter/nf_conntrack_netlink.c73
-rw-r--r--net/netlabel/netlabel_kapi.c22
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_teql.c31
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_eld.c28
-rw-r--r--sound/pci/hda/patch_cirrus.c32
-rw-r--r--sound/pci/hda/patch_hdmi.c16
-rw-r--r--sound/pci/hda/patch_realtek.c34
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/pci/hda/patch_via.c76
-rw-r--r--sound/pci/lx6464es/lx_core.c23
-rw-r--r--sound/pci/lx6464es/lx_core.h3
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/cs4271.c8
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sta32x.c63
-rw-r--r--sound/soc/codecs/sta32x.h1
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8753.c3
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm9081.c10
-rw-r--r--sound/soc/codecs/wm9090.c6
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c1
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rwxr-xr-xtools/testing/ktest/ktest.pl16
436 files changed, 5400 insertions, 3493 deletions
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 54883de5d5f..ac3d0018140 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -521,6 +521,11 @@ Here's a description of the fields of <varname>struct uio_mem</varname>:
<itemizedlist>
<listitem><para>
+<varname>const char *name</varname>: Optional. Set this to help identify
+the memory region, it will show up in the corresponding sysfs node.
+</para></listitem>
+
+<listitem><para>
<varname>int memtype</varname>: Required if the mapping is used. Set this to
<varname>UIO_MEM_PHYS</varname> if you you have physical memory on your
card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
@@ -553,7 +558,7 @@ instead to remember such an address.
</itemizedlist>
<para>
-Please do not touch the <varname>kobj</varname> element of
+Please do not touch the <varname>map</varname> element of
<varname>struct uio_mem</varname>! It is used by the UIO framework
to set up sysfs files for this mapping. Simply leave it alone.
</para>
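As a companion to the uio-howto change above, here is a minimal, purely illustrative sketch of how a driver might fill in the struct uio_mem fields being documented; the device name, register base and size are invented:

  #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/uio_driver.h>

  static struct uio_info foo_uio_info = {
          .name    = "foo_uio",
          .version = "0.1",
  };

  static int foo_uio_probe(struct platform_device *pdev)
  {
          /* Describe one physical memory region of the (hypothetical) card. */
          foo_uio_info.mem[0].name    = "foo_registers"; /* appears in sysfs */
          foo_uio_info.mem[0].memtype = UIO_MEM_PHYS;    /* physical memory */
          foo_uio_info.mem[0].addr    = 0x10000000;      /* example base address */
          foo_uio_info.mem[0].size    = 0x1000;
          /* Leave foo_uio_info.mem[0].map alone: the UIO core uses it to
           * create the sysfs files for this mapping. */

          return uio_register_device(&pdev->dev, &foo_uio_info);
  }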
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b44..874921e9780 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -33,6 +33,7 @@ qcom Qualcomm, Inc.
ramtron Ramtron International
samsung Samsung Semiconductor
schindler Schindler
+sil Silicon Image
simtek
sirf SiRF Technology, Inc.
stericsson ST-Ericsson
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327..7671352216f 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
Userspace tools for creating and manipulating Btrfs file systems are
available from the git repository at the following location:
- http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
- git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
These include the following tools:
diff --git a/Documentation/i2c/ten-bit-addresses b/Documentation/i2c/ten-bit-addresses
index e9890709c50..cdfe13901b9 100644
--- a/Documentation/i2c/ten-bit-addresses
+++ b/Documentation/i2c/ten-bit-addresses
@@ -1,22 +1,24 @@
The I2C protocol knows about two kinds of device addresses: normal 7 bit
addresses, and an extended set of 10 bit addresses. The sets of addresses
do not intersect: the 7 bit address 0x10 is not the same as the 10 bit
-address 0x10 (though a single device could respond to both of them). You
-select a 10 bit address by adding an extra byte after the address
-byte:
- S Addr7 Rd/Wr ....
-becomes
- S 11110 Addr10 Rd/Wr
-S is the start bit, Rd/Wr the read/write bit, and if you count the number
-of bits, you will see the there are 8 after the S bit for 7 bit addresses,
-and 16 after the S bit for 10 bit addresses.
+address 0x10 (though a single device could respond to both of them).
-WARNING! The current 10 bit address support is EXPERIMENTAL. There are
-several places in the code that will cause SEVERE PROBLEMS with 10 bit
-addresses, even though there is some basic handling and hooks. Also,
-almost no supported adapter handles the 10 bit addresses correctly.
+I2C messages to and from 10-bit address devices have a different format.
+See the I2C specification for the details.
-As soon as a real 10 bit address device is spotted 'in the wild', we
-can and will add proper support. Right now, 10 bit address devices
-are defined by the I2C protocol, but we have never seen a single device
-which supports them.
+The current 10 bit address support is minimal. It should work, however
+you can expect some problems along the way:
+* Not all bus drivers support 10-bit addresses. Some don't because the
+ hardware doesn't support them (SMBus doesn't require 10-bit address
+ support for example), some don't because nobody bothered adding the
+ code (or it's there but not working properly.) Software implementation
+ (i2c-algo-bit) is known to work.
+* Some optional features do not support 10-bit addresses. This is the
+ case of automatic detection and instantiation of devices by their,
+ drivers, for example.
+* Many user-space packages (for example i2c-tools) lack support for
+ 10-bit addresses.
+
+Note that 10-bit address devices are still pretty rare, so the limitations
+listed above could stay for a long time, maybe even forever if nobody
+needs them to be fixed.
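As a concrete (and purely illustrative) companion to the note above, a board file can still instantiate a device at a 10-bit address explicitly; the chip name, address and bus number here are invented, and the relevant detail is the I2C_CLIENT_TEN flag:

  #include <linux/i2c.h>

  static struct i2c_board_info foo_i2c_devs[] __initdata = {
          {
                  I2C_BOARD_INFO("foo-sensor", 0x150), /* 10-bit address */
                  .flags = I2C_CLIENT_TEN,
          },
  };

  /* in board init code, for I2C bus 0: */
  /*   i2c_register_board_info(0, foo_i2c_devs, ARRAY_SIZE(foo_i2c_devs)); */

Whether this works on a given system still depends on the bus driver supporting 10-bit addressing, as the text above explains.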
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07..3139fb505dc 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -123,9 +123,10 @@ please refer directly to the source code for more information about it.
Subsystem-Level Methods
-----------------------
The core methods to suspend and resume devices reside in struct dev_pm_ops
-pointed to by the pm member of struct bus_type, struct device_type and
-struct class. They are mostly of interest to the people writing infrastructure
-for buses, like PCI or USB, or device type and device class drivers.
+pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+struct bus_type, struct device_type and struct class. They are mostly of
+interest to the people writing infrastructure for platforms and buses, like PCI
+or USB, or device type and device class drivers.
Bus drivers implement these methods as appropriate for the hardware and the
drivers using it; PCI works differently from USB, and so on. Not many people
@@ -139,41 +140,57 @@ sequencing in the driver model tree.
/sys/devices/.../power/wakeup files
-----------------------------------
-All devices in the driver model have two flags to control handling of wakeup
-events (hardware signals that can force the device and/or system out of a low
-power state). These flags are initialized by bus or device driver code using
+All device objects in the driver model contain fields that control the handling
+of system wakeup events (hardware signals that can force the system out of a
+sleep state). These fields are initialized by bus or device driver code using
device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
include/linux/pm_wakeup.h.
-The "can_wakeup" flag just records whether the device (and its driver) can
+The "power.can_wakeup" flag just records whether the device (and its driver) can
physically support wakeup events. The device_set_wakeup_capable() routine
-affects this flag. The "should_wakeup" flag controls whether the device should
-try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
-for the most part drivers should not change its value. The initial value of
-should_wakeup is supposed to be false for the majority of devices; the major
-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool. It should also default
-to true for devices that don't generate wakeup requests on their own but merely
-forward wakeup requests from one bus to another (like PCI bridges).
+affects this flag. The "power.wakeup" field is a pointer to an object of type
+struct wakeup_source used for controlling whether or not the device should use
+its system wakeup mechanism and for notifying the PM core of system wakeup
+events signaled by the device. This object is only present for wakeup-capable
+devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
+removed) by device_set_wakeup_capable().
Whether or not a device is capable of issuing wakeup events is a hardware
matter, and the kernel is responsible for keeping track of it. By contrast,
whether or not a wakeup-capable device should issue wakeup events is a policy
decision, and it is managed by user space through a sysfs attribute: the
-power/wakeup file. User space can write the strings "enabled" or "disabled" to
-set or clear the "should_wakeup" flag, respectively. This file is only present
-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
-and is created (or removed) by device_set_wakeup_capable(). Reads from the
-file will return the corresponding string.
-
-The device_may_wakeup() routine returns true only if both flags are set.
+"power/wakeup" file. User space can write the strings "enabled" or "disabled"
+to it to indicate whether or not, respectively, the device is supposed to signal
+system wakeup. This file is only present if the "power.wakeup" object exists
+for the given device and is created (or removed) along with that object, by
+device_set_wakeup_capable(). Reads from the file will return the corresponding
+string.
+
+The "power/wakeup" file is supposed to contain the "disabled" string initially
+for the majority of devices; the major exceptions are power buttons, keyboards,
+and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
+ethtool. It should also default to "enabled" for devices that don't generate
+wakeup requests on their own but merely forward wakeup requests from one bus to
+another (like PCI Express ports).
+
+The device_may_wakeup() routine returns true only if the "power.wakeup" object
+exists and the corresponding "power/wakeup" file contains the string "enabled".
This information is used by subsystems, like the PCI bus type code, to see
whether or not to enable the devices' wakeup mechanisms. If device wakeup
mechanisms are enabled or disabled directly by drivers, they also should use
device_may_wakeup() to decide what to do during a system sleep transition.
-However for runtime power management, wakeup events should be enabled whenever
-the device and driver both support them, regardless of the should_wakeup flag.
-
+Device drivers, however, are not supposed to call device_set_wakeup_enable()
+directly in any case.
+
+It ought to be noted that system wakeup is conceptually different from "remote
+wakeup" used by runtime power management, although it may be supported by the
+same physical mechanism. Remote wakeup is a feature allowing devices in
+low-power states to trigger specific interrupts to signal conditions in which
+they should be put into the full-power state. Those interrupts may or may not
+be used to signal system wakeup events, depending on the hardware design. On
+some systems it is impossible to trigger them from system sleep states. In any
+case, remote wakeup should always be enabled for runtime power management for
+all devices and drivers that support it.
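A short, hypothetical driver-side sketch of the policy described above; foo_arm_wakeup_irq() and foo_power_down() are invented helpers:

  static int foo_suspend(struct device *dev)
  {
          if (device_may_wakeup(dev))
                  foo_arm_wakeup_irq(dev);   /* keep a wakeup source armed */
          else
                  foo_power_down(dev);       /* no wakeup requested, power off */

          return 0;
  }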
/sys/devices/.../power/control files
------------------------------------
@@ -249,20 +266,31 @@ for every device before the next phase begins. Not all busses or classes
support all these callbacks and not all drivers use all the callbacks. The
various phases always run after tasks have been frozen and before they are
unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
-been disabled (except for those marked with the IRQ_WAKEUP flag).
-
-All phases use bus, type, or class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually
-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
-callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise,
-if the class provides a struct dev_pm_ops object pointed to by its pm field
-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
-callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of
-both the device type and class objects are NULL (or those objects do not exist),
-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
-will be used (this allows device types to override callbacks provided by bus
-types or classes if necessary).
+been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+
+All phases use PM domain, bus, type, or class callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
+These callbacks are regarded by the PM core as mutually exclusive. Moreover,
+PM domain callbacks always take precedence over bus, type and class callbacks,
+while type callbacks take precedence over bus and class callbacks, and class
+callbacks take precedence over bus callbacks. To be precise, the following
+rules are used to determine which callback to execute in the given phase:
+
+ 1. If dev->pm_domain is present, the PM core will attempt to execute the
+ callback included in dev->pm_domain->ops. If that callback is not
+ present, no action will be carried out for the given device.
+
+ 2. Otherwise, if both dev->type and dev->type->pm are present, the callback
+ included in dev->type->pm will be executed.
+
+ 3. Otherwise, if both dev->class and dev->class->pm are present, the
+ callback included in dev->class->pm will be executed.
+
+ 4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+ included in dev->bus->pm will be executed.
+
+This allows PM domains and device types to override callbacks provided by bus
+types or device classes if necessary.
These callbacks may in turn invoke device- or driver-specific methods stored in
dev->driver->pm, but they don't have to.
@@ -283,9 +311,8 @@ When the system goes into the standby or memory sleep state, the phases are:
After the prepare callback method returns, no new children may be
registered below the device. The method may also prepare the device or
- driver in some way for the upcoming system power transition (for
- example, by allocating additional memory required for this purpose), but
- it should not put the device into a low-power state.
+ driver in some way for the upcoming system power transition, but it
+ should not put the device into a low-power state.
2. The suspend methods should quiesce the device to stop it from performing
I/O. They also may save the device registers and put it into the
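The precedence rules added to devices.txt above boil down to a simple selection order; the following is only a simplified illustration of those four rules, not the actual PM core code:

  #include <linux/device.h>
  #include <linux/pm.h>

  static const struct dev_pm_ops *pick_subsystem_pm_ops(struct device *dev)
  {
          if (dev->pm_domain)
                  return &dev->pm_domain->ops;  /* rule 1: PM domain wins */
          if (dev->type && dev->type->pm)
                  return dev->type->pm;         /* rule 2: device type */
          if (dev->class && dev->class->pm)
                  return dev->class->pm;        /* rule 3: device class */
          if (dev->bus && dev->bus->pm)
                  return dev->bus->pm;          /* rule 4: bus type */
          return NULL;                          /* no subsystem callbacks */
  }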
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831..c2ae8bf77d4 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -44,25 +44,33 @@ struct dev_pm_ops {
};
The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
-are executed by the PM core for either the power domain, or the device type
-(if the device power domain's struct dev_pm_ops does not exist), or the class
-(if the device power domain's and type's struct dev_pm_ops object does not
-exist), or the bus type (if the device power domain's, type's and class'
-struct dev_pm_ops objects do not exist) of the given device, so the priority
-order of callbacks from high to low is that power domain callbacks, device
-type callbacks, class callbacks and bus type callbacks, and the high priority
-one will take precedence over low priority one. The bus type, device type and
-class callbacks are referred to as subsystem-level callbacks in what follows,
-and generally speaking, the power domain callbacks are used for representing
-power domains within a SoC.
+are executed by the PM core for the device's subsystem that may be either of
+the following:
+
+ 1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
+ is present.
+
+ 2. Device type of the device, if both dev->type and dev->type->pm are present.
+
+ 3. Device class of the device, if both dev->class and dev->class->pm are
+ present.
+
+ 4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+
+The PM core always checks which callback to use in the order given above, so the
+priority order of callbacks from high to low is: PM domain, device type, class
+and bus type. Moreover, the high-priority one will always take precedence over
+a low-priority one. The PM domain, bus type, device type and class callbacks
+are referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled.
-This implies that these callback routines must not block or sleep, but it also
-means that the synchronous helper functions listed at the end of Section 4 can
-be used within an interrupt handler or in an atomic context.
+to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
+->runtime_idle() callbacks may be invoked in atomic context with interrupts
+disabled for a given device. This implies that the callback routines in
+question must not block or sleep, but it also means that the synchronous helper
+functions listed at the end of Section 4 may be used for that device within an
+interrupt handler or generally in an atomic context.
The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
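To connect the runtime_pm.txt wording above to driver code, here is a hedged sketch of where the ->runtime_suspend() and ->runtime_resume() callbacks live; all foo_* names are invented and the bodies are placeholders:

  #include <linux/pm.h>
  #include <linux/pm_runtime.h>

  static int foo_runtime_suspend(struct device *dev)
  {
          /* quiesce the device, gate clocks, etc. */
          return 0;
  }

  static int foo_runtime_resume(struct device *dev)
  {
          /* ungate clocks and restore context */
          return 0;
  }

  static const struct dev_pm_ops foo_pm_ops = {
          SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
  };

A subsystem that wants these callbacks runnable with interrupts disabled would additionally call pm_runtime_irq_safe(dev) for the device, as described above.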
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
index 079cb3df62c..41c8378c0b2 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.txt
@@ -97,15 +97,23 @@
struct serial_rs485 rs485conf;
- /* Set RS485 mode: */
+ /* Enable RS485 mode: */
rs485conf.flags |= SER_RS485_ENABLED;
+ /* Set logical level for RTS pin equal to 1 when sending: */
+ rs485conf.flags |= SER_RS485_RTS_ON_SEND;
+ /* or, set logical level for RTS pin equal to 0 when sending: */
+ rs485conf.flags &= ~(SER_RS485_RTS_ON_SEND);
+
+ /* Set logical level for RTS pin equal to 1 after sending: */
+ rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
+ /* or, set logical level for RTS pin equal to 0 after sending: */
+ rs485conf.flags &= ~(SER_RS485_RTS_AFTER_SEND);
+
/* Set rts delay before send, if needed: */
- rs485conf.flags |= SER_RS485_RTS_BEFORE_SEND;
rs485conf.delay_rts_before_send = ...;
/* Set rts delay after send, if needed: */
- rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
rs485conf.delay_rts_after_send = ...;
/* Set this flag if you want to receive data even whilst sending data */
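The hunk above only shows the flag manipulation; a fuller user-space sketch of the same configuration, pushed to the driver with the TIOCSRS485 ioctl, might look like this (the device node /dev/ttyS1 is just an example):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/serial.h>

  int main(void)
  {
          struct serial_rs485 rs485conf;
          int fd = open("/dev/ttyS1", O_RDWR);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          memset(&rs485conf, 0, sizeof(rs485conf));
          rs485conf.flags |= SER_RS485_ENABLED;          /* enable RS485 mode */
          rs485conf.flags |= SER_RS485_RTS_ON_SEND;      /* RTS high while sending */
          rs485conf.flags &= ~SER_RS485_RTS_AFTER_SEND;  /* RTS low after sending */
          rs485conf.delay_rts_before_send = 0;
          rs485conf.delay_rts_after_send = 0;

          if (ioctl(fd, TIOCSRS485, &rs485conf) < 0)
                  perror("TIOCSRS485");

          close(fd);
          return 0;
  }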
diff --git a/MAINTAINERS b/MAINTAINERS
index 717d9e959b1..c88eb7bb3a6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -789,6 +789,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.pengutronix.de/git/imx/linux-2.6.git
F: arch/arm/mach-mx*/
+F: arch/arm/mach-imx/
F: arch/arm/plat-mxc/
ARM/FREESCALE IMX51
@@ -804,6 +805,13 @@ S: Maintained
T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
F: arch/arm/mach-imx/*imx6*
+ARM/FREESCALE MXS ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
+F: arch/arm/mach-mxs/
+
ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1789,6 +1797,14 @@ F: include/net/cfg80211.h
F: net/wireless/*
X: net/wireless/wext*
+CHAR and MISC DRIVERS
+M: Arnd Bergmann <arnd@arndb.de>
+M: Greg Kroah-Hartman <greg@kroah.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+S: Maintained
+F: drivers/char/*
+F: drivers/misc/*
+
CHECKPATCH
M: Andy Whitcroft <apw@canonical.com>
S: Supported
@@ -3720,7 +3736,7 @@ F: fs/jbd2/
F: include/linux/jbd2.h
JSM Neo PCI based serial card
-M: Breno Leitao <leitao@linux.vnet.ibm.com>
+M: Lucas Tavares <lucaskt@linux.vnet.ibm.com>
L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/jsm/
@@ -5659,7 +5675,6 @@ F: drivers/media/video/*7146*
F: include/media/*7146*
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Jassi Brar <jassisinghbrar@gmail.com>
M: Sangbeom Kim <sbkim73@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
diff --git a/Makefile b/Makefile
index dab8610c4d6..12aafc20efb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983..e084b7e981e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1231,7 +1231,7 @@ config ARM_ERRATA_742231
capabilities of the processor.
config PL310_ERRATA_588369
- bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+ bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0
help
The PL310 L2 cache controller implements three types of Clean &
@@ -1256,7 +1256,7 @@ config ARM_ERRATA_720789
entries regardless of the ASID.
config PL310_ERRATA_727915
- bool "Background Clean & Invalidate by Way operation can cause data corruption"
+ bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
depends on CACHE_L2X0
help
PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1289,8 +1289,8 @@ config ARM_ERRATA_751472
operation is received by a CPU before the ICIALLUIS has completed,
potentially leading to corrupted entries in the cache or TLB.
-config ARM_ERRATA_753970
- bool "ARM errata: cache sync operation may be faulty"
+config PL310_ERRATA_753970
+ bool "PL310 errata: cache sync operation may be faulty"
depends on CACHE_PL310
help
This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1352,6 +1352,18 @@ config ARM_ERRATA_764369
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
source "arch/arm/common/Kconfig"
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0e6ae470c94..410a546060a 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
sizeof(u32));
BUG_ON(!gic->saved_ppi_conf);
- cpu_pm_register_notifier(&gic_notifier_block);
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
+ domain->hwirq_base = 32;
if (gic_nr == 0) {
gic_cpu_base_addr = cpu_base;
- domain->hwirq_base = 16;
- if (irq_start > 0)
- irq_start = (irq_start & ~31) + 16;
- } else
- domain->hwirq_base = 32;
+
+ if ((irq_start & 31) > 0) {
+ domain->hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ }
+ }
/*
* Find out how many interrupts are supported.
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd..f407a6b35d3 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
- ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
- ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+ ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+ ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
ccr |= (rqc->swap << CC_SWAP_SHFT);
@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
return -1;
}
+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+ return pi->pcfg.irq_ns & (1 << i);
+}
+
/* Upon success, returns IdentityToken for the
* allocated channel, NULL otherwise.
*/
@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi)
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
- if (thrd->free) {
+ if ((thrd->free) && (!_manager_ns(thrd) ||
+ _chan_ns(pi, i))) {
thrd->ev = _alloc_event(thrd);
if (thrd->ev >= 0) {
thrd->free = false;
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9_defconfig
index ffb1edd9336..8826eb218e7 100644
--- a/arch/arm/configs/at91cap9adk_defconfig
+++ b/arch/arm/configs/at91cap9_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c98542..bbe4e1a1f5d 100644
--- a/arch/arm/configs/at91rm9200_defconfig
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_LEGACY=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=m
+CONFIG_ARM_AT91_ETHER=y
CONFIG_PHYLIB=y
CONFIG_DAVICOM_PHY=y
CONFIG_SMSC_PHY=y
CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=y
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_LEGACY_PTY_COUNT=32
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260_defconfig
index f8a9226413b..505b3765f87 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260_defconfig
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9260=y
+CONFIG_ARCH_AT91SAM9260_SAM9XE=y
CONFIG_MACH_AT91SAM9260EK=y
+CONFIG_MACH_CAM60=y
+CONFIG_MACH_SAM9_L9260=y
+CONFIG_MACH_AFEB9260=y
+CONFIG_MACH_USB_A9260=y
+CONFIG_MACH_QIL_A9260=y
+CONFIG_MACH_CPU9260=y
+CONFIG_MACH_FLEXIBITY=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
CONFIG_FPE_NWFPE=y
CONFIG_NET=y
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d7929..9123568d9a8 100644
--- a/arch/arm/configs/at91sam9g20ek_defconfig
+++ b/arch/arm/configs/at91sam9g20_defconfig
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9G20=y
CONFIG_MACH_AT91SAM9G20EK=y
CONFIG_MACH_AT91SAM9G20EK_2MMC=y
+CONFIG_MACH_CPU9G20=y
+CONFIG_MACH_ACMENETUSFOXG20=y
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_MACH_GSIA18S=y
+CONFIG_MACH_USB_A9G20=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
CONFIG_AEABI=y
@@ -21,9 +30,10 @@ CONFIG_LEDS=y
CONFIG_LEDS_CPU=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
CONFIG_HW_RANDOM=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
CONFIG_SPI_SPIDEV=y
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_AT73C213=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index c5876d244f4..606d48f3b8f 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9G45=y
CONFIG_MACH_AT91SAM9M10G45EK=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
CONFIG_AT91_SLOW_CLOCK=y
CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_MII=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
CONFIG_FB=y
CONFIG_FB_ATMEL=y
CONFIG_FB_UDL=m
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03f..ad562ee6420 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rl_defconfig
@@ -23,8 +23,6 @@ CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=24576
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_MMC=y
CONFIG_MMC_AT91=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477346e..d95763d5f0d 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22af03..fd996bb1302 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a88e64d4e9a..443675d317e 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_USB_GADGETFS=m
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 7b63462b349..a7e77758137 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,7 +48,6 @@ CONFIG_MACH_SX1=y
CONFIG_MACH_NOKIA770=y
CONFIG_MACH_AMS_DELTA=y
CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
CONFIG_OMAP_ARM_216MHZ=y
CONFIG_OMAP_ARM_195MHZ=y
CONFIG_OMAP_ARM_192MHZ=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 4a5a12681be..374000ec4e4 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_U300=y
CONFIG_MACH_U300=y
CONFIG_MACH_U300_BS335=y
-CONFIG_MACH_U300_DUAL_RAM=y
-CONFIG_U300_DEBUG=y
CONFIG_MACH_U300_SPIDUMMY=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
CONFIG_CPU_IDLE=y
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
# CONFIG_SUSPEND is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_MISC_DEVICES is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 97d31a4663d..2d7b6e7b727 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_U8500=y
CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
-CONFIG_MACH_U8500=y
+CONFIG_MACH_HREFV60=y
CONFIG_MACH_SNOWBALL=y
CONFIG_MACH_U5500=y
CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
CONFIG_AB8500_PWM=y
CONFIG_SENSORS_BH1780=y
CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
-# CONFIG_HWMON is not set
CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
+CONFIG_AB5500_CORE=y
CONFIG_AB8500_CORE=y
CONFIG_REGULATOR_AB8500=y
# CONFIG_HID_SUPPORT is not set
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
-CONFIG_MUSB_PIO_ONLY=y
CONFIG_USB_GADGET=y
CONFIG_AB8500_USB=y
CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_STAGING=y
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_HSEM_U8500=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 59577ad3f4e..547a3c1e59d 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 71d99b83cdb..0bda22c094a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type);
extern void
release_pmu(enum arm_pmu_type type);
-/**
- * init_pmu() - Initialise the PMU.
- *
- * Initialise the system ready for PMU enabling. This should typically set the
- * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
- * the actual hardware initialisation.
- */
-extern int
-init_pmu(enum arm_pmu_type type);
-
#else /* CONFIG_CPU_HAS_PMU */
#include <linux/err.h>
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index a7e457ed27c..58b8b84adcd 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
#else
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9ad50c4208a..b145f16c91b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -497,7 +497,7 @@ ENDPROC(__und_usr)
.popsection
.pushsection __ex_table,"a"
.long 1b, 4b
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
.long 2b, 4b
.long 3b, 4b
#endif
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
index 9fe8910308a..8a30c89da70 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
static const union decode_item arm_cccc_0001_____1001_table[] = {
/* Synchronization primitives */
+#if __LINUX_ARM_ARCH__ < 6
+ /* Deprecated on ARMv6 and may be UNDEFINED on v7 */
/* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
-
+#endif
/* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
/* And unallocated instructions... */
DECODE_END
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index fc82de8bdcc..ba32b393b3f 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Synchronization primitives")
- /*
- * Use hard coded constants for SWP instructions to avoid warnings
- * about deprecated instructions.
- */
- TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]")
- TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
+ TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+ TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]")
+ TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]")
+ TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]")
+#endif
TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
- TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+ TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]")
+ TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]")
+#endif
TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
- TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"")
+ TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
index 5e726c31c45..5d8b8579222 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
DONT_TEST_IN_ITBLOCK(
TEST_BF_R( "cbnz r",0,0, ", 2f")
TEST_BF_R( "cbz r",2,-1,", 2f")
- TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20)
- TEST_BF_RX( "cbz r",7,0, ", 2f",0x40)
+ TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
+ TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
)
TEST_R("sxth r0, r",7, HH1,"")
TEST_R("sxth r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
TESTCASE_START(code) \
TEST_ARG_PTR(13, offset) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code,0) \
+ TEST_BRANCH_F(code) \
TESTCASE_END
TEST("push {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
TEST_BF( "b 2f")
TEST_BB( "b 2b")
- TEST_BF_X("b 2f", 0x400)
- TEST_BB_X("b 2b", 0x400)
+ TEST_BF_X("b 2f", SPACE_0x400)
+ TEST_BB_X("b 2b", SPACE_0x400)
TEST_GROUP("Testing instructions in IT blocks")
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
TEST_BB("bne.w 2b")
TEST_BF("bgt.w 2f")
TEST_BB("blt.w 2b")
- TEST_BF_X("bpl.w 2f",0x1000)
+ TEST_BF_X("bpl.w 2f", SPACE_0x1000)
)
TEST_UNSUPPORTED("msr cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
TEST_BF( "b.w 2f")
TEST_BB( "b.w 2b")
- TEST_BF_X("b.w 2f", 0x1000)
+ TEST_BF_X("b.w 2f", SPACE_0x1000)
TEST_BF( "bl.w 2f")
TEST_BB( "bl.w 2b")
- TEST_BB_X("bl.w 2b", 0x1000)
+ TEST_BB_X("bl.w 2b", SPACE_0x1000)
TEST_X( "blx __dummy_arm_subroutine",
".arm \n\t"
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index 0dc5d77b935..e28a869b1ae 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -149,23 +149,31 @@ struct test_arg_end {
"1: "instruction" \n\t" \
" nop \n\t"
-#define TEST_BRANCH_F(instruction, xtra_dist) \
+#define TEST_BRANCH_F(instruction) \
TEST_INSTRUCTION(instruction) \
- ".if "#xtra_dist" \n\t" \
" b 99f \n\t" \
- ".space "#xtra_dist" \n\t" \
- ".endif \n\t" \
+ "2: nop \n\t"
+
+#define TEST_BRANCH_B(instruction) \
+ " b 50f \n\t" \
+ " b 99f \n\t" \
+ "2: nop \n\t" \
+ " b 99f \n\t" \
+ TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex) \
+ TEST_INSTRUCTION(instruction) \
+ " b 99f \n\t" \
+ codex" \n\t" \
" b 99f \n\t" \
"2: nop \n\t"
-#define TEST_BRANCH_B(instruction, xtra_dist) \
+#define TEST_BRANCH_BX(instruction, codex) \
" b 50f \n\t" \
" b 99f \n\t" \
"2: nop \n\t" \
" b 99f \n\t" \
- ".if "#xtra_dist" \n\t" \
- ".space "#xtra_dist" \n\t" \
- ".endif \n\t" \
+ codex" \n\t" \
TEST_INSTRUCTION(instruction)
#define TESTCASE_END \
@@ -301,47 +309,60 @@ struct test_arg_end {
TESTCASE_START(code1 #reg1 code2) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg1 code2, 0) \
+ TEST_BRANCH_F(code1 #reg1 code2) \
TESTCASE_END
-#define TEST_BF_X(code, xtra_dist) \
+#define TEST_BF(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code, xtra_dist) \
+ TEST_BRANCH_F(code) \
TESTCASE_END
-#define TEST_BB_X(code, xtra_dist) \
+#define TEST_BB(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
- TEST_BRANCH_B(code, xtra_dist) \
+ TEST_BRANCH_B(code) \
TESTCASE_END
-#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \
- TESTCASE_START(code1 #reg code2) \
- TEST_ARG_REG(reg, val) \
- TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg code2, xtra_dist) \
+#define TEST_BF_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code1 #reg code2) \
TESTCASE_END
-#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \
- TESTCASE_START(code1 #reg code2) \
- TEST_ARG_REG(reg, val) \
- TEST_ARG_END("") \
- TEST_BRANCH_B(code1 #reg code2, xtra_dist) \
+#define TEST_BB_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_B(code1 #reg code2) \
TESTCASE_END
-#define TEST_BF(code) TEST_BF_X(code, 0)
-#define TEST_BB(code) TEST_BB_X(code, 0)
-
-#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
-#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
-
#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \
+ TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_BF_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BB_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_BX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code1 #reg code2, codex) \
TESTCASE_END
#define TEST_X(code, codex) \
@@ -372,6 +393,25 @@ struct test_arg_end {
TESTCASE_END
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so the compiler guesses better the length of inline asm
+ * code and will spill the literal pool early enough to avoid generating PC
+ * relative loads with out of range offsets.
+ */
+#define TWICE(x) x x
+#define SPACE_0x8 TWICE(".space 4\n\t")
+#define SPACE_0x10 TWICE(SPACE_0x8)
+#define SPACE_0x20 TWICE(SPACE_0x10)
+#define SPACE_0x40 TWICE(SPACE_0x20)
+#define SPACE_0x80 TWICE(SPACE_0x40)
+#define SPACE_0x100 TWICE(SPACE_0x80)
+#define SPACE_0x200 TWICE(SPACE_0x100)
+#define SPACE_0x400 TWICE(SPACE_0x200)
+#define SPACE_0x800 TWICE(SPACE_0x400)
+#define SPACE_0x1000 TWICE(SPACE_0x800)
+
+
/* Various values used in test cases... */
#define N(val) (val ^ 0xffffffff)
#define VAL1 0x12345678
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 24e2347be6b..c475379199b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -343,8 +343,14 @@ validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
+ DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
- memset(&fake_pmu, 0, sizeof(fake_pmu));
+ /*
+ * Initialise the fake PMU. We only need to populate the
+ * used_mask for the purposes of validation.
+ */
+ memset(fake_used_mask, 0, sizeof(fake_used_mask));
+ fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
return -ENOSPC;
@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
int i, err, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
+ if (!pmu_device)
+ return -ENODEV;
+
err = reserve_pmu(armpmu->type);
if (err) {
pr_warning("unable to reserve pmu\n");
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c3407ee857..2334bf8a650 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
{
clear_bit_unlock(type, pmu_lock);
}
+EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 75316f0dd02..3d0c6fb74ae 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -192,6 +192,9 @@ void cpu_idle(void)
#endif
local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+ wmb();
+#endif
if (hlt_counter) {
local_irq_enable();
cpu_relax();
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 1040c00405d..8200deaa14f 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,7 +43,7 @@
struct cputopo_arm cpu_topology[NR_CPUS];
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 10d868a5a48..d6408d1ee54 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,9 @@
+#include <asm/unwind.h>
+
#if __LINUX_ARM_ARCH__ >= 6
- .macro bitop, instr
+ .macro bitop, name, instr
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
@@ -13,9 +17,13 @@
cmp r0, #0
bne 1b
bx lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
- .macro testop, instr, store
+ .macro testop, name, instr, store
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
@@ -34,9 +42,13 @@
cmp r0, #0
movne r0, #1
2: bx lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
#else
- .macro bitop, instr
+ .macro bitop, name, instr
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r2, r0, #31
@@ -49,6 +61,8 @@
str r2, [r1, r0, lsl #2]
restore_irqs ip
mov pc, lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
/**
@@ -59,7 +73,9 @@
* Note: we can trivially conditionalise the store instruction
* to avoid dirtying the data cache.
*/
- .macro testop, instr, store
+ .macro testop, name, instr, store
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r3, r0, #31
@@ -73,5 +89,7 @@
moveq r0, #0
restore_irqs ip
mov pc, lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
#endif
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 68ed5b62e83..f4027862172 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_change_bit)
- bitop eor
-ENDPROC(_change_bit)
+bitop _change_bit, eor
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 4c04c3b51ee..f6b75fb64d3 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_clear_bit)
- bitop bic
-ENDPROC(_clear_bit)
+bitop _clear_bit, bic
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index bbee5c66a23..618fedae4b3 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_set_bit)
- bitop orr
-ENDPROC(_set_bit)
+bitop _set_bit, orr
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 15a4d431f22..4becdc3a59c 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_change_bit)
- testop eor, str
-ENDPROC(_test_and_change_bit)
+testop _test_and_change_bit, eor, str
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 521b66b5b95..918841dcce7 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_clear_bit)
- testop bicne, strne
-ENDPROC(_test_and_clear_bit)
+testop _test_and_clear_bit, bicne, strne
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 1c98cc2185b..8d1b2fe9e48 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_set_bit)
- testop orreq, streq
-ENDPROC(_test_and_set_bit)
+testop _test_and_set_bit, orreq, streq
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 35f6502144a..4ebb382c597 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -12,6 +12,8 @@
#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/time.h>
#include <asm/proc-fns.h>
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b82dcf08e74..88660d500f5 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
void highbank_set_cpu_jump(int cpu, void *jump_addr)
{
+#ifdef CONFIG_SMP
+ cpu = cpu_logical_map(cpu);
+#endif
writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 5f7f9c2a34a..c44aa974e79 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
config HAVE_IMX_SRC
bool
-#
-# ARCH_MX31 and ARCH_MX35 are left for compatibility
-# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
-# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
-# more sensible) names are used: SOC_IMX31 and SOC_IMX35
config ARCH_MX1
bool
@@ -27,12 +22,6 @@ config ARCH_MX25
config MACH_MX27
bool
-config ARCH_MX31
- bool
-
-config ARCH_MX35
- bool
-
config SOC_IMX1
bool
select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
select CPU_V6
select IMX_HAVE_PLATFORM_MXC_RNGA
select ARCH_MXC_AUDMUX_V2
- select ARCH_MX31
select MXC_AVIC
select SMP_ON_UP if SMP
@@ -82,7 +70,6 @@ config SOC_IMX35
select ARCH_MXC_IOMUX_V3
select ARCH_MXC_AUDMUX_V2
select HAVE_EPIT
- select ARCH_MX35
select MXC_AVIC
select SMP_ON_UP if SMP
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index 613a1b993bf..039a7abb165 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1953,14 +1953,17 @@ static struct map_desc imx6q_clock_desc[] = {
imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
};
+void __init imx6q_clock_map_io(void)
+{
+ iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
+}
+
int __init mx6q_clocks_init(void)
{
struct device_node *np;
void __iomem *base;
int i, irq;
- iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-
/* retrieve the freqency of fixed clocks from device tree */
for_each_compatible_node(np, NULL, "fixed-clock") {
u32 rate;
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 8bf5fa34948..9cd860a27af 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -34,6 +34,7 @@ static void __init imx6q_map_io(void)
{
imx_lluart_map_io();
imx_scu_map_io();
+ imx6q_clock_map_io();
}
static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9f0e82ec339..31807d2a8b7 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -33,29 +33,32 @@
static void imx3_idle(void)
{
unsigned long reg = 0;
- __asm__ __volatile__(
- /* disable I and D cache */
- "mrc p15, 0, %0, c1, c0, 0\n"
- "bic %0, %0, #0x00001000\n"
- "bic %0, %0, #0x00000004\n"
- "mcr p15, 0, %0, c1, c0, 0\n"
- /* invalidate I cache */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c5, 0\n"
- /* clear and invalidate D cache */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c14, 0\n"
- /* WFI */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c0, 4\n"
- "nop\n" "nop\n" "nop\n" "nop\n"
- "nop\n" "nop\n" "nop\n"
- /* enable I and D cache */
- "mrc p15, 0, %0, c1, c0, 0\n"
- "orr %0, %0, #0x00001000\n"
- "orr %0, %0, #0x00000004\n"
- "mcr p15, 0, %0, c1, c0, 0\n"
- : "=r" (reg));
+
+ if (!need_resched())
+ __asm__ __volatile__(
+ /* disable I and D cache */
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ "bic %0, %0, #0x00001000\n"
+ "bic %0, %0, #0x00000004\n"
+ "mcr p15, 0, %0, c1, c0, 0\n"
+ /* invalidate I cache */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c5, 0\n"
+ /* clear and invalidate D cache */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c14, 0\n"
+ /* WFI */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c0, 4\n"
+ "nop\n" "nop\n" "nop\n" "nop\n"
+ "nop\n" "nop\n" "nop\n"
+ /* enable I and D cache */
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ "orr %0, %0, #0x00001000\n"
+ "orr %0, %0, #0x00000004\n"
+ "mcr p15, 0, %0, c1, c0, 0\n"
+ : "=r" (reg));
+ local_irq_enable();
}
static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
l2x0_init(l2x0_base, 0x00030024, 0x00000000);
}
+#ifdef CONFIG_SOC_IMX31
static struct map_desc mx31_io_desc[] __initdata = {
imx_map_entry(MX31, X_MEMC, MT_DEVICE),
imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
}
-static struct map_desc mx35_io_desc[] __initdata = {
- imx_map_entry(MX35, X_MEMC, MT_DEVICE),
- imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
-};
-
-void __init mx35_map_io(void)
-{
- iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
-}
-
void __init imx31_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX31);
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
- imx_idle = imx3_idle;
- imx_ioremap = imx3_ioremap;
-}
-
-void __init imx35_init_early(void)
-{
- mxc_set_cpu_type(MXC_CPU_MX35);
- mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
- mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
- imx_idle = imx3_idle;
+ pm_idle = imx3_idle;
imx_ioremap = imx3_ioremap;
}
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
}
-void __init mx35_init_irq(void)
-{
- mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
-}
-
static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
.per_2_per_addr = 1677,
};
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
}
+#endif /* ifdef CONFIG_SOC_IMX31 */
+
+#ifdef CONFIG_SOC_IMX35
+static struct map_desc mx35_io_desc[] __initdata = {
+ imx_map_entry(MX35, X_MEMC, MT_DEVICE),
+ imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
+};
+
+void __init mx35_map_io(void)
+{
+ iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
+}
+
+void __init imx35_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MX35);
+ mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
+ mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
+ pm_idle = imx3_idle;
+ imx_ioremap = imx3_ioremap;
+}
+
+void __init mx35_init_irq(void)
+{
+ mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
+}
static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
.ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
}
+#endif /* ifdef CONFIG_SOC_IMX35 */
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 36cacbd0dcc..a8e33681b73 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <asm/unified.h>
#define SRC_SCR 0x000
@@ -23,10 +24,15 @@
static void __iomem *src_base;
+#ifndef CONFIG_SMP
+#define cpu_logical_map(cpu) 0
+#endif
+
void imx_enable_cpu(int cpu, bool enable)
{
u32 mask, val;
+ cpu = cpu_logical_map(cpu);
mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
val = readl_relaxed(src_base + SRC_SCR);
val = enable ? val | mask : val & ~mask;
@@ -35,6 +41,7 @@ void imx_enable_cpu(int cpu, bool enable)
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
+ cpu = cpu_logical_map(cpu);
writel_relaxed(BSYM(virt_to_phys(jump_addr)),
src_base + SRC_GPR1 + cpu * 8);
}
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 69156568bc4..4665767a4f7 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -182,7 +182,7 @@ static void __init gplugd_init(void)
/* on-chip devices */
pxa168_add_uart(3);
- pxa168_add_ssp(0);
+ pxa168_add_ssp(1);
pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
pxa168_add_eth(&gplugd_eth_platform_data);
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
index d14eeaf1632..99b4ce1b656 100644
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
@@ -7,7 +7,7 @@
#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (GPIO_REGS_VIRT + (x))
+#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 5c5328257dc..5e2e7a84386 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <mach/hardware.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int mx5_cpu_rev = -1;
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
if (!cpu_is_mx51())
return 0;
- if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) {
+ if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
+ (elf_hwcap & HWCAP_NEON)) {
elf_hwcap &= ~HWCAP_NEON;
pr_info("Turning off NEON support, detected broken NEON implementation\n");
}
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index 26eacc9d0d9..df4a508f240 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -23,7 +23,9 @@
static void imx5_idle(void)
{
- mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ if (!need_resched())
+ mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ local_irq_enable();
}
/*
@@ -89,7 +91,7 @@ void __init imx51_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX51);
mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
- imx_idle = imx5_idle;
+ pm_idle = imx5_idle;
}
void __init imx53_init_early(void)
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 229ae349421..da6e4aad177 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
reg &= ~BM_CLKCTRL_##dr##_DIV; \
reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg | (1 << clk->enable_shift)) { \
+ if (reg & (1 << clk->enable_shift)) { \
pr_err("%s: clock is gated\n", __func__); \
return -EINVAL; \
} \
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index e0a028161dd..73f287d6429 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -171,14 +171,6 @@ config MACH_OMAP_GENERIC
comment "OMAP CPU Speed"
depends on ARCH_OMAP1
-config OMAP_CLOCKS_SET_BY_BOOTLOADER
- bool "OMAP clocks set by bootloader"
- depends on ARCH_OMAP1
- help
- Enable this option to prevent the kernel from overriding the clock
- frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
- internal LCD controller and MPU peripherals.
-
config OMAP_ARM_216MHZ
bool "OMAP ARM 216 MHz CPU (1710 only)"
depends on ARCH_OMAP1 && ARCH_OMAP16XX
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 51bae31cf36..b0f15d234a1 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
omap_cfg_reg(J19_1610_CAM_D6);
omap_cfg_reg(J18_1610_CAM_D7);
- iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
-
omap_board_config = ams_delta_config;
omap_board_config_size = ARRAY_SIZE(ams_delta_config);
omap_serial_init();
@@ -373,10 +371,16 @@ static int __init ams_delta_modem_init(void)
}
arch_initcall(ams_delta_modem_init);
+static void __init ams_delta_map_io(void)
+{
+ omap15xx_map_io();
+ iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
+}
+
MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
/* Maintainer: Jonathan McDowell <noodles@earth.li> */
.atag_offset = 0x100,
- .map_io = omap15xx_map_io,
+ .map_io = ams_delta_map_io,
.init_early = omap1_init_early,
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index eaf09efb91c..16b1423b454 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -17,7 +17,8 @@
#include <plat/clock.h>
-extern int __init omap1_clk_init(void);
+int omap1_clk_init(void);
+void omap1_clk_late_init(void);
extern int omap1_clk_enable(struct clk *clk);
extern void omap1_clk_disable(struct clk *clk);
extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 92400b9eb69..1297bb58869 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -767,6 +767,15 @@ static struct clk_functions omap1_clk_functions = {
.clk_disable_unused = omap1_clk_disable_unused,
};
+static void __init omap1_show_rates(void)
+{
+ pr_notice("Clocking rate (xtal/DPLL1/MPU): "
+ "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+ ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+ ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+ arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+}
+
int __init omap1_clk_init(void)
{
struct omap_clk *c;
@@ -835,9 +844,12 @@ int __init omap1_clk_init(void)
/* We want to be in syncronous scalable mode */
omap_writew(0x1000, ARM_SYSST);
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
- /* Use values set by bootloader. Determine PLL rate and recalculate
- * dependent clocks as if kernel had changed PLL or divisors.
+
+ /*
+ * Initially use the values set by bootloader. Determine PLL rate and
+ * recalculate dependent clocks as if kernel had changed PLL or
+ * divisors. See also omap1_clk_late_init() that can reprogram dpll1
+ * after the SRAM is initialized.
*/
{
unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +874,10 @@ int __init omap1_clk_init(void)
}
}
}
-#else
- /* Find the highest supported frequency and enable it */
- if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
- printk(KERN_ERR "System frequencies not set. Check your config.\n");
- /* Guess sane values (60MHz) */
- omap_writew(0x2290, DPLL_CTL);
- omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
- ck_dpll1.rate = 60000000;
- }
-#endif
propagate_rate(&ck_dpll1);
/* Cache rates for clocks connected to ck_ref (not dpll1) */
propagate_rate(&ck_ref);
- printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
- "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
- ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
- ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
- arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
+ omap1_show_rates();
if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
/* Select slicer output as OMAP input clock */
omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +922,21 @@ int __init omap1_clk_init(void)
return 0;
}
+
+#define OMAP1_DPLL1_SANE_VALUE 60000000
+
+void __init omap1_clk_late_init(void)
+{
+ if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
+ return;
+
+ /* Find the highest supported frequency and enable it */
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ pr_err("System frequencies not set, using default. Check your config.\n");
+ omap_writew(0x2290, DPLL_CTL);
+ omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+ ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
+ }
+ propagate_rate(&ck_dpll1);
+ omap1_show_rates();
+}
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 48ef9888e82..475cb2f50d8 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -30,6 +30,8 @@
#include <plat/omap7xx.h>
#include <plat/mcbsp.h>
+#include "clock.h"
+
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
return -ENODEV;
omap_sram_init();
+ omap1_clk_late_init();
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 50341471890..e1293aa513d 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -334,6 +334,7 @@ config MACH_OMAP4_PANDA
config OMAP3_EMU
bool "OMAP3 debugging peripherals"
depends on ARCH_OMAP3
+ select ARM_AMBA
select OC_ETM
help
Say Y here to enable debugging hardware of omap3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 69ab1c06913..b009f17dee5 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
- common.o gpio.o dma.o wd_timer.o
+ common.o gpio.o dma.o wd_timer.o display.o
omap-2-3-common = irq.o sdrc.o
hwmod-common = omap_hwmod.o \
@@ -264,7 +264,4 @@ smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
obj-y += $(smsc911x-m) $(smsc911x-y)
obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
-disp-$(CONFIG_OMAP2_DSS) := display.o
-obj-y += $(disp-m) $(disp-y)
-
obj-y += common-board-devices.o twl-common.o
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1fe35c24fba..942bb4f19f9 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
+#include <linux/export.h>
#include <plat/prcm.h>
#include <plat/irqs.h>
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index adb2756e242..dce9905d64b 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -27,8 +27,35 @@
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>
+#include <plat/common.h>
#include "control.h"
+#include "display.h"
+
+#define DISPC_CONTROL 0x0040
+#define DISPC_CONTROL2 0x0238
+#define DISPC_IRQSTATUS 0x0018
+
+#define DSS_SYSCONFIG 0x10
+#define DSS_SYSSTATUS 0x14
+#define DSS_CONTROL 0x40
+#define DSS_SDI_CONTROL 0x44
+#define DSS_PLL_CONTROL 0x48
+
+#define LCD_EN_MASK (0x1 << 0)
+#define DIGIT_EN_MASK (0x1 << 1)
+
+#define FRAMEDONE_IRQ_SHIFT 0
+#define EVSYNC_EVEN_IRQ_SHIFT 2
+#define EVSYNC_ODD_IRQ_SHIFT 3
+#define FRAMEDONE2_IRQ_SHIFT 22
+#define FRAMEDONETV_IRQ_SHIFT 24
+
+/*
+ * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
+ * reset before deciding that something has gone wrong
+ */
+#define FRAMEDONE_IRQ_TIMEOUT 100
static struct platform_device omap_display_device = {
.name = "omapdss",
@@ -172,3 +199,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
return r;
}
+
+static void dispc_disable_outputs(void)
+{
+ u32 v, irq_mask = 0;
+ bool lcd_en, digit_en, lcd2_en = false;
+ int i;
+ struct omap_dss_dispc_dev_attr *da;
+ struct omap_hwmod *oh;
+
+ oh = omap_hwmod_lookup("dss_dispc");
+ if (!oh) {
+ WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
+ return;
+ }
+
+ if (!oh->dev_attr) {
+ pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
+ return;
+ }
+
+ da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
+
+ /* store value of LCDENABLE and DIGITENABLE bits */
+ v = omap_hwmod_read(oh, DISPC_CONTROL);
+ lcd_en = v & LCD_EN_MASK;
+ digit_en = v & DIGIT_EN_MASK;
+
+ /* store value of LCDENABLE for LCD2 */
+ if (da->manager_count > 2) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL2);
+ lcd2_en = v & LCD_EN_MASK;
+ }
+
+ if (!(lcd_en | digit_en | lcd2_en))
+ return; /* no managers currently enabled */
+
+ /*
+ * If any manager was enabled, we need to disable it before
+ * DSS clocks are disabled or DISPC module is reset
+ */
+ if (lcd_en)
+ irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
+
+ if (digit_en) {
+ if (da->has_framedonetv_irq) {
+ irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
+ } else {
+ irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
+ 1 << EVSYNC_ODD_IRQ_SHIFT;
+ }
+ }
+
+ if (lcd2_en)
+ irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
+
+ /*
+ * clear any previous FRAMEDONE, FRAMEDONETV,
+ * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
+ */
+ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
+
+ /* disable LCD and TV managers */
+ v = omap_hwmod_read(oh, DISPC_CONTROL);
+ v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
+ omap_hwmod_write(v, oh, DISPC_CONTROL);
+
+ /* disable LCD2 manager */
+ if (da->manager_count > 2) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL2);
+ v &= ~LCD_EN_MASK;
+ omap_hwmod_write(v, oh, DISPC_CONTROL2);
+ }
+
+ i = 0;
+ while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
+ irq_mask) {
+ i++;
+ if (i > FRAMEDONE_IRQ_TIMEOUT) {
+ pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+#define MAX_MODULE_SOFTRESET_WAIT 10000
+int omap_dss_reset(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int c = 0;
+ int i, r;
+
+ if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
+ pr_err("dss_core: hwmod data doesn't contain reset data\n");
+ return -EINVAL;
+ }
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_enable(oc->_clk);
+
+ dispc_disable_outputs();
+
+ /* clear SDI registers */
+ if (cpu_is_omap3430()) {
+ omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
+ omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
+ }
+
+ /*
+ * clear DSS_CONTROL register to switch DSS clock sources to
+ * PRCM clock, if any
+ */
+ omap_hwmod_write(0x0, oh, DSS_CONTROL);
+
+ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
+ & SYSS_RESETDONE_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+
+ if (c == MAX_MODULE_SOFTRESET_WAIT)
+ pr_warning("dss_core: waiting for reset to finish failed\n");
+ else
+ pr_debug("dss_core: softreset done\n");
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_disable(oc->_clk);
+
+ r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
+
+ return r;
+}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644
index 00000000000..b871b017b35
--- /dev/null
+++ b/arch/arm/mach-omap2/display.h
@@ -0,0 +1,29 @@
+/*
+ * display.h - OMAP2+ integration-specific DSS header
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+
+#include <linux/kernel.h>
+
+struct omap_dss_dispc_dev_attr {
+ u8 manager_count;
+ bool has_framedonetv_irq;
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/arch/arm/mach-omap2/io.h
+++ /dev/null
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6b3088db83b..207a2ff9a8c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
ohii = &oh->mpu_irqs[i++];
} while (ohii->irq != -1);
- return i;
+ return i-1;
}
/**
@@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
ohdi = &oh->sdma_reqs[i++];
} while (ohdi->dma_req != -1);
- return i;
+ return i-1;
}
/**
@@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
mem = &os->addr[i++];
} while (mem->pa_start != mem->pa_end);
- return i;
+ return i-1;
}
/**
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index 6d720621352..a5409ce3f32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves),
.masters = omap2420_dss_masters,
.masters_cnt = ARRAY_SIZE(omap2420_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
/* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
.slaves = omap2420_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
&omap2420_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.module_offs = CORE_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap2420_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_venc_hwmod,
- .clk = "dss_54m_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
static struct omap_hwmod omap2420_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_fck",
+ .main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index a2580d01c3f..c4f56cb60d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves),
.masters = omap2430_dss_masters,
.masters_cnt = ARRAY_SIZE(omap2430_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
/* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
.slaves = omap2430_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
&omap2430_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.module_offs = CORE_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap2430_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_venc_hwmod,
- .clk = "dss_54m_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.flags = OCPIF_SWSUP_IDLE,
.user = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
static struct omap_hwmod omap2430_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_fck",
+ .main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c451729d289..c11273da5dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -11,6 +11,7 @@
#include <plat/omap_hwmod.h>
#include <plat/serial.h>
#include <plat/dma.h>
+#include <plat/common.h>
#include <mach/irqs.h>
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dss_hwmod_class = {
.name = "dss",
.sysc = &omap2_dss_sysc,
+ .reset = omap_dss_reset,
};
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index bc9035ec87f..7f8915ad509 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1369,9 +1369,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "tv_clk", .clk = "dss_tv_fck" },
- { .role = "video_clk", .clk = "dss_96m_fck" },
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
+ { .role = "tv_clk", .clk = "dss_tv_fck" },
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
};
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1399,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
.masters = omap3xxx_dss_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.name = "dss_core",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1462,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.slaves = omap3xxx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/*
@@ -1486,6 +1493,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dsi1_hwmod,
+ .clk = "dss_ick",
.addr = omap3xxx_dss_dsi1_addrs,
.fw = {
.omap2 = {
@@ -1502,6 +1510,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
&omap3xxx_l4_core__dss_dsi1,
};
+static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
+ { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1526,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_dsi1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
.slaves = omap3xxx_dss_dsi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1540,6 +1554,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
&omap3xxx_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1569,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap3xxx_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1560,7 +1580,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_venc_hwmod,
- .clk = "dss_tv_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
@@ -1578,10 +1598,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
&omap3xxx_l4_core__dss_venc,
};
+static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_alwon_fck",
+ .main_clk = "dss_tv_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1589,6 +1614,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
.slaves = omap3xxx_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
.flags = HWMOD_NO_IDLEST,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7695e5d4331..daaf165af69 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -30,6 +30,7 @@
#include <plat/mmc.h>
#include <plat/i2c.h>
#include <plat/dmtimer.h>
+#include <plat/common.h>
#include "omap_hwmod_common_data.h"
@@ -1187,6 +1188,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
.name = "dss",
.sysc = &omap44xx_dss_sysc,
+ .reset = omap_dss_reset,
};
/* dss */
@@ -1240,12 +1242,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
{ .role = "sys_clk", .clk = "dss_sys_clk" },
{ .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "dss_clk", .clk = "dss_dss_clk" },
- { .role = "video_clk", .clk = "dss_48mhz_clk" },
+ { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
};
static struct omap_hwmod omap44xx_dss_hwmod = {
.name = "dss_core",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap44xx_dss_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.main_clk = "dss_dss_clk",
@@ -1325,6 +1327,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
{ }
};
+static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
+ .manager_count = 3,
+ .has_framedonetv_irq = 1
+};
+
/* l4_per -> dss_dispc */
static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
.master = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1347,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
&omap44xx_l4_per__dss_dispc,
};
-static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1360,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
},
},
- .opt_clks = dss_dispc_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks),
.slaves = omap44xx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
+ .dev_attr = &omap44xx_dss_dispc_dev_attr
};
/*
@@ -1624,7 +1624,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_hdmi_irqs,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
- .main_clk = "dss_dss_clk",
+ .main_clk = "dss_48mhz_clk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1785,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap44xx_venc_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
+ .main_clk = "dss_tv_clk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index de832ebc93a..51e5418899f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
};
+struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
+ .manager_count = 2,
+ .has_framedonetv_irq = 0
+};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index 39a7c37f458..ad5d8f04c0b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -16,6 +16,8 @@
#include <plat/omap_hwmod.h>
+#include "display.h"
+
/* Common address space across OMAP2xxx */
extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
extern struct omap_hwmod_class omap2xxx_mcspi_class;
+extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
+
#endif
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 6a66aa5e2a5..d15225ff5c4 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
static const struct of_device_id l3_noc_match[] = {
{.compatible = "ti,omap4-l3-noc", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, l3_noc_match);
#else
#define l3_noc_match NULL
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 1e79bdf313e..00bff46ca48 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -24,6 +24,7 @@
#include "powerdomain.h"
#include "clockdomain.h"
#include "pm.h"
+#include "twl-common.h"
static struct omap_device_pm_latency *pm_lats;
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
static int __init omap2_common_pm_late_init(void)
{
- /* Init the OMAP TWL parameters */
- omap3_twl_init();
- omap4_twl_init();
-
/* Init the voltage layer */
+ omap_pmic_late_init();
omap_voltage_late_init();
/* Initialize the voltages */
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 6a4f6839a7d..cf246b39bac 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
sr_write_reg(sr_info, ERRCONFIG_V1, status);
} else if (sr_info->ip_type == SR_TYPE_V2) {
/* Read the status bits */
- sr_read_reg(sr_info, IRQSTATUS);
+ status = sr_read_reg(sr_info, IRQSTATUS);
/* Clear them by writing back */
sr_write_reg(sr_info, IRQSTATUS, status);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 52243577216..10b20c652e5 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -30,6 +30,7 @@
#include <plat/usb.h>
#include "twl-common.h"
+#include "pm.h"
static struct i2c_board_info __initdata pmic_i2c_board_info = {
.addr = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
+void __init omap_pmic_late_init(void)
+{
+	/* Init the OMAP TWL parameters (if PMIC has been registered) */
+ if (!pmic_i2c_board_info.irq)
+ return;
+
+ omap3_twl_init();
+ omap4_twl_init();
+}
+
#if defined(CONFIG_ARCH_OMAP3)
static struct twl4030_usb_data omap3_usb_pdata = {
.usb_mode = T2_USB_MODE_ULPI,
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 5e83a5bd37f..275dde8cb27 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -1,6 +1,8 @@
#ifndef __OMAP_PMIC_COMMON__
#define __OMAP_PMIC_COMMON__
+#include <plat/irqs.h>
+
#define TWL_COMMON_PDATA_USB (1 << 0)
#define TWL_COMMON_PDATA_BCI (1 << 1)
#define TWL_COMMON_PDATA_MADC (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
struct twl4030_platform_data *pmic_data);
+void omap_pmic_late_init(void);
static inline void omap2_pmic_init(const char *pmic_type,
struct twl4030_platform_data *pmic_data)
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index fc0b8544e17..4b81f59a4cb 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static void balloon3_udc_command(int cmd)
{
if (cmd == PXA2XX_UDC_CMD_CONNECT)
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 692e1ffc558..d23b92b8048 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
static inline void __init colibri_pxa320_init_eth(void) {}
#endif /* CONFIG_AX88796 */
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
.gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
.gpio_pullup = -1,
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 9c8208ca041..ffdd70dad32 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
}
#endif
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
static struct gpio_vbus_mach_info gumstix_udc_info = {
.gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
.gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
index f80bbe246af..d4eac3d6ffb 100644
--- a/arch/arm/mach-pxa/include/mach/palm27x.h
+++ b/arch/arm/mach-pxa/include/mach/palm27x.h
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
#define palm27x_lcd_init(power, mode) do {} while (0)
#endif
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
extern void __init palm27x_udc_init(int vbus, int pullup,
int vbus_inverted);
#else
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 325c245c0a0..fbc10d7b95d 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info palm27x_udc_info = {
.gpio_vbus_inverted = 1,
};
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 6ec7caefb37..2c24c67fd92 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
/******************************************************************************
* UDC
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
static struct gpio_vbus_mach_info palmtc_udc_info = {
.gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
.gpio_vbus_inverted = 1,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index a7539a6ed1f..ca0c6615028 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
.gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
.gpio_pullup = -1,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 66668565ee7..f208154b138 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8ac9e9f8479..b1e192ba8c2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
-#ifdef CONFIG_ARM_ERRATA_753970
+#ifdef CONFIG_PL310_ERRATA_753970
	/* write to an unmapped register */
writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4e7f6cba1a..1aa664a1999 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
pte_t *pte;
int i = 0;
unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+ unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
struct page *page;
void *addr;
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
*handle = ~0;
size = PAGE_ALIGN(size);
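A minimal sketch of the pattern the hunk above introduces, assuming only generic kernel APIs (gfp_t, __GFP_COMP, alloc_pages); the helper name is hypothetical and is not part of the patch:

/* split_page() cannot handle compound pages, so strip __GFP_COMP from
 * the mask before allocating a region that will later be split. */
static struct page *alloc_splittable_pages(gfp_t gfp, unsigned int order)
{
	gfp &= ~__GFP_COMP;	/* a compound allocation would break split_page() */
	return alloc_pages(gfp, order);
}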
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 74be05f3e03..44b628e4d6e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -9,8 +9,7 @@
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
-#include <asm/cputype.h>
-#include <asm/system.h>
+#include <asm/cachetype.h>
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
- unsigned int cache_type;
- int do_align = 0, aliasing = 0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
/*
* We only need to do colour alignment if either the I or D
- * caches alias. This is indicated by bits 9 and 21 of the
- * cache type register.
+ * caches alias.
*/
- cache_type = read_cpuid_cachetype();
- if (cache_type != read_cpuid_id()) {
- aliasing = (cache_type | cache_type >> 12) & (1 << 11);
- if (aliasing)
- do_align = filp || flags & MAP_SHARED;
- }
-#else
-#define do_align 0
-#define aliasing 0
-#endif
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
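An illustrative sketch of the colour-alignment rule the simplified code above relies on, assuming the standard SHMLBA and PAGE_SHIFT definitions; the helper is hypothetical and simply mirrors the COLOUR_ALIGN() macro from this file:

/* On a VIPT aliasing cache every shared mapping of a page must land in
 * the same cache colour, so the candidate address is rounded up to an
 * SHMLBA boundary and then offset by the file offset's colour. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);

	return base + ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}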
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 83b745a5e1b..c75f254abd8 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -85,7 +85,6 @@ enum mxc_cpu_pwr_mode {
};
extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
-extern void (*imx_idle)(void);
extern void imx_print_silicon_rev(const char *cpu, int srev);
void avic_handle_irq(struct pt_regs *);
@@ -133,4 +132,5 @@ extern void imx53_qsb_common_init(void);
extern void imx53_smd_common_init(void);
extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
extern void imx6q_pm_init(void);
+extern void imx6q_clock_map_io(void);
#endif
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index 00a78193c68..a4d36d601d5 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -50,20 +50,6 @@
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
-#define IMX_CHIP_REVISION_1_0_STRING "1.0"
-#define IMX_CHIP_REVISION_1_1_STRING "1.1"
-#define IMX_CHIP_REVISION_1_2_STRING "1.2"
-#define IMX_CHIP_REVISION_1_3_STRING "1.3"
-#define IMX_CHIP_REVISION_2_0_STRING "2.0"
-#define IMX_CHIP_REVISION_2_1_STRING "2.1"
-#define IMX_CHIP_REVISION_2_2_STRING "2.2"
-#define IMX_CHIP_REVISION_2_3_STRING "2.3"
-#define IMX_CHIP_REVISION_3_0_STRING "3.0"
-#define IMX_CHIP_REVISION_3_1_STRING "3.1"
-#define IMX_CHIP_REVISION_3_2_STRING "3.2"
-#define IMX_CHIP_REVISION_3_3_STRING "3.3"
-#define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown"
-
#ifndef __ASSEMBLY__
extern unsigned int __mxc_cpu_type;
#endif
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index cf88b3593fb..b9895d25016 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -17,14 +17,9 @@
#ifndef __ASM_ARCH_MXC_SYSTEM_H__
#define __ASM_ARCH_MXC_SYSTEM_H__
-extern void (*imx_idle)(void);
-
static inline void arch_idle(void)
{
- if (imx_idle != NULL)
- (imx_idle)();
- else
- cpu_do_idle();
+ cpu_do_idle();
}
void arch_reset(char mode, const char *cmd);
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index 9dad8dcc2ea..d65fb31a55c 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <mach/common.h>
@@ -28,8 +29,8 @@
#include <asm/system.h>
#include <asm/mach-types.h>
-void (*imx_idle)(void) = NULL;
void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
+EXPORT_SYMBOL_GPL(imx_ioremap);
static void __iomem *wdog_base;
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 197ca03c3f7..eb73ab40e95 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -165,8 +165,8 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
- u8 flags;
# endif
+ u8 flags;
};
#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index c50df4814f6..3ff3e36580f 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <plat/i2c.h>
+#include <plat/omap_hwmod.h>
struct sys_timer;
@@ -55,6 +56,8 @@ void am35xx_init_early(void);
void ti816x_init_early(void);
void omap4430_init_early(void);
+extern int omap_dss_reset(struct omap_hwmod *);
+
void omap_sram_init(void);
/*
diff --git a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
index a9276667c2f..c7adad0e8de 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
@@ -12,7 +12,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
index e1cbc728c77..c8bec9c7655 100644
--- a/arch/arm/plat-s5p/sysmmu.c
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/export.h>
#include <asm/pgtable.h>
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index d48245bb02b..df8155b9d4d 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -24,6 +24,8 @@
#ifndef __PLAT_GPIO_CFG_H
#define __PLAT_GPIO_CFG_H __FILE__
+#include<linux/types.h>
+
typedef unsigned int __bitwise__ samsung_gpio_pull_t;
typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
index efe1d564473..312b510d86b 100644
--- a/arch/arm/plat-samsung/pd.c
+++ b/arch/arm/plat-samsung/pd.c
@@ -11,7 +11,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index dc1185dcf80..c559d8438c7 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -11,7 +11,7 @@
* the Free Software Foundation; either version 2 of the License.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5bdeef96984..ccbe16f4722 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1123,5 +1123,6 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+m28evk MACH_M28EVK M28EVK 3613
smdk4212 MACH_SMDK4212 SMDK4212 3638
smdk4412 MACH_SMDK4412 SMDK4412 3765
diff --git a/arch/microblaze/include/asm/namei.h b/arch/microblaze/include/asm/namei.h
deleted file mode 100644
index 61d60b8a07d..00000000000
--- a/arch/microblaze/include/asm/namei.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_NAMEI_H
-#define _ASM_MICROBLAZE_NAMEI_H
-
-#ifdef __KERNEL__
-
-/* This dummy routine maybe changed to something useful
- * for /usr/gnemul/ emulation stuff.
- * Look at asm-sparc/namei.h for details.
- */
-#define __emul_prefix() NULL
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_MICROBLAZE_NAMEI_H */
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index d9b776740a6..d3b478242ea 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -449,6 +449,7 @@
interrupt-parent = <&mpic>;
interrupts = <16 2>;
interrupt-map-mask = <0xf800 0 0 7>;
+ /* IRQ[0:3] are pulled up on board, set to active-low */
interrupt-map = <
/* IDSEL 0x0 */
0000 0 0 1 &mpic 0 1
@@ -488,11 +489,15 @@
interrupt-parent = <&mpic>;
interrupts = <16 2>;
interrupt-map-mask = <0xf800 0 0 7>;
+ /*
+ * IRQ[4:6] only for PCIe, set to active-high,
+ * IRQ[7] is pulled up on board, set to active-low
+ */
interrupt-map = <
/* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
+ 0000 0 0 1 &mpic 4 2
+ 0000 0 0 2 &mpic 5 2
+ 0000 0 0 3 &mpic 6 2
0000 0 0 4 &mpic 7 1
>;
ranges = <0x2000000 0x0 0xa0000000
@@ -527,12 +532,16 @@
interrupt-parent = <&mpic>;
interrupts = <16 2>;
interrupt-map-mask = <0xf800 0 0 7>;
+ /*
+ * IRQ[8:10] are pulled up on board, set to active-low
+ * IRQ[11] only for PCIe, set to active-high,
+ */
interrupt-map = <
/* IDSEL 0x0 */
0000 0 0 1 &mpic 8 1
0000 0 0 2 &mpic 9 1
0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
+ 0000 0 0 4 &mpic 11 2
>;
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 6cdf1c0d2c8..3b98d735434 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_NDFC=m
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5964371303a..8558b572e55 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,6 +15,7 @@
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
+#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 45023e26aea..d7946be298b 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -203,7 +203,7 @@ config P3060_QDS
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
index 01dcf44871e..081cf4ac188 100644
--- a/arch/powerpc/platforms/85xx/p3060_qds.c
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
.power_save = e500_idle,
};
-machine_device_initcall(p3060_qds, declare_of_platform_devices);
+machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index af1a5df46b3..b6731e4a664 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
if (!ehv_pic->irqhost) {
of_node_put(np);
+ kfree(ehv_pic);
return;
}
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index c4d96fa32ba..d5c3c90ee69 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -328,6 +328,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
err:
iounmap(fsl_lbc_ctrl_dev->regs);
kfree(fsl_lbc_ctrl_dev);
+ fsl_lbc_ctrl_dev = NULL;
return ret;
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3363fbc964f..ceb09cbd232 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
that the BRG divisor must be even if you're not using divide-by-16
mode. */
- if (!div16 && (divisor & 1))
+ if (!div16 && (divisor & 1) && (divisor > 3))
divisor++;
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
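A hedged sketch of the rounding rule after this change, using only names visible in the hunk; it is an illustration, not the driver code:

/* Errata QE_General4: without divide-by-16 the BRG divisor must be even,
 * but divisors of 1 or 3 are left alone so the rate error introduced by
 * rounding stays small. */
static unsigned int qe_even_divisor(unsigned int divisor, int div16)
{
	if (!div16 && (divisor & 1) && (divisor > 3))
		divisor++;
	return divisor;
}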
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 118c143a9cb..2c32df6fe23 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -11,7 +11,7 @@
#endif
#define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP)
-#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_IP)
+#define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP)
#define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP)
#define ARCH_IS_STACKGROW(address) \
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca..631b9477b99 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1004,22 +1011,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
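The pstore read callback now hands back a buffer it allocates itself rather than writing into psi->buf. A minimal sketch of that contract, with a hypothetical helper name and only generic kernel calls (kmalloc, memcpy):

/* Copy a record payload into a caller-owned buffer; the caller kfree()s
 * *buf after consuming it, mirroring the new erst_reader()/efi_pstore_read()
 * prototype with its extra char **buf argument. */
static ssize_t pstore_copy_record(char **buf, const void *data, size_t len)
{
	*buf = kmalloc(len, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;
	memcpy(*buf, data, len);
	return len;
}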
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ec555951176..43b875810d1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -67,7 +67,7 @@ static int __init ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
- struct ata_port_info pi = ahci_port_info[id->driver_data];
+ struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
struct ata_host *host;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 63d53277d6a..4cadfa28f94 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
if (rc)
goto out;
+#ifdef CONFIG_ATA_BMDMA
if (bmdma)
/* prepare and activate BMDMA host */
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
else
+#endif
/* prepare and activate SFF host */
rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
host->private_data = host_priv;
host->flags |= hflags;
+#ifdef CONFIG_ATA_BMDMA
if (bmdma) {
pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
} else
+#endif
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 793f796c4da..5693ecee9a4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
- nid, K(node_page_state(nid, NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ nid, K(node_page_state(nid, NR_ANON_PAGES)
+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR
+ HPAGE_PMD_NR),
+#else
+ nid, K(node_page_state(nid, NR_ANON_PAGES)),
#endif
- ),
nid, K(node_page_state(nid, NR_SHMEM)),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
, nid,
K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR)
+ HPAGE_PMD_NR));
+#else
+ nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
- );
n += hugetlb_report_node_meminfo(nid, buf + n);
return n;
}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f2144..dcd8babae9e 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
else
op.config |= CFG_MID_FRAG;
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ if (first_block) {
+ writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
}
memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
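A brief sketch of why the writes above are now gated on first_block, phrased as a hypothetical helper; the register names come from the hunk and nothing else is assumed:

/* The engine keeps the running digest between fragments, so the initial
 * hash state is loaded only for the first block of a request; reloading
 * it for later fragments would restart the hash from scratch. */
static void maybe_load_initial_state(void __iomem *reg, const u32 *state,
				     bool first_block)
{
	if (!first_block)
		return;		/* engine already holds the intermediate digest */
	writel(state[0], reg + DIGEST_INITIAL_VAL_A);
	writel(state[1], reg + DIGEST_INITIAL_VAL_B);
	writel(state[2], reg + DIGEST_INITIAL_VAL_C);
	writel(state[3], reg + DIGEST_INITIAL_VAL_D);
	writel(state[4], reg + DIGEST_INITIAL_VAL_E);
}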
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9c..73464a62adf 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,p1020-memory-controller", },
{ .compatible = "fsl,p1021-memory-controller", },
{ .compatible = "fsl,p2020-memory-controller", },
- { .compatible = "fsl,p4080-memory-controller", },
+ { .compatible = "fsl,qoriq-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87f..b0a81173a26 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
- memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
return -1;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 147df8ae79d..d3f3e8f5456 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
* Translate OpenFirmware node properties into platform_data
* WARNING: This is DEPRECATED and will be removed eventually!
*/
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
*invert = *val;
}
#else
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
*gpio_base = -1;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 405c63b9d53..8323fc38984 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
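A small sketch of the validation added above, assuming num_clips arrives as a signed value from the ioctl and DRM_MODE_FB_DIRTY_MAX_CLIPS is the upper bound; the helper name is illustrative only:

/* Reject out-of-range clip counts before sizing the kzalloc(), so a
 * hostile ioctl argument cannot drive an oversized or wrapped allocation. */
static int validate_clip_count(int num_clips)
{
	if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS)
		return -EINVAL;
	return 0;
}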
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc..2bb07bca511 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
#include "drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
- (dma_addr_t *)&entry->paddr, GFP_KERNEL);
- if (!entry->paddr) {
+ buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
- (unsigned int)entry->vaddr, entry->paddr, entry->size);
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr,
+ buffer->size);
return 0;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (entry->paddr && entry->vaddr && entry->size)
- dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
- entry->paddr);
+ if (buffer->dma_addr && buffer->size)
+ dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+ (dma_addr_t)buffer->dma_addr);
else
- DRM_DEBUG_KMS("entry data is null.\n");
+ DRM_DEBUG_KMS("buffer data are invalid.\n");
}
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
return ERR_PTR(-ENOMEM);
}
- entry->size = size;
+ buffer->size = size;
/*
* allocate memory region with size and set the memory information
- * to vaddr and paddr of a entry object.
+ * to vaddr and dma_addr of a buffer object.
*/
- if (lowlevel_buffer_allocate(dev, entry) < 0) {
- kfree(entry);
- entry = NULL;
+ if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+ kfree(buffer);
+ buffer = NULL;
return ERR_PTR(-ENOMEM);
}
- return entry;
+ return buffer;
}
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (!entry) {
- DRM_DEBUG_KMS("entry is null.\n");
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
return;
}
- lowlevel_buffer_deallocate(dev, entry);
+ lowlevel_buffer_deallocate(dev, buffer);
- kfree(entry);
- entry = NULL;
+ kfree(buffer);
+ buffer = NULL;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01..6e91f9caa5d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
- dma_addr_t paddr;
- void __iomem *vaddr;
- unsigned int size;
-};
-
/* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry);
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e76872..d620b078425 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
struct exynos_drm_connector {
struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_manager *manager;
};
/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
+ mode->vrefresh = timing->refresh;
mode->hdisplay = timing->xres;
mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+ if (timing->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timing->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
memset(timing, 0, sizeof(*timing));
timing->pixclock = mode->clock * 1000;
- timing->refresh = mode->vrefresh;
+ timing->refresh = drm_mode_vrefresh(mode);
timing->xres = mode->hdisplay;
timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
unsigned int count;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!display) {
- DRM_DEBUG_KMS("display is null.\n");
+ if (!display_ops) {
+ DRM_DEBUG_KMS("display_ops is null.\n");
return 0;
}
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
- if (display->get_edid) {
+ if (display_ops->get_edid) {
int ret;
void *edid;
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- ret = display->get_edid(manager->dev, connector,
+ ret = display_ops->get_edid(manager->dev, connector,
edid, MAX_EDID);
if (ret < 0) {
DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct fb_videomode *timing;
- if (display->get_timing)
- timing = display->get_timing(manager->dev);
+ if (display_ops->get_timing)
+ timing = display_ops->get_timing(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
struct fb_videomode timing;
int ret = MODE_BAD;
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
convert_to_video_timing(&timing, mode);
- if (display && display->check_timing)
- if (!display->check_timing(manager->dev, (void *)&timing))
+ if (display_ops && display_ops->check_timing)
+ if (!display_ops->check_timing(manager->dev, (void *)&timing))
ret = MODE_OK;
return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return connector->encoder;
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display && display->is_connected) {
- if (display->is_connected(manager->dev))
+ if (display_ops && display_ops->is_connected) {
+ if (display_ops->is_connected(manager->dev))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display->type) {
+ switch (manager->display_ops->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
if (err)
goto err_connector;
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->manager = manager;
connector->encoder = encoder;
+
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb..ee43cc22085 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
exynos_drm_fn_encoder(crtc, overlay,
exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
- overlay->paddr = entry->paddr;
- overlay->vaddr = entry->vaddr;
+ overlay->dma_addr = buffer->dma_addr;
+ overlay->vaddr = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
(unsigned long)overlay->vaddr,
- (unsigned long)overlay->paddr);
+ (unsigned long)overlay->dma_addr);
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- /* TODO */
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ exynos_drm_fn_encoder(crtc, NULL,
+ exynos_drm_encoder_crtc_disable);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2..25f72a62cb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c1..53e2216de61 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
#include <drm/exynos_drm.h>
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
exynos_drm_mode_config_init(dev);
/*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae7..5e02e6ecc2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <linux/module.h>
#include "drm.h"
#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- * and this is physically continuous.
+ * @dma_addr: bus(accessed by dma) address to the memory region allocated
+ * for an overlay.
 * @vaddr: virtual memory address to this overlay.
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
+ void (*disable)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display *display;
+ struct exynos_drm_display_ops *display_ops;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67..153061415ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (manager_ops && manager_ops->commit)
+ manager_ops->commit(manager->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ if (manager_ops && manager_ops->disable)
+ manager_ops->disable(manager->dev);
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
- if (display && display->power_on)
- display->power_on(manager->dev, mode);
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
}
}
}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
}
static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_manager *manager;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ /*
+ * if crtc is detached from encoder, check pipe,
+ * otherwise check crtc attached to encoder
+ */
+ if (!encoder->crtc) {
+ manager = to_exynos_encoder(encoder)->manager;
+ if (manager->pipe < 0 ||
+ private->crtc[manager->pipe] != crtc)
+ continue;
+ } else {
+ if (encoder->crtc != crtc)
+ continue;
+ }
fn(encoder, data);
}
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int crtc = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * when crtc is detached from encoder, this pipe is used
+ * to select manager operation
+ */
+ manager->pipe = crtc;
- overlay_ops->commit(manager->dev);
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev);
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
struct exynos_drm_overlay *overlay = data;
- overlay_ops->mode_set(manager->dev, overlay);
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev);
+
+ /*
+ * crtc is already detached from encoder and last
+ * function for detaching is properly done, so
+ * clear pipe from manager to prevent repeated call
+ */
+ if (!encoder->crtc)
+ manager->pipe = -1;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a..a22acfbf0e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd524..5bf4a1ac7f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
*
 * @fb: drm framebuffer object.
* @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- * - containing only the information to physically continuous memory
- * region allocated at default framebuffer creation.
+ * @buffer: pointer to exynos_drm_gem_buffer object.
+ * - contains the memory information for the memory region allocated
+ * at default framebuffer creation.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
* default framebuffer has no gem object so
* a buffer of the default framebuffer should be released at here.
*/
- if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+ if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+ exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
kfree(exynos_fb);
exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
*/
if (!mode_cmd->handle) {
if (!file_priv) {
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
/*
* in case that file_priv is NULL, it allocates
* only buffer and this buffer would be used
* for default framebuffer.
*/
- entry = exynos_drm_buf_create(dev, size);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
goto err_buffer;
}
- exynos_fb->entry = entry;
+ exynos_fb->buffer = buffer;
- DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
- (unsigned long)entry->paddr, size);
+ DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+ (unsigned long)buffer->dma_addr, size);
goto out;
} else {
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
- size,
- &mode_cmd->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &mode_cmd->handle,
+ size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
* so that the default framebuffer has no gem object of its own,
* only its own buffer object.
*/
- exynos_fb->entry = exynos_gem_obj->entry;
+ exynos_fb->buffer = exynos_gem_obj->buffer;
- DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->entry->paddr, size,
+ DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+ (unsigned long)exynos_fb->buffer->dma_addr, size,
(unsigned int)&exynos_gem_obj->base);
out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
return exynos_drm_fb_init(file_priv, dev, mode_cmd);
}
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry = exynos_fb->entry;
- if (!entry)
+ buffer = exynos_fb->buffer;
+ if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)entry->vaddr,
- (unsigned long)entry->paddr);
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr);
- return entry;
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_drm_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a771..836f4100818 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_framebuffer *fb,
- unsigned int fb_width,
- unsigned int fb_height)
+ struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
- struct exynos_drm_buf_entry *entry;
- unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
exynos_fb->fb = fb;
drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitch;
- dev->mode_config.fb_base = entry->paddr;
- fbi->screen_base = entry->vaddr + offset;
- fbi->fix.smem_start = entry->paddr + offset;
+ dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
if (ret < 0)
fb_dealloc_cmap(&fbi->cmap);
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
}
helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ return exynos_drm_fbdev_update(helper, helper->fb);
}
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
fb_helper = private->fb_helper;
if (fb_helper) {
+ struct list_head temp_list;
+
+ INIT_LIST_HEAD(&temp_list);
+
+ /*
+ * fb_helper is reinitialized but the kernel fb is reused,
+ * so kernel_fb_list needs to be backed up and restored
+ */
+ if (!list_empty(&fb_helper->kernel_fb_list))
+ list_replace_init(&fb_helper->kernel_fb_list,
+ &temp_list);
+
drm_fb_helper_fini(fb_helper);
ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
return ret;
}
+ if (!list_empty(&temp_list))
+ list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret < 0) {
DRM_ERROR("failed to add fb helper to connectors\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9..db3b3d9e731 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
return 0;
}
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
.get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
writel(val, ctx->regs + VIDCON0);
}
+static void fimd_disable(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct drm_device *drm_dev = subdrv->drm_dev;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* fimd dma off */
+ val = readl(ctx->regs + VIDCON0);
+ val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+ writel(val, ctx->regs + VIDCON0);
+
+ /*
+ * if vblank is still enabled while dma is off then
+ * disable the vsync interrupt.
+ */
+ if (drm_dev->vblank_enabled[manager->pipe] &&
+ atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+ drm_vblank_put(drm_dev, manager->pipe);
+
+ /*
+ * if vblank_disable_allowed is 0 then disable the
+ * vsync interrupt right now, else the vsync interrupt
+ * would be disabled by the drm timer once the current process
+ * gives up ownership of the vblank event.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, manager->pipe);
+ }
+}
+
static int fimd_enable_vblank(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
static struct exynos_drm_manager_ops fimd_manager_ops = {
.commit = fimd_commit,
+ .disable = fimd_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->paddr = overlay->paddr + offset;
+ win_data->dma_addr = overlay->dma_addr + offset;
win_data->vaddr = overlay->vaddr + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->paddr,
+ (unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
writel(val, ctx->regs + SHADOWCON);
/* buffer start address */
- val = win_data->paddr;
+ val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = win_data->paddr + size;
+ val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->paddr, val, size);
+ (unsigned long)win_data->dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
static void fimd_win_disable(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
int win = ctx->default_win;
u32 val;
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
if (win < 0 || win > WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
-
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ /*
+ * in case that vblank_disable_allowed is 1, manager->pipe could
+ * be -1 here because the disable callback does not disable the
+ * vsync interrupt, so a vsync interrupt may still occur at this
+ * moment. it will be disabled by the timer handler later.
+ */
+ if (manager->pipe == -1)
+ return IRQ_HANDLED;
+
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->irq_enabled = 1;
- /*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(drm_vblank_put function was called)
- */
- drm_dev->vblank_disable_allowed = 1;
-
return 0;
}
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.pipe = -1;
subdrv->manager.ops = &fimd_manager_ops;
subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display = &fimd_display;
+ subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906e..aba0fe47f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle)
+static struct exynos_drm_gem_obj
+ *exynos_drm_gem_init(struct drm_device *drm_dev,
+ struct drm_file *file_priv, unsigned int *handle,
+ unsigned int size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
struct drm_gem_object *obj;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- size = roundup(size, PAGE_SIZE);
-
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
DRM_ERROR("failed to allocate exynos gem object.\n");
return ERR_PTR(-ENOMEM);
}
- /* allocate the new buffer object and memory region. */
- entry = exynos_drm_buf_create(dev, size);
- if (!entry) {
- kfree(exynos_gem_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- exynos_gem_obj->entry = entry;
-
obj = &exynos_gem_obj->base;
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init(drm_dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initailize gem object.\n");
- goto err_obj_init;
+ DRM_ERROR("failed to initialize gem object.\n");
+ ret = -EINVAL;
+ goto err_object_init;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
err_create_mmap_offset:
drm_gem_object_release(obj);
-err_obj_init:
- exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
-
+err_object_init:
kfree(exynos_gem_obj);
return ERR_PTR(ret);
}
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size)
+{
+
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+ struct exynos_drm_gem_buf *buffer;
+
+ size = roundup(size, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ return ERR_CAST(buffer);
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ exynos_drm_buf_destroy(dev, buffer);
+ return exynos_gem_obj;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+
+ return exynos_gem_obj;
+}
+
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &args->handle, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vm_size = vma->vm_end - vma->vm_start;
/*
- * a entry contains information to physically continuous memory
+ * a buffer contains information about physically contiguous memory
* allocated by user request or at framebuffer creation.
*/
- entry = exynos_gem_obj->entry;
+ buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
- if (vm_size > entry->size)
+ if (vm_size > buffer->size)
return -EINVAL;
/*
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
exynos_gem_obj = to_exynos_gem_obj(gem_obj);
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+ exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
kfree(exynos_gem_obj);
}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+ args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+ pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT) + page_offset;
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277..ef8797334e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
struct exynos_drm_gem_obj, base)
/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by dma) of the allocated memory region.
+ * - this address is a physical address without an IOMMU and
+ * a device address with an IOMMU.
+ * @size: size of the allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ unsigned long size;
+};
+
+/*
* exynos drm buffer structure.
*
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- * - containing the information to physically
+ * @buffer: a pointer to exynos_drm_gem_buf object.
+ * - contains the information about the memory region allocated
+ * by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
*
@@ -45,13 +61,13 @@
*/
struct exynos_drm_gem_obj {
struct drm_gem_object base;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
/* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size);
/*
* request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f40f1ce1d8..d09a6e02dc9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ int ret;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
if (ring->size == 0)
return 0;
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
seq_printf(m, "Ring %s:\n", ring->name);
seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 crstanddelay = I915_READ16(CRSTANDVID);
+ u16 crstanddelay;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ crstanddelay = I915_READ16(CRSTANDVID);
+
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 delayfreq;
- int i;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 inttoext;
- int i;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
- u32 rstdbyctl = I915_READ(RSTDBYCTL);
- u16 crstandvid = I915_READ16(CRSTANDVID);
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rgvmodectl = I915_READ(MEMMODECTL);
+ rstdbyctl = I915_READ(RSTDBYCTL);
+ crstandvid = I915_READ16(CRSTANDVID);
+
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
@@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
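
The i915_debugfs.c hunks above all apply the same shape of fix: take struct_mutex interruptibly before touching hardware registers and bail out if the wait is interrupted. A condensed sketch of that pattern; EXAMPLE_REG and the function name are hypothetical, everything else mirrors helpers already used in the hunks.

static int example_debugfs_show(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 value;
	int ret;

	/* take the lock interruptibly so a signal doesn't leave the
	 * reader stuck behind a long-running GPU operation */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;			/* typically -EINTR */

	value = I915_READ(EXAMPLE_REG);		/* EXAMPLE_REG is hypothetical */

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "example: 0x%08x\n", value);
	return 0;
}
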
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9c2cfe45da..15bfa9145d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
-unsigned int i915_enable_fbc __read_mostly = -1;
+int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
-unsigned int i915_panel_use_ssc __read_mostly = -1;
+int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_CLASS_DISPLAY_VGA << 8, \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
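
For the PCI id table change above, both spellings encode the same 24-bit class value; with class_mask = 0xff0000 only the base-class byte is compared, so the new form simply states that intent directly. A small hypothetical check, not part of the patch:

#include <linux/bug.h>
#include <linux/pci_ids.h>

/*
 * PCI_CLASS_DISPLAY_VGA is 0x0300 and PCI_BASE_CLASS_DISPLAY is 0x03, so
 * (0x0300 << 8) == (0x03 << 16) == 0x030000; the mask 0xff0000 keeps only
 * the base-class byte either way.
 */
static inline void example_class_check(void)
{
	BUILD_BUG_ON((PCI_CLASS_DISPLAY_VGA << 8) !=
		     (PCI_BASE_CLASS_DISPLAY << 16));
}
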
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74..4a9c1b97980 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,9 @@ struct drm_i915_master_private {
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
struct drm_i915_fence_reg {
struct list_head lru_list;
@@ -168,7 +171,7 @@ struct drm_i915_error_state {
u32 instdone1;
u32 seqno;
u64 bbaddr;
- u64 fence[16];
+ u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_object {
int page_count;
@@ -182,7 +185,7 @@ struct drm_i915_error_state {
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
- s32 fence_reg:5;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
@@ -375,7 +378,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -506,7 +509,7 @@ typedef struct drm_i915_private {
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
- uint64_t saveFENCE[16];
+ uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
@@ -777,10 +780,8 @@ struct drm_i915_gem_object {
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
- *
- * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
- signed int fence_reg:5;
+ signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
/**
* Advice: are the backing pages purgeable?
@@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern unsigned int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
-extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern unsigned int i915_enable_rc6 __read_mostly;
-extern unsigned int i915_enable_fbc __read_mostly;
+extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
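
The new I915_MAX_NUM_FENCE_BITS value follows from two's-complement range: a signed 5-bit field holds -16..15, which covers I915_FENCE_REG_NONE (-1) plus fence indices 0..15. A tiny illustration with a hypothetical struct name:

/*
 * Hypothetical demo of why 5 bits are enough: the bitfield must store
 * I915_FENCE_REG_NONE (-1) as well as indices 0..I915_MAX_NUM_FENCES-1.
 */
struct example_fence_field {
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;	/* range -16..15 */
};
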
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d18b07adcff..8359dc77704 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct drm_i915_gem_object *obj = reg->obj;
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* so emit a request to do so.
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request)
+ if (request) {
ret = i915_add_request(obj->ring, NULL, request);
- else
+ if (ret)
+ kfree(request);
+ } else
ret = -ENOMEM;
}
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* On Gen6, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
- for (i = 0; i < 16; i++)
+ for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ee2729fe5c..b40004b5597 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a09416e611..b080cc82400 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_ON (1 << 28)
-#define PP_SEQUENCE_OFF (2 << 28)
-#define PP_SEQUENCE_MASK 0x30000000
+#define PP_SEQUENCE_POWER_UP (1 << 28)
+#define PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define PP_SEQUENCE_MASK (3 << 28)
+#define PP_SEQUENCE_SHIFT 28
#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
-#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
#define PP_SEQUENCE_STATE_MASK 0x0000000f
+#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
+#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
+#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
+#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@@ -3444,6 +3453,10 @@
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GEN6_UCGCTL2 0x9404
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
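
The reworked PP_SEQUENCE_* definitions above let the panel power sequencer state be tested directly. A small hypothetical helper, not part of the patch, in the spirit of the IDLE_ON_MASK/IDLE_ON_VALUE pairs introduced later in intel_dp.c (the full check there also verifies PP_ON):

/*
 * Hypothetical helper: the sequencer is idle in the "on" state when no
 * power sequence is in progress (bits 29:28 == PP_SEQUENCE_NONE) and the
 * state field (bits 3:0) reports ON_IDLE.
 */
static inline bool example_pp_idle_on(u32 pp_status)
{
	return (pp_status & PP_SEQUENCE_MASK) == PP_SEQUENCE_NONE &&
	       (pp_status & PP_SEQUENCE_STATE_MASK) == PP_SEQUENCE_STATE_ON_IDLE;
}
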
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d7665..7886e4fb60e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 981b1f1c04d..e77a863a383 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
lvds_bpc = 6;
if (lvds_bpc < display_bpc) {
- DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
display_bpc = lvds_bpc;
}
continue;
@@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
if (edp_bpc < display_bpc) {
- DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
continue;
@@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
/* Don't use an invalid EDID bpc value */
if (connector->display_info.bpc &&
connector->display_info.bpc < display_bpc) {
- DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
display_bpc = connector->display_info.bpc;
}
}
@@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
*/
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
if (display_bpc > 8 && display_bpc < 12) {
- DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+ DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
display_bpc = 12;
} else {
- DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+ DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
display_bpc = 8;
}
}
@@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
display_bpc = min(display_bpc, bpc);
- DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
- bpc, display_bpc);
+ DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
*pipe_bpp = display_bpc * 3;
@@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+ pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227..4d0358fad93 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
- uint8_t link_status[DP_LINK_STATUS_SIZE];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- unsigned long panel_off_jiffies;
};
/**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- int max_lane_count = 4;
-
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
- max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
+ int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
}
return max_lane_count;
}
@@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
continue;
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
+ {
lane_count = intel_dp->lane_count;
break;
- } else if (is_edp(intel_dp)) {
- lane_count = dev_priv->edp.lanes;
- break;
}
}
@@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
ironlake_edp_pll_off(encoder);
}
- intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- intel_dp->DP |= intel_dp->color_range;
+ /*
+ * There are three kinds of DP registers:
+ *
+ * IBX PCH
+ * CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ironlake_pch_enable
+ */
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- intel_dp->DP |= DP_LINK_TRAIN_OFF;
+ /* Handle DP bits in common between all three register formats */
+
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
case 1:
@@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
-
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- intel_dp->DP |= DP_ENHANCED_FRAMING;
}
- /* CPT DP's pipe select is decided in TRANS_DP_CTL */
- if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- intel_dp->DP |= DP_PIPEB_SELECT;
+ /* Split out the IBX/CPU vs CPT settings */
- if (is_cpu_edp(intel_dp)) {
- /* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
- if (adjusted_mode->clock < 200000)
- intel_dp->DP |= DP_PLL_FREQ_160MHZ;
- else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (intel_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ if (is_cpu_edp(intel_dp)) {
+ /* don't miss out required setting for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ }
+ } else {
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
}
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
{
- unsigned long off_time;
- unsigned long delay;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- DRM_DEBUG_KMS("Wait for panel power off time\n");
+ DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
- if (ironlake_edp_have_panel_power(intel_dp) ||
- ironlake_edp_have_panel_vdd(intel_dp))
- {
- DRM_DEBUG_KMS("Panel still on, no delay needed\n");
- return;
+ if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+ DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
}
+}
- off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
- if (time_after(jiffies, off_time)) {
- DRM_DEBUG_KMS("Time already passed");
- return;
- }
- delay = jiffies_to_msecs(off_time - jiffies);
- if (delay > intel_dp->panel_power_down_delay)
- delay = intel_dp->panel_power_down_delay;
- DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
- msleep(delay);
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power on\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+ u32 control = I915_READ(PCH_PP_CONTROL);
+
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ return control;
}
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
@@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
"eDP VDD already requested on\n");
intel_dp->want_panel_vdd = true;
+
if (ironlake_edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("eDP VDD already on\n");
return;
}
- ironlake_wait_panel_off(intel_dp);
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ if (!ironlake_edp_have_panel_power(intel_dp))
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
u32 pp;
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
/* Make sure sequencer is idle before allowing subsequent activity */
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
- intel_dp->panel_off_jiffies = jiffies;
+
+ msleep(intel_dp->panel_power_down_delay);
}
}
@@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp->base.base.dev;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
}
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-
+
intel_dp->want_panel_vdd = false;
if (sync) {
@@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-/* Returns true if the panel was already on when called */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
+ u32 pp;
if (!is_edp(intel_dp))
return;
- if (ironlake_edp_have_panel_power(intel_dp))
+
+ DRM_DEBUG_KMS("Turn eDP power on\n");
+
+ if (ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP power already on\n");
return;
+ }
- ironlake_wait_panel_off(intel_dp);
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ ironlake_wait_panel_power_cycle(intel_dp);
+ pp = ironlake_get_pp_control(dev_priv);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
pp |= POWER_TARGET_ON;
+ if (!IS_GEN5(dev))
+ pp |= PANEL_POWER_RESET;
+
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
- 5000))
- DRM_ERROR("panel on wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ ironlake_wait_panel_on(intel_dp);
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-static void ironlake_edp_panel_off(struct drm_encoder *encoder)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
- PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+ u32 pp;
if (!is_edp(intel_dp))
return;
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
- if (IS_GEN5(dev)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- }
+ DRM_DEBUG_KMS("Turn eDP power off\n");
- intel_dp->panel_off_jiffies = jiffies;
+ WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
- if (IS_GEN5(dev)) {
- pp &= ~POWER_TARGET_ON;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- pp &= ~POWER_TARGET_ON;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- msleep(intel_dp->panel_power_cycle_delay);
-
- if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
- DRM_ERROR("panel off wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- }
+ ironlake_wait_panel_off(intel_dp);
}
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* allowing it to appear.
*/
msleep(intel_dp->backlight_on_delay);
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
/* Wake up the sink first */
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
/* Make sure the panel is off before trying to
* change the mode
*/
- ironlake_edp_backlight_off(intel_dp);
- intel_dp_link_down(intel_dp);
- ironlake_edp_panel_off(encoder);
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
-
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
@@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
ironlake_edp_panel_vdd_on(intel_dp);
- if (is_edp(intel_dp))
- ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_off(encoder);
- if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
- ironlake_edp_pll_off(encoder);
ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
} else {
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
- ironlake_edp_backlight_on(intel_dp);
} else
ironlake_edp_panel_vdd_off(intel_dp, false);
ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp)
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_aux_native_read_retry(intel_dp,
DP_LANE0_1_STATUS,
- intel_dp->link_status,
+ link_status,
DP_LINK_STATUS_SIZE);
}
@@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
}
static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
int lane)
{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = intel_dp_link_status(link_status, i);
+ uint8_t l = adjust_request[lane>>1];
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
int lane)
{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = intel_dp_link_status(link_status, i);
+ uint8_t l = adjust_request[lane>>1];
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
@@ -1344,6 +1376,7 @@ static char *link_train_names[] = {
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
+#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp)
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
+ struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
+ uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+ int voltage_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+ uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
if (this_v > v)
v = this_v;
@@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
p = this_p;
}
- if (v >= I830_DP_VOLTAGE_MAX)
- v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ voltage_max = I830_DP_VOLTAGE_MAX_CPT;
+ else
+ voltage_max = I830_DP_VOLTAGE_MAX;
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
if (p >= intel_dp_pre_emphasis_max(v))
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
@@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -1458,9 +1498,8 @@ static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
- uint8_t l = intel_dp_link_status(link_status, i);
+ uint8_t l = link_status[lane>>1];
return (l >> s) & 0xf;
}
@@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp)
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
- lane_align = intel_dp_link_status(intel_dp->link_status,
+ lane_align = intel_dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+ lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
@@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
- intel_dp->train_set, 4);
- if (ret != 4)
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
return false;
return true;
@@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int i;
uint8_t voltage;
bool clock_recovery = false;
- int tries;
+ int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
@@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
- tries = 0;
+ voltage_tries = 0;
+ loop_tries = 0;
clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_edp(intel_dp)) {
+
+ if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp))
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
break;
+ }
- if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
}
@@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count)
- break;
+ if (i == intel_dp->lane_count) {
+ ++loop_tries;
+ if (loop_tries == 5) {
+ DRM_DEBUG_KMS("too many full retries, give up\n");
+ break;
+ }
+ memset(intel_dp->train_set, 0, 4);
+ voltage_tries = 0;
+ continue;
+ }
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
+ ++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
break;
+ }
} else
- tries = 0;
+ voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
+ intel_get_adjust_train(intel_dp, link_status);
}
intel_dp->DP = DP;
@@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_edp(intel_dp)) {
+ if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp))
+ if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp)) {
+ if (intel_channel_eq_ok(intel_dp, link_status)) {
channel_eq = true;
break;
}
@@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
}
/* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
+ intel_get_adjust_train(intel_dp, link_status);
++tries;
}
@@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp))
- DP |= DP_LINK_TRAIN_OFF;
+ if (is_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ DP |= DP_LINK_TRAIN_OFF;
+ }
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1884,7 @@ static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
u8 sink_irq_vector;
+ u8 link_status[DP_LINK_STATUS_SIZE];
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* Try to read receiver status if the link appears to be up */
- if (!intel_dp_get_link_status(intel_dp)) {
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
@@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!intel_channel_eq_ok(intel_dp)) {
+ if (!intel_channel_eq_ok(intel_dp, link_status)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
continue;
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
-
+
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
@@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
-
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
+
if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
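The intel_dp hunks above switch the link-training helpers to a small caller-owned DPCD status buffer rather than a copy cached in struct intel_dp. A minimal sketch of the resulting calling pattern; the type and helper names below are stand-ins, not the driver's own symbols:

    #define DP_LINK_STATUS_SIZE 6   /* six DPCD status bytes, per the DisplayPort spec */

    struct dp_port;                 /* stands in for struct intel_dp */

    bool dp_read_link_status(struct dp_port *port, u8 status[DP_LINK_STATUS_SIZE]);
    bool dp_clock_recovery_ok(const u8 status[DP_LINK_STATUS_SIZE], int lane_count);

    static bool dp_training_step(struct dp_port *port, int lane_count)
    {
            u8 link_status[DP_LINK_STATUS_SIZE];    /* stack buffer, nothing cached */

            if (!dp_read_link_status(port, link_status))
                    return false;                   /* DPCD read failed */
            return dp_clock_recovery_ok(link_status, lane_count);
    }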
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbee..21f60b7d69a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
static int intel_panel_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
- return intel_panel_get_backlight(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->backlight_level;
}
static const struct backlight_ops intel_panel_bl_ops = {
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea757..38e1bda73d3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -480,21 +480,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
break;
case DB_Z_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
track->db_z_info = radeon_get_ib_value(p, idx);
- ib[idx] &= ~Z_ARRAY_MODE(0xf);
- track->db_z_info &= ~Z_ARRAY_MODE(0xf);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
}
break;
case DB_STENCIL_INFO:
@@ -607,40 +609,44 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_INFO:
case CB_COLOR6_INFO:
case CB_COLOR7_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
}
break;
case CB_COLOR8_INFO:
case CB_COLOR9_INFO:
case CB_COLOR10_INFO:
case CB_COLOR11_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
}
break;
case CB_COLOR0_PITCH:
@@ -1311,10 +1317,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
texture = reloc->robj;
/* tex mip base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652..c93bc64707e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_TXO_MICRO_TILE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
-
- tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (p->keep_tiling_flags) {
+ ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+ ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+ } else {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
+ if (!p->keep_tiling_flags) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_COLOR_MICROTILE_ENABLE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
-
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_DEPTHMACROTILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ if (!p->keep_tiling_flags) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c155..cb1acffd243 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!p->keep_tiling_flags &&
+ r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!p->keep_tiling_flags &&
+ r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (tiling_flags & RADEON_TILING_MACRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fc5a1d642cb..8227e76b5c7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
struct radeon_ib *ib;
void *track;
unsigned family;
- int parser_error;
+ int parser_error;
+ bool keep_tiling_flags;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fecd705a1a5..d24baf30efc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+ ATOM_GPIO_I2C_ASSIGMENT *gpio,
+ u8 index)
+{
+ /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+ if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+ gpio->ucClkMaskShift = 0x19;
+ gpio->ucDataMaskShift = 0x18;
+ }
+ }
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((index == 7) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((index == 4) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+ struct radeon_i2c_bus_rec i2c;
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
+ else
+ i2c.valid = false;
+
+ return i2c;
+}
+
static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
@@ -85,71 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
- if ((rdev->family == CHIP_R420) ||
- (rdev->family == CHIP_R423) ||
- (rdev->family == CHIP_RV410)) {
- if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
- gpio->ucClkMaskShift = 0x19;
- gpio->ucDataMaskShift = 0x18;
- }
- }
-
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
if (gpio->sucI2cId.ucAccess == id) {
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
-
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- if (i2c.mask_clk_reg)
- i2c.valid = true;
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
}
@@ -169,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
int i, num_indices;
char stmp[32];
- memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
-
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
@@ -179,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- i2c.valid = false;
-
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
-
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
- if (i2c.mask_clk_reg) {
- i2c.valid = true;
+ if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ccaa243c144..29afd71e084 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i;
+ unsigned size, i, flags = 0;
if (!cs->num_chunks) {
return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
+ !p->chunks[i].length_dw) {
+ return -EINVAL;
+ }
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].user_ptr, size)) {
return -EFAULT;
}
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ flags = p->chunks[i].kdata[0];
+ }
} else {
p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
+
+ p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
return 0;
}
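The radeon_cs.c hunk above accepts an optional flags chunk and records RADEON_CS_KEEP_TILING_FLAGS in the parser, which the evergreen/r300/r600 checkers then use to skip patching tiling bits. A rough userspace sketch of supplying that chunk; the struct layout and constants below mirror the radeon UAPI header as I recall it and should be treated as assumptions, not text from this patch:

    #define RADEON_CHUNK_ID_FLAGS           0x03
    #define RADEON_CS_KEEP_TILING_FLAGS     0x01    /* introduced by this series */

    uint32_t cs_flags[1] = { RADEON_CS_KEEP_TILING_FLAGS };

    struct drm_radeon_cs_chunk flags_chunk = {
            .chunk_id   = RADEON_CHUNK_ID_FLAGS,
            .length_dw  = 1,                        /* must be non-zero, see the check above */
            .chunk_data = (uintptr_t)cs_flags,
    };
    /* flags_chunk is then appended to the chunk array handed to DRM_IOCTL_RADEON_CS */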
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a0b35e90948..71499fc3daf 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
+ * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 11
+#define KMS_DRIVER_MINOR 12
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 617b64678fc..0bb0f5f713e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -574,10 +574,16 @@ retry:
return ret;
spin_lock(&glob->lru_lock);
+
+ if (unlikely(list_empty(&bo->ddestroy))) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+
ret = ttm_bo_reserve_locked(bo, interruptible,
no_wait_reserve, false, 0);
- if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
+ if (unlikely(ret != 0)) {
spin_unlock(&glob->lru_lock);
return ret;
}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index bdde899af72..111d956d8e7 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -991,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
uc = &priv->cards[i];
}
- if (!uc)
- return -EINVAL;
+ if (!uc) {
+ ret_val = -EINVAL;
+ goto done;
+ }
- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
- return -EINVAL;
+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
+ ret_val = -EINVAL;
+ goto done;
+ }
- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
- return -EINVAL;
+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
+ ret_val = -EINVAL;
+ goto done;
+ }
vga_put(pdev, io_state);
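The vga_arb_write hunk converts three early returns into jumps to the function's common exit label so that whatever cleanup sits at that label still runs on the error paths. Reduced to its shape (the real cleanup details are omitted here):

    ret_val = -EINVAL;
    if (!uc)
            goto done;
    /* ... the rest of the command handling ... */
    ret_val = count;
    done:
            /* shared cleanup, then a single return */
            return ret_val;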
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e8537..5d760f3d21c 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e18..04450f8bf5d 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f..f2359a0093b 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
.resume = exynos4_tmu_resume,
};
-static int __init exynos4_tmu_driver_init(void)
-{
- return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
- platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743a..9ba38f318ff 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
},
};
-static int __init gpio_fan_init(void)
-{
- return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
- platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d4340..7a48b1eb423 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
},
};
-static int __init jz4740_hwmon_init(void)
-{
- return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
- platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dce..9b382ec2c3b 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
.id_table = ntc_thermistor_id,
};
-static int __init ntc_thermistor_init(void)
-{
- return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
- platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752..f6c26d19f52 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
.remove = __devexit_p(s3c_hwmon_remove),
};
-static int __init s3c_hwmon_init(void)
-{
- return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
- platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c2..79b6dabe316 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
.remove = sch5627_remove,
};
-static int __init sch5627_init(void)
-{
- return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
- platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79f..9d5236fb09b 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
.remove = sch5636_remove,
};
-static int __init sch5636_init(void)
-{
- return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
- platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b16..0018c7dd009 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
},
};
-static int __init twl4030_madc_hwmon_init(void)
-{
- return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dc..b9a87e89bab 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
.remove = __devexit_p(env_remove),
};
-static int __init env_init(void)
-{
- return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
- platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a47..9b598ed2602 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
},
};
-static int __init wm831x_hwmon_init(void)
-{
- return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
- platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca8..3ff67edbdc4 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
},
};
-static int __init wm8350_hwmon_init(void)
-{
- return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
- platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 85584a547c2..525c7345fa0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
if (flags & I2C_M_TEN) {
/* a ten bit address */
- addr = 0xf0 | ((msg->addr >> 7) & 0x03);
+ addr = 0xf0 | ((msg->addr >> 7) & 0x06);
bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
/* try extended address code...*/
ret = try_address(i2c_adap, addr, retries);
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
return -ENXIO;
}
/* the remaining 8 bit address */
- ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
+ ret = i2c_outb(i2c_adap, msg->addr & 0xff);
if ((ret != 1) && !nak_ok) {
/* the chip did not ack / xmission error occurred */
dev_err(&i2c_adap->dev, "died at 2nd address code\n");
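The two corrected masks above implement the I2C 10-bit addressing layout: the first byte on the wire is the reserved 11110 pattern carrying address bits 9:8 in bits 2:1 (bit 0 is R/W, zero here because the extended-address phase is always sent as a write), and the second byte carries bits 7:0. In isolation:

    /* Sketch: splitting a 10-bit I2C address into its two wire bytes. */
    static void i2c_10bit_addr_bytes(u16 addr, u8 *first, u8 *second)
    {
            *first  = 0xf0 | ((addr >> 7) & 0x06);  /* 1 1 1 1 0 A9 A8 0 */
            *second = addr & 0xff;                  /* A7 .. A0 */
    }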
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc..03b61577888 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 131079a3e29..1e5606185b4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
+ /* For 10-bit clients, add an arbitrary offset to avoid collisions */
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr);
+ client->addr | ((client->flags & I2C_CLIENT_TEN)
+ ? 0xa000 : 0));
status = device_register(&client->dev);
if (status)
goto out_err;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c90ce50b619..57a45ce84b2 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
return 0;
}
-int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
+static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index adf0757280e..a20c3c8224e 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ rcu_read_lock();
neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+ rcu_read_unlock();
ret = -ENODATA;
if (neigh)
goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
if (neigh)
neigh_event_send(neigh, NULL);
ret = -ENODATA;
- goto put;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
}
-
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+ rcu_read_unlock();
put:
dst_release(dst);
return ret;
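This addr4/addr6 change, like the cxgb3/cxgb4/nes/ipoib hunks further down, applies one rule: the neighbour returned by dst_get_neighbour() is RCU-protected, so it may only be dereferenced inside an rcu_read_lock()/rcu_read_unlock() section (the _raw accessor is used where the surrounding code already provides protection). The pattern, stripped of driver detail; use_neighbour() stands in for whatever the caller does with the entry:

    struct neighbour *n;

    rcu_read_lock();
    n = dst_get_neighbour(dst);             /* RCU-protected pointer */
    if (n && (n->nud_state & NUD_VALID))
            ret = use_neighbour(n);         /* only valid inside the read-side section */
    rcu_read_unlock();                      /* n must not be touched after this */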
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e60..c88b12beef2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+ rcu_read_unlock();
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c55..0747004313a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
- if (mpa_rev_to_use == 1)
+ if (mpa_rev_to_use == 1) {
ep->tried_with_mpa_v1 = 1;
+ ep->retry_with_mpa_v1 = 0;
+ }
if (mpa_rev_to_use == 2) {
mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
rss_qid = dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
}
+ rcu_read_unlock();
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
}
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->retry_with_mpa_v1 = 0;
ep->tried_with_mpa_v1 = 0;
}
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e..0f1607c8325 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
- (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
(*count)++;
if (++ptr == cq->size)
ptr = 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a3..0a52d72371e 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh_release(neigh);
}
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
+ if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
+ rcu_read_lock();
neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+ rcu_read_unlock();
+ }
ip_rt_put(rt);
return rc;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95d..1d5895941e1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f..fa71b1e666c 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- if (!qib_qsfp_mod_present(qd->ppd))
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997..4115be54ba3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
struct ipoib_ah *ah;
+ struct ib_ah *vah;
ah = kmalloc(sizeof *ah, GFP_KERNEL);
if (!ah)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
- ah->ah = ib_create_ah(pd, attr);
- if (IS_ERR(ah->ah)) {
+ vah = ib_create_ah(pd, attr);
+ if (IS_ERR(vah)) {
kfree(ah);
- ah = NULL;
- } else
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ }
return ah;
}
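ipoib_create_ah now uses the kernel's error-pointer convention: allocation failure returns ERR_PTR(-ENOMEM), and a failing ib_create_ah() has its error pointer passed straight through, so callers such as ipoib_mcast_join_finish below can recover the original errno. The idiom in miniature (struct thing is a placeholder type):

    struct thing *thing_create(void)
    {
            struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return ERR_PTR(-ENOMEM);        /* error encoded in the pointer */
            return t;
    }

    /* caller */
    struct thing *t = thing_create();
    if (IS_ERR(t))
            return PTR_ERR(t);                      /* original error preserved */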
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 57ae9b9265e..d3ed89ca485 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- if (ah) {
+ if (!IS_ERR_OR_NULL(ah)) {
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
+/* called with rcu_read_lock */
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
}
+/* called with rcu_read_lock */
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct neighbour *n = NULL;
unsigned long flags;
+ rcu_read_lock();
if (likely(skb_dst(skb)))
n = dst_get_neighbour(skb_dst(skb));
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ goto unlock;
}
neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ goto unlock;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- return NETDEV_TX_OK;
+ goto unlock;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- return NETDEV_TX_OK;
+ goto unlock;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
phdr->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- return NETDEV_TX_OK;
+ goto unlock;
}
unicast_arp_send(skb, dev, phdr);
}
}
-
+unlock:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
dst = skb_dst(skb);
n = NULL;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_raw(dst);
if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a9768635..873bff97e69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
av.grh.dgid = mcast->mcmember.mgid;
ah = ipoib_create_ah(dev, priv->pd, &av);
- if (!ah) {
- ipoib_warn(priv, "ib_address_create failed\n");
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
} else {
spin_lock_irq(&priv->lock);
mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_raw(dst);
if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
if (mcast && mcast->ah) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n = NULL;
+
+ rcu_read_lock();
if (dst)
n = dst_get_neighbour(dst);
if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
-
+ rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 09b93b11a27..e2a9867c19d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse)
*/
static int elantech_set_properties(struct elantech_data *etd)
{
+ /* This represents the version of IC body. */
int ver = (etd->fw_version & 0x0f0000) >> 16;
+ /* Early version of Elan touchpads doesn't obey the rule. */
if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
etd->hw_version = 1;
- else if (etd->fw_version < 0x150600)
- etd->hw_version = 2;
- else if (ver == 5)
- etd->hw_version = 3;
- else if (ver == 6)
- etd->hw_version = 4;
- else
- return -1;
+ else {
+ switch (ver) {
+ case 2:
+ case 4:
+ etd->hw_version = 2;
+ break;
+ case 5:
+ etd->hw_version = 3;
+ break;
+ case 6:
+ etd->hw_version = 4;
+ break;
+ default:
+ return -1;
+ }
+ }
/*
* Turn on packet checking by default.
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 4b2a42f9f0b..d4d08bd9205 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/serio.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/mach-types.h>
#include <plat/board-ams-delta.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index bb9f5d31f0d..b4cfc6c8be8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
},
+ {
+ /* Newer HP Pavilion dv4 models */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ },
+ },
{ }
};
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
},
+ {
+ /* Newer HP Pavilion dv4 models */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ },
+ },
{ }
};
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e46777..9021182c4b7 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e02..2339d7396b9 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
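Both ISDN hunks rely on the same guard: if strnlen() runs to the full size of the fixed buffer, no NUL terminator exists inside it, so the ioctl data is rejected before any strcpy() can read past the field. In isolation:

    char field[16];     /* fixed-size string copied in from userspace */

    if (strnlen(field, sizeof(field)) == sizeof(field))
            return -EINVAL;         /* no terminator within the buffer */
    /* field is now known to be a proper NUL-terminated C string */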
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593878d66d..5664696f2d3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -472,7 +472,7 @@ config BMP085
module will be called bmp085.
config PCH_PHUB
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
depends on PCI
help
This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -480,12 +480,13 @@ config PCH_PHUB
processor. The Topcliff has MAC address and Option ROM data in SROM.
This driver can access MAC address and Option ROM data in SROM.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for LAPIS Semiconductor's IOH,
+ ML7213/ML7223/ML7831.
+ ML7213 which is for IVI(In-Vehicle Infotainment) use.
+ ML7223 IOH is for MP(Media Phone) use.
+ ML7831 IOH is for general purpose use.
+ ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
To compile this driver as a module, choose M here: the module will
be called pch_phub.
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index a662f5987b6..82b2cb77ae1 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -100,7 +100,7 @@ enum dpot_devid {
AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
BRDAC0, 7, 28),
- AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
BRDAC0, 8, 29),
AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
BRDAC0 | BRDAC1, 8, 30),
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index dee33addcae..10fc4785dba 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,10 +41,10 @@
#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
(Intel EG20T PCH)*/
#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
- offset(OKI SEMICONDUCTOR ML7213)
+ offset(LAPIS Semicon ML7213)
*/
#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
- offset(OKI SEMICONDUCTOR ML7223)
+ offset(LAPIS Semicon ML7223)
*/
/* MAX number of INT_REDUCE_CONTROL registers */
@@ -73,6 +73,9 @@
#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
+
/* SROM ACCESS Macro */
#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
@@ -115,6 +118,7 @@
* @pch_mac_start_address: MAC address area start address
* @pch_opt_rom_start_address: Option ROM start address
* @ioh_type: Save IOH type
+ * @pdev: pointer to pci device struct
*/
struct pch_phub_reg {
u32 phub_id_reg;
@@ -136,6 +140,7 @@ struct pch_phub_reg {
u32 pch_mac_start_address;
u32 pch_opt_rom_start_address;
int ioh_type;
+ struct pci_dev *pdev;
};
/* SROM SPEC for MAC address assignment offset */
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
int retval;
int i;
- if (chip->ioh_type == 1) /* EG20T */
+ if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831 */
retval = pch_phub_gbe_serial_rom_conf(chip);
else /* ML7223 */
retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
unsigned int orom_size;
int ret;
int err;
+ ssize_t rom_size;
struct pch_phub_reg *chip =
dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
}
/* Get Rom signature */
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ goto exrom_map_err;
+
pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
(unsigned char *)&rom_signature);
rom_signature &= 0xff;
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
goto return_err;
}
return_ok:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
mutex_unlock(&pch_phub_mutex);
return addr_offset;
return_err:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+exrom_map_err:
mutex_unlock(&pch_phub_mutex);
return_err_nomutex:
return err;
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
int err;
unsigned int addr_offset;
int ret;
+ ssize_t rom_size;
struct pch_phub_reg *chip =
dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
goto return_ok;
}
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address) {
+ err = -ENOMEM;
+ goto exrom_map_err;
+ }
+
for (addr_offset = 0; addr_offset < count; addr_offset++) {
if (PCH_PHUB_OROM_SIZE < off + addr_offset)
goto return_ok;
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
}
return_ok:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
mutex_unlock(&pch_phub_mutex);
return addr_offset;
return_err:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+exrom_map_err:
mutex_unlock(&pch_phub_mutex);
return err;
}
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
{
u8 mac[8];
struct pch_phub_reg *chip = dev_get_drvdata(dev);
+ ssize_t rom_size;
+
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ return -ENOMEM;
pch_phub_read_gbe_mac_addr(chip, mac);
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
return sprintf(buf, "%pM\n", mac);
}
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u8 mac[6];
+ ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(dev);
if (count != 18)
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
(u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
(u32 *)&mac[4], (u32 *)&mac[5]);
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ return -ENOMEM;
+
pch_phub_write_gbe_mac_addr(chip, mac);
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
return count;
}
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
int retval;
int ret;
- ssize_t rom_size;
struct pch_phub_reg *chip;
chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
"in pch_phub_base_address variable is %p\n", __func__,
chip->pch_phub_base_address);
- if (id->driver_data != 3) {
- chip->pch_phub_extrom_base_address =\
- pci_map_rom(pdev, &rom_size);
- if (chip->pch_phub_extrom_base_address == 0) {
- dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
- ret = -ENOMEM;
- goto err_pci_map;
- }
- dev_dbg(&pdev->dev, "%s : "
- "pci_map_rom SUCCESS and value in "
- "pch_phub_extrom_base_address variable is %p\n",
- __func__, chip->pch_phub_extrom_base_address);
- }
+ chip->pdev = pdev; /* Save pci device struct */
if (id->driver_data == 1) { /* EG20T PCH */
const char *board_name;
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
chip->pch_opt_rom_start_address =\
PCH_PHUB_ROM_START_ADDR_ML7223;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+ } else if (id->driver_data == 5) { /* ML7831 */
+ retval = sysfs_create_file(&pdev->dev.kobj,
+ &dev_attr_pch_mac.attr);
+ if (retval)
+ goto err_sysfs_create;
+
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ if (retval)
+ goto exit_bin_attr;
+
+ /* set the prefetch value */
+ iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+ /* set the interrupt delay value */
+ iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+ chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+ chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
}
chip->ioh_type = id->driver_data;
@@ -773,8 +812,6 @@ exit_bin_attr:
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
err_sysfs_create:
- pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
-err_pci_map:
pci_iounmap(pdev, chip->pch_phub_base_address);
err_pci_iomap:
pci_release_regions(pdev);
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
- pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
pci_iounmap(pdev, chip->pch_phub_base_address);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
{ }
};
MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void)
module_init(pch_phub_pci_init);
module_exit(pch_phub_pci_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
MODULE_LICENSE("GPL");
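The pch_phub rework above drops the long-lived ROM mapping from probe() and instead wraps each SROM access in pci_map_rom()/pci_unmap_rom(). A rough sketch of that on-demand pattern; read_srom_byte() is a hypothetical stand-in for the driver's own SROM read helper:

/* Sketch: map the expansion ROM around a single SROM access, then unmap. */
static int pch_demo_read_rom_byte(struct pch_phub_reg *chip, u32 off, u8 *val)
{
	size_t rom_size;

	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
	if (!chip->pch_phub_extrom_base_address)
		return -ENOMEM;

	read_srom_byte(chip, off, val);	/* hypothetical helper */

	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
	return 0;
}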
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index cfbddbef11d..43d073bc1d9 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
}
module_exit(spear_pcie_gadget_exit);
-MODULE_ALIAS("pcie-gadget-spear");
+MODULE_ALIAS("platform:pcie-gadget-spear");
MODULE_AUTHOR("Pratyush Anand");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 25a44d94be1..3216c514fdc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2554,30 +2554,6 @@ re_arm:
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
@@ -3323,6 +3299,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond;
struct vlan_entry *vlan;
+ /* we only care about primary address */
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
@@ -3330,7 +3310,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -3346,8 +3326,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 26be1dfc157..f801754c71a 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -614,7 +614,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
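The dm9000 one-liner swaps a bitwise AND for a logical AND: `wake_state & !opts` masks the 0-or-1 result of `!opts` against the state bits, so with wake_state = 0x4 and opts = 0 it yields 0x4 & 1 == 0 and wake is never disabled, whereas `wake_state && !opts` yields 1 as intended. A tiny sketch of the corrected condition, with made-up names:

/* Illustration only: decide whether wake support should be turned off. */
static int demo_should_disable_wake(unsigned int wake_state, unsigned int opts)
{
	/* Bitwise form (the bug): wake_state & !opts == 0x4 & 1 == 0 here,
	 * so the disable path never runs. The logical form below expresses
	 * the intent: a wake bit is set and no wake options were requested. */
	return wake_state && !opts;
}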
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b29..5272f9d4dda 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -24,6 +24,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS)
+ default ARCH_MXC || ARCH_MXS if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 5c0b531949e..27d651a80f3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
}
static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+ /* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
+static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb..4304072bd3c 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
RXMCS_CHECKSUM,
};
+/* Extern PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
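The new jme helpers implement an indirect register window: the target register index plus a read or write opcode goes into JM_PHY_SPEC_ADDR_REG, and the payload travels through JM_PHY_SPEC_DATA_REG. A condensed sketch of a read-modify-write through that window, using the helpers added above on an assumed fully initialised jme_adapter:

/* Sketch: set one calibration bit in an extended PHY register through the
 * indirect address/data window; error handling is omitted. */
static void jme_demo_set_cali_enable(struct jme_adapter *jme)
{
	int val;

	val = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
	val |= JM_PHY_EXT_COMM_2_CALI_ENABLE;
	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, val);
}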
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 662ab7e9a0f..8876134b35b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db526284454..55c8e50f45f 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
/*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
}
/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
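The rtlwifi change converts lps_lock to the interrupt-disabling lock variants: rtl_lps_enter() and the swlps paths run in process context and can use spin_lock_irq(), while rtl_lps_leave() may be reached with interrupts already off and therefore uses the _irqsave form. A generic sketch of the two variants, with a hypothetical lock and state variable:

static DEFINE_SPINLOCK(demo_lock);	/* illustrative, not from the driver */
static int demo_state;

/* Called only from process context: safe to re-enable IRQs on unlock. */
static void demo_enter(void)
{
	spin_lock_irq(&demo_lock);
	demo_state = 1;
	spin_unlock_irq(&demo_lock);
}

/* Callable from any context: preserve the caller's IRQ state. */
static void demo_leave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_state = 0;
	spin_unlock_irqrestore(&demo_lock, flags);
}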
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6d3dd3988d0..19c0115092d 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -60,27 +60,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
*/
struct device_node *of_irq_find_parent(struct device_node *child)
{
- struct device_node *p, *c = child;
+ struct device_node *p;
const __be32 *parp;
- if (!of_node_get(c))
+ if (!of_node_get(child))
return NULL;
do {
- parp = of_get_property(c, "interrupt-parent", NULL);
+ parp = of_get_property(child, "interrupt-parent", NULL);
if (parp == NULL)
- p = of_get_parent(c);
+ p = of_get_parent(child);
else {
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
p = of_node_get(of_irq_dflt_pic);
else
p = of_find_node_by_phandle(be32_to_cpup(parp));
}
- of_node_put(c);
- c = p;
+ of_node_put(child);
+ child = p;
} while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
- return (p == child) ? NULL : p;
+ return p;
}
/**
@@ -424,6 +424,8 @@ void __init of_irq_init(const struct of_device_id *matches)
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
+ if (desc->interrupt_parent == np)
+ desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6f9749b4fa..f02b5235056 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -76,6 +76,7 @@ config PCI_IOV
config PCI_PRI
bool "PCI PRI support"
+ depends on PCI
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 596172b4ae9..fce1c54a0c8 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -459,8 +459,17 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
+ struct acpi_pci_root *root;
acpi_handle dummy_handle;
+ /*
+ * We shouldn't use this bridge if PCIe native hotplug control has been
+ * granted by the BIOS for it.
+ */
+ root = acpi_pci_find_root(handle);
+ if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return -ENODEV;
+
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
@@ -1376,13 +1385,23 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
+ struct acpi_pci_root *root;
int *count = (int *)context;
- if (acpi_is_root_bridge(handle)) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge, NULL);
- (*count)++;
- }
+ if (!acpi_is_root_bridge(handle))
+ return AE_OK;
+
+ root = acpi_pci_find_root(handle);
+ if (!root)
+ return AE_OK;
+
+ if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+ return AE_OK;
+
+ (*count)++;
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event_bridge, NULL);
+
return AE_OK ;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 1e9c9aacc3a..085dbb5fc16 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
- /* Wait for 1 second after checking link training status */
- msleep(1000);
-
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 96dc4734e4a..7b1414810ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
else
msleep(1000);
+ /*
+ * Need to wait for 1000 ms after the Data Link Layer Link Active
+ * (DLLLA) bit reads 1b before sending a configuration request.
+ * We need it before checking the Link Training (LT) bit because
+ * LT is still set even after the DLLLA bit is set on some platforms.
+ */
+ msleep(1000);
+
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
return retval;
}
+ /*
+ * If the port supports Link speeds greater than 5.0 GT/s, we
+ * must wait for 100 ms after Link training completes before
+ * sending a configuration request.
+ */
+ if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
+ msleep(100);
+
+ pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
return retval;
}
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
u16 slot_cmd;
u16 cmd_mask;
u16 slot_status;
- u16 lnk_status;
int retval = 0;
/* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
- __func__);
- return retval;
- }
- pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
-
return retval;
}
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4..dd7e0c51a33 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
static int is_shpc_capable(struct pci_dev *dev)
{
- if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450))
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
return 1;
if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
return 0;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce30..75ba2311b54 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
ctrl_dbg(ctrl, "Hotplug Controller:\n");
- if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450)) {
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
/* amd shpc driver doesn't use Base Offset; assume 0 */
ctrl->mmio_base = pci_resource_start(pdev, 0);
ctrl->mmio_size = pci_resource_len(pdev, 0);
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8..298c6c6a279 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
break;
}
- if (!ri)
+ if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
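The aat2870 fix replaces the `!ri` test with an index comparison: `ri` is reassigned on every loop iteration, so it is non-NULL even when nothing matched; only the loop counter reaching ARRAY_SIZE() signals "not found". A minimal sketch of the corrected lookup, with an illustrative table:

struct demo_reg { int id; };	/* illustrative table entry */
static struct demo_reg demo_regs[] = { { 1 }, { 2 }, { 3 } };

static struct demo_reg *demo_lookup(int id)
{
	struct demo_reg *ri = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_regs); i++) {
		ri = &demo_regs[i];
		if (ri->id == id)
			break;
	}
	/* ri points at the last entry examined either way; only the index
	 * tells us whether the loop fell off the end without a match. */
	if (i == ARRAY_SIZE(demo_regs))
		return NULL;

	return ri;
}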
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d0216022..938398f3e86 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
list_del(&rdev->list);
if (rdev->supply)
regulator_put(rdev->supply);
- device_unregister(&rdev->dev);
kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 66d2d60b436..b552aae55b4 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
switch (id) {
case TPS65910_REG_VDD1:
- dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
if (dcdc_mult == 1)
dcdc_mult--;
- vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+ vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
tps65910_modify_bits(pmic, TPS65910_VDD1,
(dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
- dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
if (dcdc_mult == 1)
dcdc_mult--;
- vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+ vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
tps65910_modify_bits(pmic, TPS65910_VDD2,
(dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
switch (id) {
case TPS65910_REG_VDD1:
case TPS65910_REG_VDD2:
- mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
volt = VDD1_2_MIN_VOLT +
- (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+ (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
pmic->desc[i].ops = &tps65910_ops_dcdc;
+ pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
+ VDD1_2_NUM_VOLT_COARSE;
} else if (i == TPS65910_REG_VDD3) {
if (tps65910_chip_id(tps65910) == TPS65910)
pmic->desc[i].ops = &tps65910_ops_vdd3;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa0..11cc308d66e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
/* TWL6030 register offsets */
#define VREG_TRANS 1
#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
.get_status = twl4030reg_get_status,
};
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+ vsel);
+ return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE_SMPS_4030);
+
+ return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+};
+
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+ { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL4030_REG_##label, \
+ .ops = &twl4030smps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
.min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
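The new twl4030smps callbacks map between microvolts and the VDD1/VDD2 selector with a 600 mV base and 12.5 mV steps; set_voltage rounds up so the programmed value is never below min_uV. As a worked example, a request for 1200000 uV gives vsel = DIV_ROUND_UP(1200000 - 600000, 12500) = 48, and reading selector 48 back yields 48 * 12500 + 600000 = 1200000 uV.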
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c..21c70b2b831 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -426,7 +426,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
goto err_clk;
}
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
nuc900_init_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig
index 9e1864c6dfd..8190f2aaf53 100644
--- a/drivers/staging/et131x/Kconfig
+++ b/drivers/staging/et131x/Kconfig
@@ -1,6 +1,7 @@
config ET131X
tristate "Agere ET-1310 Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && NET && NETDEVICES
+ select PHYLIB
default n
---help---
This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f5f44a02456..0c1c6ca8c37 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
+#define ET131X_PM_OPS (&et131x_pm_ops)
+#else
+#define ET131X_PM_OPS NULL
+#endif
+
/* ISR functions */
/**
@@ -5470,12 +5476,6 @@ err_out:
return result;
}
-static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
-#define ET131X_PM_OPS (&et131x_pm_ops)
-#else
-#define ET131X_PM_OPS NULL
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 326e967d54e..aec9311b108 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = {
static int iio_event_getfd(struct iio_dev *indio_dev)
{
- if (indio_dev->event_interface == NULL)
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ int fd;
+
+ if (ev_int == NULL)
return -ENODEV;
- mutex_lock(&indio_dev->event_interface->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS,
- &indio_dev->event_interface->flags)) {
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_lock(&ev_int->event_list_lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ mutex_unlock(&ev_int->event_list_lock);
return -EBUSY;
}
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
- return anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops,
- indio_dev->event_interface, O_RDONLY);
+ mutex_unlock(&ev_int->event_list_lock);
+ fd = anon_inode_getfd("iio:event",
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+ if (fd < 0) {
+ mutex_lock(&ev_int->event_list_lock);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ mutex_unlock(&ev_int->event_list_lock);
+ }
+ return fd;
}
static int __init iio_init(void)
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
index 5cde96b2e6e..5c2a15b42df 100644
--- a/drivers/staging/slicoss/Kconfig
+++ b/drivers/staging/slicoss/Kconfig
@@ -1,6 +1,6 @@
config SLICOSS
tristate "Alacritech Gigabit IS-NIC support"
- depends on PCI && X86
+ depends on PCI && X86 && NET
default n
help
This driver supports Alacritech's IS-NIC gigabit ethernet cards.
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 435f6facbc2..44fbebab507 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
: "=r" (__c));
+ isb();
return __c;
}
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
: /* no output register */
: "r" (c));
+ isb();
}
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5f479dada6f..925a1e547a8 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60
Support for the IFX6x60 modem devices on Intel MID platforms.
config SERIAL_PCH_UART
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
depends on PCI
select SERIAL_CORE
help
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART
which is an IOH(Input/Output Hub) for x86 embedded processor.
Enabling PCH_DMA, this PCH UART works as DMA mode.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7213, ML7223 and ML7831.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+ for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+ ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx series.
+ ML7213/ML7223/ML7831 are completely compatible with the Intel EG20T PCH.
config SERIAL_MSM_SMD
bool "Enable tty device interface for some SMD ports"
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4a0f86fa1e9..4c823f341d9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
- if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
+ if ((rs485conf->delay_rts_after_send) > 0)
UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
rs485conf->delay_rts_after_send = rs485_delay[1];
rs485conf->flags = 0;
- if (rs485conf->delay_rts_before_send == 0 &&
- rs485conf->delay_rts_after_send == 0) {
- rs485conf->flags |= SER_RS485_RTS_ON_SEND;
- } else {
- if (rs485conf->delay_rts_before_send)
- rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
- if (rs485conf->delay_rts_after_send)
- rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
- }
-
if (of_get_property(np, "rs485-rx-during-tx", NULL))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index b7435043f2f..1dfba7b779c 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty,
e100_disable_rx(info);
e100_enable_rx_irq(info);
#endif
- if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
- (info->rs485.delay_rts_before_send > 0))
- msleep(info->rs485.delay_rts_before_send);
+ if (info->rs485.delay_rts_before_send > 0)
+ msleep(info->rs485.delay_rts_before_send);
}
#endif /* CONFIG_ETRAX_RS485 */
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty,
rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
rs485data.flags = 0;
- if (rs485data.delay_rts_before_send != 0)
- rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
- else
- rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
if (rs485ctrl.enabled)
rs485data.flags |= SER_RS485_ENABLED;
@@ -4531,7 +4526,6 @@ static int __init rs_init(void)
/* Set sane defaults */
info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
- info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
info->rs485.delay_rts_before_send = 0;
info->rs485.flags &= ~(SER_RS485_ENABLED);
#endif
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 286c386d9c4..e272d3919c6 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct uart_hsu_port *up =
container_of(port, struct uart_hsu_port, port);
- struct tty_struct *tty = port->state->port.tty;
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
}
/* CMSPAR isn't supported by this driver */
- if (tty)
- tty->termios->c_cflag &= ~CMSPAR;
+ termios->c_cflag &= ~CMSPAR;
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 21febef926a..d6aba8c087e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,5 +1,5 @@
/*
- *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
*This program is free software; you can redistribute it and/or modify
*it under the terms of the GNU General Public License as published by
@@ -46,8 +46,8 @@ enum {
/* Set the max number of UART port
* Intel EG20T PCH: 4 port
- * OKI SEMICONDUCTOR ML7213 IOH: 3 port
- * OKI SEMICONDUCTOR ML7223 IOH: 2 port
+ * LAPIS Semiconductor ML7213 IOH: 3 port
+ * LAPIS Semiconductor ML7223 IOH: 2 port
*/
#define PCH_UART_NR 4
@@ -258,6 +258,8 @@ enum pch_uart_num_t {
pch_ml7213_uart2,
pch_ml7223_uart0,
pch_ml7223_uart1,
+ pch_ml7831_uart0,
+ pch_ml7831_uart1,
};
static struct pch_uart_driver_data drv_dat[] = {
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = {
[pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
[pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
[pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
+ [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
+ [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
};
static unsigned int default_baud = 9600;
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port)
dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
__func__);
dma_release_channel(priv->chan_tx);
+ priv->chan_tx = NULL;
return;
}
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port)
dev_err(priv->port.dev,
"pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
- if (priv->use_dma_flag)
- pch_free_dma(port);
+ pch_free_dma(port);
free_irq(priv->port.irq, priv);
}
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port,
if (rtn)
goto out;
+ pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
.driver_data = pch_ml7223_uart0},
{PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
.driver_data = pch_ml7223_uart1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
+ .driver_data = pch_ml7831_uart0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
+ .driver_data = pch_ml7831_uart1},
{0,},
};
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 512c49f98e8..8e0924f5544 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -36,6 +36,7 @@
#include <linux/kmod.h>
#include <linux/nsproxy.h>
+#include <linux/ratelimit.h>
/*
* This guards the refcounted line discipline lists. The lock
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
/**
* tty_ldisc_wait_idle - wait for the ldisc to become idle
* @tty: tty to wait for
+ * @timeout: maximum time to wait
*
* Wait for the line discipline to become idle. The discipline must
* have been halted for this to guarantee it remains idle.
*/
-static int tty_ldisc_wait_idle(struct tty_struct *tty)
+static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
{
- int ret;
+ long ret;
ret = wait_event_timeout(tty_ldisc_idle,
- atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+ atomic_read(&tty->ldisc->users) == 1, timeout);
if (ret < 0)
return ret;
return ret > 0 ? 0 : -EBUSY;
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty_ldisc_flush_works(tty);
- retval = tty_ldisc_wait_idle(tty);
+ retval = tty_ldisc_wait_idle(tty, 5 * HZ);
tty_lock();
mutex_lock(&tty->ldisc_mutex);
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
if (IS_ERR(ld))
return -1;
- WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
-
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
tty_unlock();
cancel_work_sync(&tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
-
+retry:
tty_lock();
mutex_lock(&tty->ldisc_mutex);
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
it means auditing a lot of other paths so this is
a FIXME */
if (tty->ldisc) { /* Not yet closed */
+ if (atomic_read(&tty->ldisc->users) != 1) {
+ char cur_n[TASK_COMM_LEN], tty_n[64];
+ long timeout = 3 * HZ;
+ tty_unlock();
+
+ while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ printk_ratelimited(KERN_WARNING
+ "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
+ __func__, get_task_comm(cur_n, current),
+ tty_name(tty, tty_n));
+ }
+ mutex_unlock(&tty->ldisc_mutex);
+ goto retry;
+ }
+
if (reset == 0) {
if (!tty_ldisc_reinit(tty, tty->termios->c_line))
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6960715c506..e8c564a5334 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm)
{
int i;
- mutex_lock(&open_mutex);
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm)
acm->control->needs_remote_wakeup = 0;
usb_autopm_put_interface(acm->control);
}
- mutex_unlock(&open_mutex);
}
static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
tty_port_hangup(&acm->port);
+ mutex_lock(&open_mutex);
acm_port_down(acm);
+ mutex_unlock(&open_mutex);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
shutdown */
if (!acm)
return;
+
+ mutex_lock(&open_mutex);
if (tty_port_close_start(&acm->port, tty, filp) == 0) {
- mutex_lock(&open_mutex);
if (!acm->dev) {
tty_port_tty_set(&acm->port, NULL);
acm_tty_unregister(acm);
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
acm_port_down(acm);
tty_port_close_end(&acm->port, tty);
tty_port_tty_set(&acm->port, NULL);
+ mutex_unlock(&open_mutex);
}
static int acm_tty_write(struct tty_struct *tty,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 96f05b29c9a..79781461eec 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_C_PORT_LINK_STATE);
}
+ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+ hub_is_superspeed(hub->hdev)) {
+ need_debounce_delay = true;
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ }
/* We can forget about a "removed" device when there's a
* physical disconnect or the connect status changes.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d6a8d8269bf..ecf12e15a7e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech Webcam B/C500 */
{ USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C600 */
+ { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Webcam Pro 9000 */
{ USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C905 */
+ { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C210 */
+ { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C260 */
+ { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Webcam C310 */
{ USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C910 */
+ { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C160 */
+ { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Quickcam Pro 9000 */
+ { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Quickcam E3500 */
+ { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Quickcam Vision Pro */
+ { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Harmony 700-series */
{ USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fa824cfdd2e..25dbd8614e7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
int ret;
dep->endpoint.maxpacket = 1024;
+ dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
&dwc->gadget.ep_list);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b21cd376c11..23a447373c5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -469,7 +469,7 @@ config USB_LANGWELL
gadget drivers to also be dynamically linked.
config USB_EG20T
- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
+ tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -485,10 +485,11 @@ config USB_EG20T
This driver does not support interrupt transfer or isochronous
transfer modes.
- This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is
+ This driver can also be used for LAPIS Semiconductor's ML7213, which is
for IVI(In-Vehicle Infotainment) use.
- ML7213 is companion chip for Intel Atom E6xx series.
- ML7213 is completely compatible for Intel EG20T PCH.
+ ML7831 is for general purpose use.
+ ML7213/ML7831 are companion chips for the Intel Atom E6xx series.
+ ML7213/ML7831 are completely compatible with the Intel EG20T PCH.
config USB_CI13XXX_MSM
tristate "MIPS USB CI13xxx for MSM"
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 4eedfe55715..1fc612914c5 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void)
return platform_driver_register(&ci13xxx_msm_driver);
}
module_init(ci13xxx_msm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 83428f56253..9a0c3979ff4 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -71,6 +71,9 @@
/******************************************************************************
* DEFINE
*****************************************************************************/
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
/* ctrl register bank access */
static DEFINE_SPINLOCK(udc_lock);
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
return -EALREADY;
mReq->req.status = -EALREADY;
- if (length && !mReq->req.dma) {
+ if (length && mReq->req.dma == DMA_ADDR_INVALID) {
mReq->req.dma = \
dma_map_single(mEp->device, mReq->req.buf,
length, mEp->dir ? DMA_TO_DEVICE :
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
dma_unmap_single(mEp->device, mReq->req.dma,
length, mEp->dir ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
- mReq->req.dma = 0;
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->map = 0;
}
return -ENOMEM;
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
if (mReq->map) {
dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- mReq->req.dma = 0;
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->map = 0;
}
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock)
* @gadget: gadget
*
* This function returns an error code
- * Caller must hold lock
*/
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
if (mReq != NULL) {
INIT_LIST_HEAD(&mReq->queue);
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
&mReq->dma);
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
if (mReq->map) {
dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- mReq->req.dma = 0;
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->map = 0;
}
req->status = -ECONNRESET;
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget)
spin_lock_irqsave(udc->lock, flags);
if (!udc->remote_wakeup) {
ret = -EOPNOTSUPP;
- dbg_trace("remote wakeup feature is not enabled\n");
+ trace("remote wakeup feature is not enabled\n");
goto out;
}
if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
ret = -EINVAL;
- dbg_trace("port is not suspended\n");
+ trace("port is not suspended\n");
goto out;
}
hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
if (udc->udc_driver->notify_event)
udc->udc_driver->notify_event(udc,
CI13XXX_CONTROLLER_STOPPED_EVENT);
+ spin_unlock_irqrestore(udc->lock, flags);
_gadget_stop_activity(&udc->gadget);
+ spin_lock_irqsave(udc->lock, flags);
pm_runtime_put(&udc->gadget.dev);
}
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
struct ci13xxx *udc;
int retval = 0;
- trace("%p, %p, %p", dev, regs, name);
+ trace("%p, %p, %p", dev, regs, driver->name);
if (dev == NULL || regs == NULL || driver == NULL ||
driver->name == NULL)
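The ci13xxx changes stop treating a DMA address of 0 as "unmapped", since 0 can be a valid bus address, and use an all-ones sentinel instead. A minimal sketch of the sentinel pattern with hypothetical request and helper names:

#define DEMO_DMA_ADDR_INVALID (~(dma_addr_t)0)

struct demo_req {
	void *buf;
	size_t len;
	dma_addr_t dma;		/* DEMO_DMA_ADDR_INVALID while unmapped */
};

static int demo_map(struct device *dev, struct demo_req *req)
{
	if (req->dma != DEMO_DMA_ADDR_INVALID)
		return 0;	/* caller already supplied a mapping */

	req->dma = dma_map_single(dev, req->buf, req->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, req->dma)) {
		req->dma = DEMO_DMA_ADDR_INVALID;
		return -ENOMEM;
	}
	return 0;
}

static void demo_unmap(struct device *dev, struct demo_req *req)
{
	if (req->dma == DEMO_DMA_ADDR_INVALID)
		return;
	dma_unmap_single(dev, req->dma, req->len, DMA_TO_DEVICE);
	req->dma = DEMO_DMA_ADDR_INVALID;
}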
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 52583a23533..c39d58860fa 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f,
if (ctrl->bRequestType !=
(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != fsg->interface_number || w_value != 0)
+ if (w_index != fsg->interface_number || w_value != 0 ||
+ w_length != 0)
return -EDOM;
/*
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f,
if (ctrl->bRequestType !=
(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != fsg->interface_number || w_value != 0)
+ if (w_index != fsg->interface_number || w_value != 0 ||
+ w_length != 1)
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *)req->buf = fsg->common->nluns - 1;
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 67b222908cf..3797b3d6c62 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
-DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
/* B.3.1 Standard AC Interface Descriptor */
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = {
/* .wTotalLength = DYNAMIC */
};
-/* B.4.3 Embedded MIDI IN Jack Descriptor */
-static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
- .bLength = USB_DT_MIDI_IN_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
- .bJackType = USB_MS_EMBEDDED,
- /* .bJackID = DYNAMIC */
-};
-
-/* B.4.4 Embedded MIDI OUT Jack Descriptor */
-static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
- /* .bLength = DYNAMIC */
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
- .bJackType = USB_MS_EMBEDDED,
- /* .bJackID = DYNAMIC */
- /* .bNrInputPins = DYNAMIC */
- /* .pins = DYNAMIC */
-};
-
/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
static struct usb_endpoint_descriptor bulk_out_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -758,9 +737,11 @@ fail:
static int __init
f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
- struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12];
+ struct usb_descriptor_header **midi_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
+ struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
+ struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
int status, n, jack = 1, i = 0;
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
midi->out_ep->driver_data = cdev; /* claim */
+ /* allocate temporary function list */
+ midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function),
+ GFP_KERNEL);
+ if (!midi_function) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/*
* construct the function's descriptor set. As the number of
* input and output MIDI ports is configurable, we have to do
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
- + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE
- + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1);
+ + (midi->in_ports + midi->out_ports) *
+ (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
- /* we have one embedded IN jack */
- jack_in_emb_desc.bJackID = jack++;
- midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
-
- /* and a dynamic amount of external IN jacks */
- for (n = 0; n < midi->in_ports; n++) {
- struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
-
- ext->bLength = USB_DT_MIDI_IN_SIZE;
- ext->bDescriptorType = USB_DT_CS_INTERFACE;
- ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
- ext->bJackType = USB_MS_EXTERNAL;
- ext->bJackID = jack++;
- ext->iJack = 0;
-
- midi_function[i++] = (struct usb_descriptor_header *) ext;
- }
-
- /* one embedded OUT jack ... */
- jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
- jack_out_emb_desc.bJackID = jack++;
- jack_out_emb_desc.bNrInputPins = midi->in_ports;
-	/* ... which references all external IN jacks */
+ /* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
- jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID;
- jack_out_emb_desc.pins[n].baSourcePin = 1;
+ struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
+ struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
+
+ in_ext->bLength = USB_DT_MIDI_IN_SIZE;
+ in_ext->bDescriptorType = USB_DT_CS_INTERFACE;
+ in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
+ in_ext->bJackType = USB_MS_EXTERNAL;
+ in_ext->bJackID = jack++;
+ in_ext->iJack = 0;
+ midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+
+ out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
+ out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
+ out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
+ out_emb->bJackType = USB_MS_EMBEDDED;
+ out_emb->bJackID = jack++;
+ out_emb->bNrInputPins = 1;
+ out_emb->pins[0].baSourcePin = 1;
+ out_emb->pins[0].baSourceID = in_ext->bJackID;
+ out_emb->iJack = 0;
+ midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+
+ /* link it to the endpoint */
+ ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
}
- midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc;
-
- /* and multiple external OUT jacks ... */
+ /* configure the external OUT jacks, each linked to an embedded IN jack */
for (n = 0; n < midi->out_ports; n++) {
- struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n];
- int m;
-
- ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
- ext->bDescriptorType = USB_DT_CS_INTERFACE;
- ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
- ext->bJackType = USB_MS_EXTERNAL;
- ext->bJackID = jack++;
- ext->bNrInputPins = 1;
- ext->iJack = 0;
- /* ... which all reference the same embedded IN jack */
- for (m = 0; m < midi->out_ports; m++) {
- ext->pins[m].baSourceID = jack_in_emb_desc.bJackID;
- ext->pins[m].baSourcePin = 1;
- }
-
- midi_function[i++] = (struct usb_descriptor_header *) ext;
+ struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
+ struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
+
+ in_emb->bLength = USB_DT_MIDI_IN_SIZE;
+ in_emb->bDescriptorType = USB_DT_CS_INTERFACE;
+ in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
+ in_emb->bJackType = USB_MS_EMBEDDED;
+ in_emb->bJackID = jack++;
+ in_emb->iJack = 0;
+ midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+
+ out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
+ out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
+ out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
+ out_ext->bJackType = USB_MS_EXTERNAL;
+ out_ext->bJackID = jack++;
+ out_ext->bNrInputPins = 1;
+ out_ext->iJack = 0;
+ out_ext->pins[0].baSourceID = in_emb->bJackID;
+ out_ext->pins[0].baSourcePin = 1;
+ midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+
+ /* link it to the endpoint */
+ ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
}
/* configure the endpoint descriptors ... */
ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
- for (n = 0; n < midi->in_ports; n++)
- ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
- for (n = 0; n < midi->out_ports; n++)
- ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
/* ... and add them to the list */
midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
f->descriptors = usb_copy_descriptors(midi_function);
}
+ kfree(midi_function);
+
return 0;
fail:
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index f7e39b0365c..11b5196284a 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg,
if (ctrl->bRequestType != (USB_DIR_OUT |
USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != 0 || w_value != 0) {
+ if (w_index != 0 || w_value != 0 || w_length != 0) {
value = -EDOM;
break;
}
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg,
if (ctrl->bRequestType != (USB_DIR_IN |
USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != 0 || w_value != 0) {
+ if (w_index != 0 || w_value != 0 || w_length != 1) {
value = -EDOM;
break;
}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index d786ba31fc0..b3b3d83b7c3 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -2480,8 +2480,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
#ifndef CONFIG_ARCH_MXC
if (pdata->have_sysif_regs)
- usb_sys_regs = (struct usb_sys_interface *)
- ((u32)dr_regs + USB_DR_SYS_OFFSET);
+ usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
#endif
/* Initialize USB clocks */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a392ec0d2d5..6ccae2707e5 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1730,8 +1730,9 @@ static void
gadgetfs_disconnect (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
- spin_lock (&dev->lock);
+ spin_lock_irqsave (&dev->lock, flags);
if (dev->state == STATE_DEV_UNCONNECTED)
goto exit;
dev->state = STATE_DEV_UNCONNECTED;
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
next_event (dev, GADGETFS_DISCONNECT);
ep0_readable (dev);
exit:
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore (&dev->lock, flags);
}
static void
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 550d6dcdf10..5048a0c0764 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -354,6 +354,7 @@ struct pch_udc_dev {
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
+#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
.class_mask = 0xffffffff,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class_mask = 0xffffffff,
+ },
{ 0 },
};
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void)
module_exit(pch_udc_pci_exit);
MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
-MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 68a826a1b86..24f84b210ce 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep)
if (list_empty(&ep->queue) && !ep->busy) {
pipe_stop(ep->r8a66597, ep->pipenum);
r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
+ r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
+ r8a66597_write(ep->r8a66597, 0, ep->pipectr);
}
spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}
@@ -1742,7 +1744,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
- int retval;
if (!driver
|| driver->speed != USB_SPEED_HIGH
@@ -1752,16 +1753,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
return -ENODEV;
/* hook up the driver */
- driver->driver.bus = NULL;
r8a66597->driver = driver;
- r8a66597->gadget.dev.driver = &driver->driver;
-
- retval = device_add(&r8a66597->gadget.dev);
- if (retval) {
- dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
- retval);
- goto error;
- }
init_controller(r8a66597);
r8a66597_bset(r8a66597, VBSE, INTENB0);
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
}
return 0;
-
-error:
- r8a66597->driver = NULL;
- r8a66597->gadget.dev.driver = NULL;
-
- return retval;
}
static int r8a66597_stop(struct usb_gadget *gadget,
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget,
disable_controller(r8a66597);
spin_unlock_irqrestore(&r8a66597->lock, flags);
- device_del(&r8a66597->gadget.dev);
r8a66597->driver = NULL;
return 0;
}
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
clk_put(r8a66597->clk);
}
#endif
+ device_unregister(&r8a66597->gadget.dev);
kfree(r8a66597);
return 0;
}
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
r8a66597->gadget.ops = &r8a66597_gadget_ops;
- device_initialize(&r8a66597->gadget.dev);
dev_set_name(&r8a66597->gadget.dev, "gadget");
r8a66597->gadget.is_dualspeed = 1;
r8a66597->gadget.dev.parent = &pdev->dev;
r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
r8a66597->gadget.dev.release = pdev->dev.release;
r8a66597->gadget.name = udc_name;
+ ret = device_register(&r8a66597->gadget.dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "device_register failed\n");
+ goto clean_up;
+ }
init_timer(&r8a66597->timer);
r8a66597->timer.function = r8a66597_timer;
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
clk_name);
ret = PTR_ERR(r8a66597->clk);
- goto clean_up;
+ goto clean_up_dev;
}
clk_enable(r8a66597->clk);
}
@@ -2014,7 +2004,9 @@ clean_up2:
clk_disable(r8a66597->clk);
clk_put(r8a66597->clk);
}
+clean_up_dev:
#endif
+ device_unregister(&r8a66597->gadget.dev);
clean_up:
if (r8a66597) {
if (r8a66597->sudmac_reg)
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 022baeca7c9..6939e17f458 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
if (udc_is_newstyle(udc)) {
- usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc->gadget, udc->driver);
-
+ usb_gadget_disconnect(udc->gadget);
} else {
usb_gadget_stop(udc->gadget, udc->driver);
}
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
static ssize_t usb_udc_srp_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct usb_udc *udc = dev_get_drvdata(dev);
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
if (sysfs_streq(buf, "1"))
usb_gadget_wakeup(udc->gadget);
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n",
usb_speed_string(udc->gadget->speed));
}
-static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL);
+static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
#define USB_UDC_ATTR(name) \
ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
\
return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
} \
-static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL)
+static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
static USB_UDC_ATTR(is_dualspeed);
static USB_UDC_ATTR(is_otg);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 2e829fae648..56a32033adb 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1479,10 +1479,15 @@ iso_stream_schedule (
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
- /* find a uframe slot with enough bandwidth */
- next = start + period;
- for (; start < next; start++) {
-
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
/* check schedule: enough space? */
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
@@ -1495,7 +1500,7 @@ iso_stream_schedule (
start, sched, period))
break;
}
- }
+ } while (start > next);
/* no room in the schedule */
if (start == next) {
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index fe74bd67601..b4fb511d24b 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd)
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
dbg_hcs_params(ehci, "reset");
dbg_hcc_params(ehci, "reset");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index ba3a46b78b7..95a9fec38e8 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (port < 0 || port >= 2)
return;
+ if (pdata->vbus_pin[port] <= 0)
+ return;
+
gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
}
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (port < 0 || port >= 2)
return -EINVAL;
+ if (pdata->vbus_pin[port] <= 0)
+ return -EINVAL;
+
return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 34efd479e06..b2639191549 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
struct ohci_hcd *ohci;
ohci = hcd_to_ohci (hcd);
- ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
- /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
- ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
- OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
- OHCI_CTRL_RWC);
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+ /* Software reset, after which the controller goes into SUSPEND */
+ ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+ ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
+ udelay(10);
- /* flush the writes */
- (void) ohci_readl (ohci, &ohci->regs->control);
+ ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
}
static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ad8166c681e..bc01b064585 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
return 0;
}
-/* nVidia controllers continue to drive Reset signalling on the bus
- * even after system shutdown, wasting power. This flag tells the
- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
- */
-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
-{
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- /* Evidently nVidia fixed their later hardware; this is a guess at
- * the changeover point.
- */
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
-
- if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
- ohci->flags |= OHCI_QUIRK_SHUTDOWN;
- ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
- }
-
- return 0;
-}
-
static void sb800_prefetch(struct ohci_hcd *ohci, int on)
{
struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
- {
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
- .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
- },
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 35e5fd640ce..0795b934d00 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,7 +403,6 @@ struct ohci_hcd {
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
-#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 27a3dec32fa..caf87428ca4 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -37,6 +37,7 @@
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
#define OHCI_FMINTERVAL 0x34
+#define OHCI_HCFS (3 << 6) /* hc functional state */
#define OHCI_HCR (1 << 0) /* host controller reset */
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
+ u32 fminterval;
+ int cnt;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
}
#endif
- /* reset controller, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
- readl(base + OHCI_CONTROL);
+ /* disable interrupts */
+ writel((u32) ~0, base + OHCI_INTRDISABLE);
- /* Some NVIDIA controllers stop working if kept in RESET for too long */
- if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
- u32 fminterval;
- int cnt;
+ /* Reset the USB bus, if the controller isn't already in RESET */
+ if (control & OHCI_HCFS) {
+ /* Go into RESET, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
- /* drive reset for at least 50 ms (7.1.7.5) */
+ /* drive bus reset for at least 50 ms (7.1.7.5) */
msleep(50);
+ }
- /* software reset of the controller, preserving HcFmInterval */
- fminterval = readl(base + OHCI_FMINTERVAL);
- writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+ /* software reset of the controller, preserving HcFmInterval */
+ fminterval = readl(base + OHCI_FMINTERVAL);
+ writel(OHCI_HCR, base + OHCI_CMDSTATUS);
- /* reset requires max 10 us delay */
- for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
- if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
- break;
- udelay(1);
- }
- writel(fminterval, base + OHCI_FMINTERVAL);
-
- /* Now we're in the SUSPEND state with all devices reset
- * and wakeups and interrupts disabled
- */
+ /* reset requires max 10 us delay */
+ for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
+ if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+ break;
+ udelay(1);
}
+ writel(fminterval, base + OHCI_FMINTERVAL);
- /*
- * disable interrupts
- */
- writel(~(u32)0, base + OHCI_INTRDISABLE);
- writel(~(u32)0, base + OHCI_INTRSTATUS);
-
+ /* Now the controller is safely in SUSPEND and nothing can wake it up */
iounmap(base);
}
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
void __iomem *base, *op_reg_base;
u32 hcc_params, cap, val;
u8 offset, cap_length;
- int wait_time, delta, count = 256/4;
+ int wait_time, count = 256/4;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
writel(val, op_reg_base + EHCI_USBCMD);
wait_time = 2000;
- delta = 100;
do {
writel(0x3f, op_reg_base + EHCI_USBSTS);
- udelay(delta);
- wait_time -= delta;
+ udelay(100);
+ wait_time -= 100;
val = readl(op_reg_base + EHCI_USBSTS);
if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
break;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 42a22b8e692..0e4b25fa3bc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- struct xhci_input_control_ctx *ctrl_ctx;
u32 port_num;
struct usb_device *top_dev;
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
- ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
- /* 2) New slot context and endpoint 0 context are valid*/
- ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
-
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 940321b3ec6..9f1d4b15d81 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
struct xhci_ring *ring;
struct xhci_td *cur_td;
int ret, i, j;
+ unsigned long flags;
ep = (struct xhci_virt_ep *) arg;
xhci = ep->xhci;
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
"xHCI as DYING, exiting.\n");
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
"exiting.\n");
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
xhci->xhc_state |= XHCI_STATE_DYING;
/* Disable interrupts from the host controller and start halting it */
xhci_quiesce(xhci);
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
ret = xhci_halt(xhci);
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
if (ret < 0) {
/* This is bad; the host is not responding to commands and it's
* not allowing itself to be halted. At least interrupts are
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
}
}
}
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Calling usb_hc_died()\n");
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1ff95a0df57..aa94c019579 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -799,7 +799,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
- int retval;
+ int retval = 0;
/* Wait a bit if either of the roothubs need to settle from the
* transition into bus suspend.
@@ -809,6 +809,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci->bus_state[1].next_statechange))
msleep(100);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
spin_lock_irq(&xhci->lock);
if (xhci->quirks & XHCI_RESET_ON_RESUME)
hibernated = true;
@@ -878,20 +881,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
return retval;
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd->primary_hcd);
- if (retval)
- goto failed_restart;
-
- xhci_dbg(xhci, "Start the secondary HCD\n");
- retval = xhci_run(secondary_hcd);
if (!retval) {
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- set_bit(HCD_FLAG_HW_ACCESSIBLE,
- &xhci->shared_hcd->flags);
+ xhci_dbg(xhci, "Start the secondary HCD\n");
+ retval = xhci_run(secondary_hcd);
}
-failed_restart:
hcd->state = HC_STATE_SUSPENDED;
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
- return retval;
+ goto done;
}
/* step 4: set Run/Stop bit */
@@ -910,11 +906,14 @@ failed_restart:
* Running endpoints by ringing their doorbells
*/
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
-
spin_unlock_irq(&xhci->lock);
- return 0;
+
+ done:
+ if (retval == 0) {
+ usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
+ return retval;
}
#endif /* CONFIG_PM */
@@ -3504,6 +3503,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Otherwise, update the control endpoint ring enqueue pointer. */
else
xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+ ctrl_ctx->drop_flags = 0;
+
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
@@ -3585,7 +3588,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
+ 1;
/* Zero the input context control for later use */
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index fc34b8b1191..07a03460a59 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
+ select USB_GADGET_DUALSPEED
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "U8500 and U5500"
- depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500)
+ depends on (ARCH_U8500 && AB8500_USB)
endchoice
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 08f1d0b662a..e233d2b7d33 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -27,6 +27,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 4da7492ddbd..2613bfdb09b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -27,6 +27,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 20a28731c33..c1fa12ec7a9 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
- defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
- defined(CONFIG_ARCH_U5500)
+ defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
static irqreturn_t generic_interrupt(int irq, void *__hci)
{
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ae4a20acef6..d51043acfe1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
nuke(&hw_ep->ep_out, -ESHUTDOWN);
}
}
-
- spin_unlock(&musb->lock);
- driver->disconnect(&musb->g);
- spin_lock(&musb->lock);
}
}
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d2e2efaba65..08c679c0dde 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
/*
* platform functions
*/
-static int __devinit usbhs_probe(struct platform_device *pdev)
+static int usbhs_probe(struct platform_device *pdev)
{
struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
struct renesas_usbhs_driver_callback *dfunc;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8da685e796d..ffdf5d15085 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
if (len % 4) /* 32bit alignment */
goto usbhsf_pio_prepare_push;
- if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_push;
/* get enable DMA fifo */
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
if (!fifo)
goto usbhsf_pio_prepare_pop;
- if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_pop;
ret = usbhsf_fifo_select(pipe, fifo, 0);
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 8ae3733031c..6c6875533f0 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
*/
#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
-extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv);
-extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv);
+extern int usbhs_mod_host_probe(struct usbhs_priv *priv);
+extern int usbhs_mod_host_remove(struct usbhs_priv *priv);
#else
static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
-extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv);
-extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv);
+extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv);
+extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv);
#else
static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
{
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4cc7ee0babc..d9717e0bc1f 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -830,7 +830,7 @@ static int usbhsg_stop(struct usbhs_priv *priv)
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
-int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv;
struct usbhsg_uep *uep;
@@ -927,7 +927,7 @@ usbhs_mod_gadget_probe_err_gpriv:
return ret;
}
-void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
+void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1a7208a50af..bade761a1e5 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -103,7 +103,7 @@ struct usbhsh_hpriv {
u32 port_stat; /* USB_PORT_STAT_xxx */
- struct completion *done;
+ struct completion setup_ack_done;
/* see usbhsh_req_alloc/free */
struct list_head ureq_link_active;
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
struct usbhsh_device *udev,
struct usb_host_endpoint *ep,
+ int dir_in_req,
gfp_t mem_flags)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
struct usbhs_pipe *pipe, *best_pipe;
struct device *dev = usbhsh_hcd_to_dev(hcd);
struct usb_endpoint_descriptor *desc = &ep->desc;
- int type, i;
+ int type, i, dir_in;
unsigned int min_usr;
+ dir_in_req = !!dir_in_req;
+
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
if (!uep) {
dev_err(dev, "usbhsh_ep alloc fail\n");
return NULL;
}
- type = usb_endpoint_type(desc);
+
+ if (usb_endpoint_xfer_control(desc)) {
+ best_pipe = usbhsh_hpriv_to_dcp(hpriv);
+ goto usbhsh_endpoint_alloc_find_pipe;
+ }
/*
* find best pipe for endpoint
* see
* HARDWARE LIMITATION
*/
+ type = usb_endpoint_type(desc);
min_usr = ~0;
best_pipe = NULL;
- usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
+ usbhs_for_each_pipe(pipe, priv, i) {
if (!usbhs_pipe_type_is(pipe, type))
continue;
+ dir_in = !!usbhs_pipe_is_dir_in(pipe);
+ if (0 != (dir_in - dir_in_req))
+ continue;
+
info = usbhsh_pipe_info(pipe);
if (min_usr > info->usr_cnt) {
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
kfree(uep);
return NULL;
}
-
+usbhsh_endpoint_alloc_find_pipe:
/*
* init uep
*/
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
* see
* DCPMAXP/PIPEMAXP
*/
+ usbhs_pipe_sequence_data0(uep->pipe);
usbhs_pipe_config_update(uep->pipe,
usbhsh_device_number(hpriv, udev),
usb_endpoint_num(desc),
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
usbhsh_device_number(hpriv, udev),
- usbhs_pipe_name(pipe), uep);
+ usbhs_pipe_name(uep->pipe), uep);
return uep;
}
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
* usbhsh_irq_setup_ack()
* usbhsh_irq_setup_err()
*/
- DECLARE_COMPLETION(done);
- hpriv->done = &done;
+ init_completion(&hpriv->setup_ack_done);
/* copy original request */
memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
/*
* wait setup packet ACK
*/
- wait_for_completion(&done);
- hpriv->done = NULL;
+ wait_for_completion(&hpriv->setup_ack_done);
dev_dbg(dev, "%s done\n", __func__);
}
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
struct usbhsh_device *udev, *new_udev = NULL;
struct usbhs_pipe *pipe;
struct usbhsh_ep *uep;
+ int is_dir_in = usb_pipein(urb->pipe);
int ret;
- dev_dbg(dev, "%s (%s)\n",
- __func__, usb_pipein(urb->pipe) ? "in" : "out");
+ dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
*/
uep = usbhsh_ep_to_uep(ep);
if (!uep) {
- uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags);
+ uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
+ is_dir_in, mem_flags);
if (!uep)
goto usbhsh_urb_enqueue_error_free_device;
}
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
dev_dbg(dev, "setup packet OK\n");
- if (unlikely(!hpriv->done))
- dev_err(dev, "setup ack happen without necessary data\n");
- else
- complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+ complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
return 0;
}
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
dev_dbg(dev, "setup packet Err\n");
- if (unlikely(!hpriv->done))
- dev_err(dev, "setup err happen without necessary data\n");
- else
- complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+ complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
return 0;
}
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct device *dev = usbhs_priv_to_dev(priv);
+ /*
+ * disable irq callback
+ */
+ mod->irq_attch = NULL;
+ mod->irq_dtch = NULL;
+ mod->irq_sack = NULL;
+ mod->irq_sign = NULL;
+ usbhs_irq_callback_update(priv, mod);
+
usb_remove_hcd(hcd);
/* disable sys */
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
return 0;
}
-int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
+int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv;
struct usb_hcd *hcd;
@@ -1279,7 +1295,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
hpriv->mod.stop = usbhsh_stop;
hpriv->pipe_info = pipe_info;
hpriv->pipe_size = pipe_size;
- hpriv->done = NULL;
usbhsh_req_list_init(hpriv);
usbhsh_port_stat_init(hpriv);
@@ -1299,7 +1314,7 @@ usbhs_mod_host_probe_err:
return -ENOMEM;
}
-int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv)
+int usbhs_mod_host_remove(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5cdb9d91227..18e875b92e0 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
* Version information
*/
-#define DRIVER_VERSION "v0.6"
+#define DRIVER_VERSION "v0.7"
#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
goto err_out;
}
- /* setup termios */
- if (tty)
- ark3116_set_termios(tty, port, NULL);
-
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
/* enable DMA */
ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
+ /* setup termios */
+ if (tty)
+ ark3116_set_termios(tty, port, NULL);
+
err_out:
kfree(buf);
return result;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8fe034d2d3e..bd4298bb675 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2104,13 +2104,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
cflag = termios->c_cflag;
- /* FIXME -For this cut I don't care if the line is really changing or
- not - so just do the change regardless - should be able to
- compare old_termios and tty->termios */
+ if (old_termios->c_cflag == termios->c_cflag
+ && old_termios->c_ispeed == termios->c_ispeed
+ && old_termios->c_ospeed == termios->c_ospeed)
+ goto no_c_cflag_changes;
+
/* NOTE These routines can get interrupted by
ftdi_sio_read_bulk_callback - need to examine what this means -
don't see any problems yet */
+ if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
+ (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
+ goto no_data_parity_stop_changes;
+
/* Set number of data bits, parity, stop bits */
urb_value = 0;
@@ -2151,6 +2157,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
/* Now do the baudrate */
+no_data_parity_stop_changes:
if ((cflag & CBAUD) == B0) {
/* Disable flow control */
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2178,6 +2185,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
/* Set flow control */
/* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
+no_c_cflag_changes:
if (cflag & CRTSCTS) {
dbg("%s Setting to CRTSCTS flow control", __func__);
if (usb_control_msg(dev,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89ae1f65e1b..d865878c9f9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_K4511 0x14CC
#define HUAWEI_PRODUCT_ETS1220 0x1803
#define HUAWEI_PRODUCT_E353 0x1506
+#define HUAWEI_PRODUCT_E173S 0x1C05
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_AC8710 0xfff1
#define ZTE_PRODUCT_AC2726 0xfff5
#define ZTE_PRODUCT_AC8710T 0xffff
+#define ZTE_PRODUCT_MC2718 0xffe8
+#define ZTE_PRODUCT_AD3812 0xffeb
+#define ZTE_PRODUCT_MC2716 0xffed
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CLU528 0x260D
#define YUGA_PRODUCT_CLU526 0x260F
+/* Viettel products */
+#define VIETTEL_VENDOR_ID 0x2262
+#define VIETTEL_PRODUCT_VT1000 0x0002
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
.reserved = BIT(4),
};
+static const struct option_blacklist_info zte_ad3812_z_blacklist = {
+ .sendsetup = BIT(0) | BIT(1) | BIT(2),
+};
+
+static const struct option_blacklist_info zte_mc2718_z_blacklist = {
+ .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+ .sendsetup = BIT(1) | BIT(2) | BIT(3),
+};
+
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -1043,6 +1064,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1141,6 +1168,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9083d1e616b..fc2d66f7f4e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
- { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3d10d7f0207..c38b8c00c06 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -145,10 +145,6 @@
#define ADLINK_VENDOR_ID 0x0b63
#define ADLINK_ND6530_PRODUCT_ID 0x6530
-/* WinChipHead USB->RS 232 adapter */
-#define WINCHIPHEAD_VENDOR_ID 0x4348
-#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
-
/* SMART USB Serial Adapter */
#define SMART_VENDOR_ID 0x0b8c
#define SMART_PRODUCT_ID 0x2303
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4dca3ef0668..9fbe742343c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
} else {
void *buf;
- int offset;
+ int offset = 0;
u16 PhyBlockAddr;
u8 PageNum;
- u32 result;
u16 len, oldphy, newphy;
buf = kmalloc(blenByte, GFP_KERNEL);
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 93c1a4d86f5..82dd834709c 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -59,7 +59,9 @@
void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
{
- /* Pad the SCSI command with zeros out to 12 bytes
+ /*
+ * Pad the SCSI command with zeros out to 12 bytes. If the
+ * command already is 12 bytes or longer, leave it alone.
*
* NOTE: This only works because a scsi_cmnd struct field contains
 * an unsigned char cmnd[16], so we know we have storage available
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
for (; srb->cmd_len<12; srb->cmd_len++)
srb->cmnd[srb->cmd_len] = 0;
- /* set command length to 12 bytes */
- srb->cmd_len = 12;
-
/* send the command to the transport layer */
usb_stor_invoke_transport(srb, us);
}
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00..29577bf1f55 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
/* Clock registers available only on Version 2 */
#define LCD_CLK_ENABLE_REG 0x6c
#define LCD_CLK_RESET_REG 0x70
+#define LCD_CLK_MAIN_RESET BIT(3)
#define LCD_NUM_BUFFERS 2
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
{
u32 reg;
+ /* Bring LCDC out of reset */
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(0, LCD_CLK_RESET_REG);
+
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (!(reg & LCD_RASTER_ENABLE))
lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (reg & LCD_RASTER_ENABLE)
lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+ if (lcd_revision == LCD_VERSION_2)
+ /* Write 1 to reset LCDC */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
}
static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
- if (lcd_revision == LCD_VERSION_2)
+ if (lcd_revision == LCD_VERSION_2) {
lcdc_write(0, LCD_INT_ENABLE_SET_REG);
+ /* Write 1 to reset */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+ lcdc_write(0, LCD_CLK_RESET_REG);
+ }
}
static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47b..6f61e781f15 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551c..5c81533eaca 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
unsigned long fclk = 0;
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- if (width != out_width || height != out_height)
- return -EINVAL;
- else
- return 0;
- }
+ if (width == out_width && height == out_height)
+ return 0;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
+ return -EINVAL;
if (out_width < width / maxdownscale ||
out_width > width * 8)
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa3..c56378c555b 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
unsigned long hdmi_get_pixel_clock(void)
{
/* HDMI Pixel Clock in Mhz */
- return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+ return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
}
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe70..c01c1c16272 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
#define M1200X720_R60_VSP POSITIVE
/* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP NEGATIVE
-#define M1200X900_R60_VSP NEGATIVE
+#define M1200X900_R60_HSP POSITIVE
+#define M1200X900_R60_VSP POSITIVE
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 816ed08e7cf..1a61939b85f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on HAS_IOMEM && EXPERIMENTAL
select VIRTIO
select VIRTIO_RING
---help---
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 2f57380d7ed..0269717436a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
vring_transport_features(vdev);
for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
- writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET);
+ writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
writel(vdev->features[i],
vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
}
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 91683e6e7af..baabb7937ec 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
+/* wait for pending irq handlers */
+static void vp_synchronize_vectors(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled)
+ synchronize_irq(vp_dev->pci_dev->irq);
+
+ for (i = 0; i < vp_dev->msix_vectors; ++i)
+ synchronize_irq(vp_dev->msix_entries[i].vector);
+}
+
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush out the status write, and flush in device writes,
+	 * including MSI-X interrupts, if any. */
+ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush pending VQ/configuration callbacks. */
+ vp_synchronize_vectors(vdev);
}
/* the notify function used when creating a virt queue */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 6285867a935..79fd606b7cd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called nuc900_wdt.
-config ADX_WATCHDOG
- tristate "Avionic Design Xanthos watchdog"
- depends on ARCH_PXA_ADX
- help
- Say Y here if you want support for the watchdog timer on Avionic
- Design Xanthos boards.
-
config TS72XX_WATCHDOG
tristate "TS-72XX SBC Watchdog"
depends on MACH_TS72XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 55bd5740e91..fe893e91935 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
-obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
deleted file mode 100644
index af6e6b16475..00000000000
--- a/drivers/watchdog/adx_wdt.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-
-#define WATCHDOG_NAME "adx-wdt"
-
-/* register offsets */
-#define ADX_WDT_CONTROL 0x00
-#define ADX_WDT_CONTROL_ENABLE (1 << 0)
-#define ADX_WDT_CONTROL_nRESET (1 << 1)
-#define ADX_WDT_TIMEOUT 0x08
-
-static struct platform_device *adx_wdt_dev;
-static unsigned long driver_open;
-
-#define WDT_STATE_STOP 0
-#define WDT_STATE_START 1
-
-struct adx_wdt {
- void __iomem *base;
- unsigned long timeout;
- unsigned int state;
- unsigned int wake;
- spinlock_t lock;
-};
-
-static const struct watchdog_info adx_wdt_info = {
- .identity = "Avionic Design Xanthos Watchdog",
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-};
-
-static void adx_wdt_start_locked(struct adx_wdt *wdt)
-{
- u32 ctrl;
-
- ctrl = readl(wdt->base + ADX_WDT_CONTROL);
- ctrl |= ADX_WDT_CONTROL_ENABLE;
- writel(ctrl, wdt->base + ADX_WDT_CONTROL);
- wdt->state = WDT_STATE_START;
-}
-
-static void adx_wdt_start(struct adx_wdt *wdt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wdt->lock, flags);
- adx_wdt_start_locked(wdt);
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_stop_locked(struct adx_wdt *wdt)
-{
- u32 ctrl;
-
- ctrl = readl(wdt->base + ADX_WDT_CONTROL);
- ctrl &= ~ADX_WDT_CONTROL_ENABLE;
- writel(ctrl, wdt->base + ADX_WDT_CONTROL);
- wdt->state = WDT_STATE_STOP;
-}
-
-static void adx_wdt_stop(struct adx_wdt *wdt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wdt->lock, flags);
- adx_wdt_stop_locked(wdt);
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
-{
- unsigned long timeout = seconds * 1000;
- unsigned long flags;
- unsigned int state;
-
- spin_lock_irqsave(&wdt->lock, flags);
- state = wdt->state;
- adx_wdt_stop_locked(wdt);
- writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
-
- if (state == WDT_STATE_START)
- adx_wdt_start_locked(wdt);
-
- wdt->timeout = timeout;
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
-{
- *seconds = wdt->timeout / 1000;
-}
-
-static void adx_wdt_keepalive(struct adx_wdt *wdt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wdt->lock, flags);
- writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static int adx_wdt_open(struct inode *inode, struct file *file)
-{
- struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
-
- if (test_and_set_bit(0, &driver_open))
- return -EBUSY;
-
- file->private_data = wdt;
- adx_wdt_set_timeout(wdt, 30);
- adx_wdt_start(wdt);
-
- return nonseekable_open(inode, file);
-}
-
-static int adx_wdt_release(struct inode *inode, struct file *file)
-{
- struct adx_wdt *wdt = file->private_data;
-
- adx_wdt_stop(wdt);
- clear_bit(0, &driver_open);
-
- return 0;
-}
-
-static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct adx_wdt *wdt = file->private_data;
- void __user *argp = (void __user *)arg;
- unsigned long __user *p = argp;
- unsigned long seconds = 0;
- unsigned int options;
- long ret = -EINVAL;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
- return -EFAULT;
- else
- return 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_KEEPALIVE:
- adx_wdt_keepalive(wdt);
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(seconds, p))
- return -EFAULT;
-
- adx_wdt_set_timeout(wdt, seconds);
-
- /* fallthrough */
- case WDIOC_GETTIMEOUT:
- adx_wdt_get_timeout(wdt, &seconds);
- return put_user(seconds, p);
-
- case WDIOC_SETOPTIONS:
- if (copy_from_user(&options, argp, sizeof(options)))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD) {
- adx_wdt_stop(wdt);
- ret = 0;
- }
-
- if (options & WDIOS_ENABLECARD) {
- adx_wdt_start(wdt);
- ret = 0;
- }
-
- return ret;
-
- default:
- break;
- }
-
- return -ENOTTY;
-}
-
-static ssize_t adx_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- struct adx_wdt *wdt = file->private_data;
-
- if (len)
- adx_wdt_keepalive(wdt);
-
- return len;
-}
-
-static const struct file_operations adx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = adx_wdt_open,
- .release = adx_wdt_release,
- .unlocked_ioctl = adx_wdt_ioctl,
- .write = adx_wdt_write,
-};
-
-static struct miscdevice adx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &adx_wdt_fops,
-};
-
-static int __devinit adx_wdt_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct adx_wdt *wdt;
- int ret = 0;
- u32 ctrl;
-
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt) {
- dev_err(&pdev->dev, "cannot allocate WDT structure\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&wdt->lock);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
- return -ENXIO;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), res->name);
- if (!res) {
- dev_err(&pdev->dev, "cannot request I/O memory region\n");
- return -ENXIO;
- }
-
- wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!wdt->base) {
- dev_err(&pdev->dev, "cannot remap I/O memory region\n");
- return -ENXIO;
- }
-
- /* disable watchdog and reboot on timeout */
- ctrl = readl(wdt->base + ADX_WDT_CONTROL);
- ctrl &= ~ADX_WDT_CONTROL_ENABLE;
- ctrl &= ~ADX_WDT_CONTROL_nRESET;
- writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-
- platform_set_drvdata(pdev, wdt);
- adx_wdt_dev = pdev;
-
- ret = misc_register(&adx_wdt_miscdev);
- if (ret) {
- dev_err(&pdev->dev, "cannot register miscdev on minor %d "
- "(err=%d)\n", WATCHDOG_MINOR, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int __devexit adx_wdt_remove(struct platform_device *pdev)
-{
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
- misc_deregister(&adx_wdt_miscdev);
- adx_wdt_stop(wdt);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void adx_wdt_shutdown(struct platform_device *pdev)
-{
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
- adx_wdt_stop(wdt);
-}
-
-#ifdef CONFIG_PM
-static int adx_wdt_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
- wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
- adx_wdt_stop(wdt);
-
- return 0;
-}
-
-static int adx_wdt_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
- if (wdt->wake)
- adx_wdt_start(wdt);
-
- return 0;
-}
-
-static const struct dev_pm_ops adx_wdt_pm_ops = {
- .suspend = adx_wdt_suspend,
- .resume = adx_wdt_resume,
-};
-
-# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
-#else
-# define ADX_WDT_PM_OPS NULL
-#endif
-
-static struct platform_driver adx_wdt_driver = {
- .probe = adx_wdt_probe,
- .remove = __devexit_p(adx_wdt_remove),
- .shutdown = adx_wdt_shutdown,
- .driver = {
- .name = WATCHDOG_NAME,
- .owner = THIS_MODULE,
- .pm = ADX_WDT_PM_OPS,
- },
-};
-
-static int __init adx_wdt_init(void)
-{
- return platform_driver_register(&adx_wdt_driver);
-}
-
-static void __exit adx_wdt_exit(void)
-{
- platform_driver_unregister(&adx_wdt_driver);
-}
-
-module_init(adx_wdt_init);
-module_exit(adx_wdt_exit);
-
-MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 5de7e4fa5b8..a79e3840782 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
(wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
- (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
- (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
+ (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
+ (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
return 0;
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 7be38556aed..e789a47db41 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
if (wm831x_wdt_cfgs[i].time == timeout)
break;
if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
- ret = -EINVAL;
+ return -EINVAL;
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929..22c64fff1bd 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
return PTR_ERR(fspath);
if (fspath > fspath_min) {
- ipath->fspath->val[i] = (u64)fspath;
+ ipath->fspath->val[i] = (u64)(unsigned long)fspath;
++ipath->fspath->elem_cnt;
ipath->fspath->bytes_left = fspath - fspath_min;
} else {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea3..dede441bdee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf)
{
+ /* ensure we can see the force_cow */
+ smp_rmb();
+
+ /*
+ * We do not need to cow a block if
+ * 1) this block is not created or changed in this transaction;
+ * 2) this block does not belong to TREE_RELOC tree;
+ * 3) the root is not forced COW.
+ *
+ * What is forced COW:
+ * when we create a snapshot during committing the transaction,
+ * after we've finished copying the src root, we must COW the shared
+ * block to ensure the metadata consistency.
+ */
if (btrfs_header_generation(buf) == trans->transid &&
!btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
!(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+ btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
+ !root->force_cow)
return 0;
return 1;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff929..50634abef9b 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
enum btrfs_caching_type {
BTRFS_CACHE_NO = 0,
BTRFS_CACHE_STARTED = 1,
- BTRFS_CACHE_FINISHED = 2,
+ BTRFS_CACHE_FAST = 2,
+ BTRFS_CACHE_FINISHED = 3,
};
enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
* for stat. It may be used for more later
*/
dev_t anon_dev;
+
+ int force_cow;
};
struct btrfs_ioctl_defrag_range_args {
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62afe5c5694..632f8f3cc9d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:
static int btree_io_failed_hook(struct bio *failed_bio,
struct page *page, u64 start, u64 end,
- u64 mirror_num, struct extent_state *state)
+ int mirror_num, struct extent_state *state)
{
struct extent_io_tree *tree;
unsigned long len;
@@ -2573,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device,
int errors = 0;
u32 crc;
u64 bytenr;
- int last_barrier = 0;
if (max_mirrors == 0)
max_mirrors = BTRFS_SUPER_MIRROR_MAX;
- /* make sure only the last submit_bh does a barrier */
- if (do_barriers) {
- for (i = 0; i < max_mirrors; i++) {
- bytenr = btrfs_sb_offset(i);
- if (bytenr + BTRFS_SUPER_INFO_SIZE >=
- device->total_bytes)
- break;
- last_barrier = i;
- }
- }
-
for (i = 0; i < max_mirrors; i++) {
bytenr = btrfs_sb_offset(i);
if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2634,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device,
bh->b_end_io = btrfs_end_buffer_write_sync;
}
- if (i == last_barrier && do_barriers)
- ret = submit_bh(WRITE_FLUSH_FUA, bh);
- else
- ret = submit_bh(WRITE_SYNC, bh);
-
+ /*
+ * we FUA the first super. The others we allow
+ * to go down lazily.
+ */
+ ret = submit_bh(WRITE_FUA, bh);
if (ret)
errors++;
}
return errors < i ? 0 : -1;
}
+/*
+ * endio for write_dev_flush; this will wake anyone waiting
+ * for the barrier when it is done
+ */
+static void btrfs_end_empty_barrier(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+ if (bio->bi_private)
+ complete(bio->bi_private);
+ bio_put(bio);
+}
+
+/*
+ * trigger flushes for one of the devices. If you pass wait == 0, the flushes are
+ * sent down. With wait == 1, it waits for the previous flush.
+ *
+ * any device where the flush fails with eopnotsupp is flagged as not-barrier
+ * capable
+ */
+static int write_dev_flush(struct btrfs_device *device, int wait)
+{
+ struct bio *bio;
+ int ret = 0;
+
+ if (device->nobarriers)
+ return 0;
+
+ if (wait) {
+ bio = device->flush_bio;
+ if (!bio)
+ return 0;
+
+ wait_for_completion(&device->flush_wait);
+
+ if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+ printk("btrfs: disabling barriers on dev %s\n",
+ device->name);
+ device->nobarriers = 1;
+ }
+ if (!bio_flagged(bio, BIO_UPTODATE)) {
+ ret = -EIO;
+ }
+
+ /* drop the reference from the wait == 0 run */
+ bio_put(bio);
+ device->flush_bio = NULL;
+
+ return ret;
+ }
+
+ /*
+ * one reference for us, and we leave it for the
+ * caller
+ */
+ device->flush_bio = NULL;
+ bio = bio_alloc(GFP_NOFS, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = btrfs_end_empty_barrier;
+ bio->bi_bdev = device->bdev;
+ init_completion(&device->flush_wait);
+ bio->bi_private = &device->flush_wait;
+ device->flush_bio = bio;
+
+ bio_get(bio);
+ submit_bio(WRITE_FLUSH, bio);
+
+ return 0;
+}
+
+/*
+ * send an empty flush down to each device in parallel,
+ * then wait for them
+ */
+static int barrier_all_devices(struct btrfs_fs_info *info)
+{
+ struct list_head *head;
+ struct btrfs_device *dev;
+ int errors = 0;
+ int ret;
+
+ /* send down all the barriers */
+ head = &info->fs_devices->devices;
+ list_for_each_entry_rcu(dev, head, dev_list) {
+ if (!dev->bdev) {
+ errors++;
+ continue;
+ }
+ if (!dev->in_fs_metadata || !dev->writeable)
+ continue;
+
+ ret = write_dev_flush(dev, 0);
+ if (ret)
+ errors++;
+ }
+
+ /* wait for all the barriers */
+ list_for_each_entry_rcu(dev, head, dev_list) {
+ if (!dev->bdev) {
+ errors++;
+ continue;
+ }
+ if (!dev->in_fs_metadata || !dev->writeable)
+ continue;
+
+ ret = write_dev_flush(dev, 1);
+ if (ret)
+ errors++;
+ }
+ if (errors)
+ return -EIO;
+ return 0;
+}
+
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
struct list_head *head;
@@ -2666,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
head = &root->fs_info->fs_devices->devices;
+
+ if (do_barriers)
+ barrier_all_devices(root->fs_info);
+
list_for_each_entry_rcu(dev, head, dev_list) {
if (!dev->bdev) {
total_errors++;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b232150b5b6..f0d5718d258 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
struct btrfs_root *root,
int load_cache_only)
{
+ DEFINE_WAIT(wait);
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_caching_control *caching_ctl;
int ret = 0;
- smp_mb();
- if (cache->cached != BTRFS_CACHE_NO)
+ caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+ BUG_ON(!caching_ctl);
+
+ INIT_LIST_HEAD(&caching_ctl->list);
+ mutex_init(&caching_ctl->mutex);
+ init_waitqueue_head(&caching_ctl->wait);
+ caching_ctl->block_group = cache;
+ caching_ctl->progress = cache->key.objectid;
+ atomic_set(&caching_ctl->count, 1);
+ caching_ctl->work.func = caching_thread;
+
+ spin_lock(&cache->lock);
+ /*
+ * This should be a rare occasion, but this could happen I think in the
+ * case where one thread starts to load the space cache info, and then
+ * some other thread starts a transaction commit which tries to do an
+ * allocation while the other thread is still loading the space cache
+ * info. The previous loop should have kept us from choosing this block
+ * group, but if we've moved to the state where we will wait on caching
+ * block groups we need to first check if we're doing a fast load here,
+ * so we can wait for it to finish, otherwise we could end up allocating
+ * from a block group whose cache gets evicted for one reason or
+ * another.
+ */
+ while (cache->cached == BTRFS_CACHE_FAST) {
+ struct btrfs_caching_control *ctl;
+
+ ctl = cache->caching_ctl;
+ atomic_inc(&ctl->count);
+ prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&cache->lock);
+
+ schedule();
+
+ finish_wait(&ctl->wait, &wait);
+ put_caching_control(ctl);
+ spin_lock(&cache->lock);
+ }
+
+ if (cache->cached != BTRFS_CACHE_NO) {
+ spin_unlock(&cache->lock);
+ kfree(caching_ctl);
return 0;
+ }
+ WARN_ON(cache->caching_ctl);
+ cache->caching_ctl = caching_ctl;
+ cache->cached = BTRFS_CACHE_FAST;
+ spin_unlock(&cache->lock);
/*
* We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
if (trans && (!trans->transaction->in_commit) &&
(root && root != root->fs_info->tree_root) &&
btrfs_test_opt(root, SPACE_CACHE)) {
- spin_lock(&cache->lock);
- if (cache->cached != BTRFS_CACHE_NO) {
- spin_unlock(&cache->lock);
- return 0;
- }
- cache->cached = BTRFS_CACHE_STARTED;
- spin_unlock(&cache->lock);
-
ret = load_free_space_cache(fs_info, cache);
spin_lock(&cache->lock);
if (ret == 1) {
+ cache->caching_ctl = NULL;
cache->cached = BTRFS_CACHE_FINISHED;
cache->last_byte_to_unpin = (u64)-1;
} else {
- cache->cached = BTRFS_CACHE_NO;
+ if (load_cache_only) {
+ cache->caching_ctl = NULL;
+ cache->cached = BTRFS_CACHE_NO;
+ } else {
+ cache->cached = BTRFS_CACHE_STARTED;
+ }
}
spin_unlock(&cache->lock);
+ wake_up(&caching_ctl->wait);
if (ret == 1) {
+ put_caching_control(caching_ctl);
free_excluded_extents(fs_info->extent_root, cache);
return 0;
}
+ } else {
+ /*
+ * We are not going to do the fast caching, set cached to the
+ * appropriate value and wakeup any waiters.
+ */
+ spin_lock(&cache->lock);
+ if (load_cache_only) {
+ cache->caching_ctl = NULL;
+ cache->cached = BTRFS_CACHE_NO;
+ } else {
+ cache->cached = BTRFS_CACHE_STARTED;
+ }
+ spin_unlock(&cache->lock);
+ wake_up(&caching_ctl->wait);
}
- if (load_cache_only)
- return 0;
-
- caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
- BUG_ON(!caching_ctl);
-
- INIT_LIST_HEAD(&caching_ctl->list);
- mutex_init(&caching_ctl->mutex);
- init_waitqueue_head(&caching_ctl->wait);
- caching_ctl->block_group = cache;
- caching_ctl->progress = cache->key.objectid;
- /* one for caching kthread, one for caching block group list */
- atomic_set(&caching_ctl->count, 2);
- caching_ctl->work.func = caching_thread;
-
- spin_lock(&cache->lock);
- if (cache->cached != BTRFS_CACHE_NO) {
- spin_unlock(&cache->lock);
- kfree(caching_ctl);
+ if (load_cache_only) {
+ put_caching_control(caching_ctl);
return 0;
}
- cache->caching_ctl = caching_ctl;
- cache->cached = BTRFS_CACHE_STARTED;
- spin_unlock(&cache->lock);
down_write(&fs_info->extent_commit_sem);
+ atomic_inc(&caching_ctl->count);
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
up_write(&fs_info->extent_commit_sem);
@@ -3847,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved, int flush)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
@@ -3868,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
if (!ret)
return 0;
- ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+ ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
block_rsv_add_bytes(block_rsv, num_bytes, 0);
return 0;
@@ -3877,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved)
+{
+ return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved)
+{
+ return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes)
@@ -5178,13 +5233,15 @@ search:
}
have_block_group:
- if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+ cached = block_group_cache_done(block_group);
+ if (unlikely(!cached)) {
u64 free_percent;
+ found_uncached_bg = true;
ret = cache_block_group(block_group, trans,
orig_root, 1);
if (block_group->cached == BTRFS_CACHE_FINISHED)
- goto have_block_group;
+ goto alloc;
free_percent = btrfs_block_group_used(&block_group->item);
free_percent *= 100;
@@ -5206,7 +5263,6 @@ have_block_group:
orig_root, 0);
BUG_ON(ret);
}
- found_uncached_bg = true;
/*
* If loop is set for cached only, try the next block
@@ -5216,17 +5272,14 @@ have_block_group:
goto loop;
}
- cached = block_group_cache_done(block_group);
- if (unlikely(!cached))
- found_uncached_bg = true;
-
+alloc:
if (unlikely(block_group->ro))
goto loop;
spin_lock(&block_group->free_space_ctl->tree_lock);
if (cached &&
block_group->free_space_ctl->free_space <
- num_bytes + empty_size) {
+ num_bytes + empty_cluster + empty_size) {
spin_unlock(&block_group->free_space_ctl->tree_lock);
goto loop;
}
@@ -5247,12 +5300,10 @@ have_block_group:
* people trying to start a new cluster
*/
spin_lock(&last_ptr->refill_lock);
- if (last_ptr->block_group &&
- (last_ptr->block_group->ro ||
- !block_group_bits(last_ptr->block_group, data))) {
- offset = 0;
+ if (!last_ptr->block_group ||
+ last_ptr->block_group->ro ||
+ !block_group_bits(last_ptr->block_group, data))
goto refill_cluster;
- }
offset = btrfs_alloc_from_cluster(block_group, last_ptr,
num_bytes, search_start);
@@ -5303,7 +5354,7 @@ refill_cluster:
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
- offset, num_bytes,
+ search_start, num_bytes,
empty_cluster + empty_size);
if (ret == 0) {
/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a..be1bf627a14 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2285,16 +2285,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
clean_io_failure(start, page);
}
if (!uptodate) {
- u64 failed_mirror;
- failed_mirror = (u64)bio->bi_bdev;
- if (tree->ops && tree->ops->readpage_io_failed_hook)
- ret = tree->ops->readpage_io_failed_hook(
- bio, page, start, end,
- failed_mirror, state);
- else
- ret = bio_readpage_error(bio, page, start, end,
- failed_mirror, NULL);
+ int failed_mirror;
+ failed_mirror = (int)(unsigned long)bio->bi_bdev;
+ /*
+ * The generic bio_readpage_error handles errors the
+ * following way: If possible, new read requests are
+ * created and submitted and will end up in
+ * end_bio_extent_readpage as well (if we're lucky, not
+ * in the !uptodate case). In that case it returns 0 and
+ * we just go on with the next page in our bio. If it
+ * can't handle the error it will return -EIO and we
+ * remain responsible for that page.
+ */
+ ret = bio_readpage_error(bio, page, start, end,
+ failed_mirror, NULL);
if (ret == 0) {
+error_handled:
uptodate =
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
@@ -2302,6 +2308,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
uncache_state(&cached);
continue;
}
+ if (tree->ops && tree->ops->readpage_io_failed_hook) {
+ ret = tree->ops->readpage_io_failed_hook(
+ bio, page, start, end,
+ failed_mirror, state);
+ if (ret == 0)
+ goto error_handled;
+ }
}
if (uptodate) {
@@ -3366,6 +3379,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
+ start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
+ len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+
/*
* lookup the last file extent. We're not using i_size here
* because there might be preallocation past i_size
@@ -3413,7 +3429,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
&cached_state, GFP_NOFS);
- em = get_extent_skip_holes(inode, off, last_for_get_extent,
+ em = get_extent_skip_holes(inode, start, last_for_get_extent,
get_extent);
if (!em)
goto out;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23b..7604c300132 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops {
unsigned long bio_flags);
int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
- u64 start, u64 end, u64 failed_mirror,
+ u64 start, u64 end, int failed_mirror,
struct extent_state *state);
int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
u64 start, u64 end,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 181760f9d2a..ec23d43d0c3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
}
}
+ for (i = 0; i < io_ctl->num_pages; i++) {
+ clear_page_dirty_for_io(io_ctl->pages[i]);
+ set_page_extent_mapped(io_ctl->pages[i]);
+ }
+
return 0;
}
@@ -1465,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
{
info->offset = offset_to_bitmap(ctl, offset);
info->bytes = 0;
+ INIT_LIST_HEAD(&info->list);
link_free_space(ctl, info);
ctl->total_bitmaps++;
@@ -1844,7 +1850,13 @@ again:
info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1, 0);
if (!info) {
- WARN_ON(1);
+ /* the tree logging code might be calling us before we
+ * have fully loaded the free space rbtree for this
+ * block group. So it is possible the entry won't
+ * be in the rbtree yet at all. The caching code
+ * will make sure not to put it in the rbtree if
+ * the logging code has pinned it.
+ */
goto out_lock;
}
}
@@ -2308,6 +2320,7 @@ again:
if (!found) {
start = i;
+ cluster->max_size = 0;
found = true;
}
@@ -2451,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry;
- struct rb_node *node;
int ret = -ENOSPC;
+ u64 bitmap_offset = offset_to_bitmap(ctl, offset);
if (ctl->total_bitmaps == 0)
return -ENOSPC;
/*
- * First check our cached list of bitmaps and see if there is an entry
- * here that will work.
+ * The bitmap that covers offset won't be in the list unless offset
+ * is just its start offset.
*/
+ entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+ if (entry->offset != bitmap_offset) {
+ entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
+ if (entry && list_empty(&entry->list))
+ list_add(&entry->list, bitmaps);
+ }
+
list_for_each_entry(entry, bitmaps, list) {
if (entry->bytes < min_bytes)
continue;
@@ -2471,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
}
/*
- * If we do have entries on our list and we are here then we didn't find
- * anything, so go ahead and get the next entry after the last entry in
- * this list and start the search from there.
+ * The bitmaps list has all the bitmaps that record free space
+ * starting after offset, so no more search is required.
*/
- if (!list_empty(bitmaps)) {
- entry = list_entry(bitmaps->prev, struct btrfs_free_space,
- list);
- node = rb_next(&entry->offset_index);
- if (!node)
- return -ENOSPC;
- entry = rb_entry(node, struct btrfs_free_space, offset_index);
- goto search;
- }
-
- entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
- if (!entry)
- return -ENOSPC;
-
-search:
- node = &entry->offset_index;
- do {
- entry = rb_entry(node, struct btrfs_free_space, offset_index);
- node = rb_next(&entry->offset_index);
- if (!entry->bitmap)
- continue;
- if (entry->bytes < min_bytes)
- continue;
- ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
- bytes, min_bytes);
- } while (ret && node);
-
- return ret;
+ return -ENOSPC;
}
/*
@@ -2520,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
- struct list_head bitmaps;
struct btrfs_free_space *entry, *tmp;
+ LIST_HEAD(bitmaps);
u64 min_bytes;
int ret;
@@ -2560,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
goto out;
}
- INIT_LIST_HEAD(&bitmaps);
ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
bytes, min_bytes);
if (ret)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 116ab67a06d..2c984f7d4c2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
* doing the truncate.
*/
while (1) {
- ret = btrfs_block_rsv_refill(root, rsv, min_size);
+ ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
/*
* Try and steal from the global reserve since we will
@@ -6794,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
+ u32 blocksize = inode->i_sb->s_blocksize;
+
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->blksize = PAGE_CACHE_SIZE;
- stat->blocks = (inode_get_bytes(inode) +
- BTRFS_I(inode)->delalloc_bytes) >> 9;
+ stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
+ ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
return 0;
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f12..72d461656f6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
*devstr = '\0';
devstr = vol_args->name;
devid = simple_strtoull(devstr, &end, 10);
- printk(KERN_INFO "resizing devid %llu\n",
+ printk(KERN_INFO "btrfs: resizing devid %llu\n",
(unsigned long long)devid);
}
device = btrfs_find_device(root, devid, NULL, NULL);
if (!device) {
- printk(KERN_INFO "resizer unable to find device %llu\n",
+ printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
(unsigned long long)devid);
ret = -EINVAL;
goto out_unlock;
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
do_div(new_size, root->sectorsize);
new_size *= root->sectorsize;
- printk(KERN_INFO "new size for %s is %llu\n",
+ printk(KERN_INFO "btrfs: new size for %s is %llu\n",
device->name, (unsigned long long)new_size);
if (new_size > old_size) {
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
}
ret = btrfs_grow_device(trans, device, new_size);
btrfs_commit_transaction(trans, root);
- } else {
+ } else if (new_size < old_size) {
ret = btrfs_shrink_device(device, new_size);
}
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
goto out;
for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
- rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val;
+ rel_ptr = ipath->fspath->val[i] -
+ (u64)(unsigned long)ipath->fspath->val;
ipath->fspath->val[i] = rel_ptr;
}
- ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size);
+ ret = copy_to_user((void *)(unsigned long)ipa->fspath,
+ (void *)(unsigned long)ipath->fspath, size);
if (ret) {
ret = -EFAULT;
goto out;
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
if (ret < 0)
goto out;
- ret = copy_to_user((void *)loi->inodes, (void *)inodes, size);
+ ret = copy_to_user((void *)(unsigned long)loi->inodes,
+ (void *)(unsigned long)inodes, size);
if (ret)
ret = -EFAULT;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f4190f22edf..c27bcb67f33 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
btrfs_release_path(swarn->path);
ipath = init_ipath(4096, local_root, swarn->path);
+ if (IS_ERR(ipath)) {
+ ret = PTR_ERR(ipath);
+ ipath = NULL;
+ goto err;
+ }
ret = paths_from_inode(inum, ipath);
if (ret < 0)
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
swarn->logical, swarn->dev->name,
(unsigned long long)swarn->sector, root, inum, offset,
min(isize - offset, (u64)PAGE_SIZE), nlink,
- (char *)ipath->fspath->val[i]);
+ (char *)(unsigned long)ipath->fspath->val[i]);
free_ipath(ipath);
return 0;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 17ee7fc5e64..e28ad4baf48 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1057,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
int i = 0, nr_devices;
int ret;
- nr_devices = fs_info->fs_devices->rw_devices;
+ nr_devices = fs_info->fs_devices->open_devices;
BUG_ON(!nr_devices);
devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1079,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
else
min_stripe_size = BTRFS_STRIPE_LEN;
- list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
- if (!device->in_fs_metadata)
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (!device->in_fs_metadata || !device->bdev)
continue;
avail_space = device->total_bytes - device->bytes_used;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6a0574e923b..81376d94cd3 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
btrfs_save_ino_cache(root, trans);
+ /* see comments in should_cow_block() */
+ root->force_cow = 0;
+ smp_wmb();
+
if (root->commit_root != root->node) {
mutex_lock(&root->fs_commit_mutex);
switch_commit_root(root);
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(old);
free_extent_buffer(old);
+ /* see comments in should_cow_block() */
+ root->force_cow = 1;
+ smp_wmb();
+
btrfs_set_root_node(new_root_item, tmp);
/* record when the snapshot was created in key.offset */
key.offset = trans->transid;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ab5b1c49f35..78f2d4d4f37 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,12 @@ struct btrfs_device {
struct reada_zone *reada_curr_zone;
struct radix_tree_root reada_zones;
struct radix_tree_root reada_extents;
+
+ /* for sending down flush barriers */
+ struct bio *flush_bio;
+ struct completion flush_wait;
+ int nobarriers;
+
};
struct btrfs_fs_devices {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 2abd0dfad7f..bca3948e9db 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1143,7 +1143,7 @@ static void ceph_d_prune(struct dentry *dentry)
{
struct ceph_dentry_info *di;
- dout("d_release %p\n", dentry);
+ dout("ceph_d_prune %p\n", dentry);
/* do we have a valid parent? */
if (!dentry->d_parent || IS_ROOT(dentry))
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e392bfce84a..116f36502f1 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1328,12 +1328,13 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
*/
void ceph_queue_writeback(struct inode *inode)
{
+ ihold(inode);
if (queue_work(ceph_inode_to_client(inode)->wb_wq,
&ceph_inode(inode)->i_wb_work)) {
dout("ceph_queue_writeback %p\n", inode);
- ihold(inode);
} else {
dout("ceph_queue_writeback %p failed\n", inode);
+ iput(inode);
}
}
@@ -1353,12 +1354,13 @@ static void ceph_writeback_work(struct work_struct *work)
*/
void ceph_queue_invalidate(struct inode *inode)
{
+ ihold(inode);
if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
&ceph_inode(inode)->i_pg_inv_work)) {
dout("ceph_queue_invalidate %p\n", inode);
- ihold(inode);
} else {
dout("ceph_queue_invalidate %p failed\n", inode);
+ iput(inode);
}
}
@@ -1434,13 +1436,14 @@ void ceph_queue_vmtruncate(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ ihold(inode);
if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
&ci->i_vmtruncate_work)) {
dout("ceph_queue_vmtruncate %p\n", inode);
- ihold(inode);
} else {
dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
inode, ci->i_truncate_pending);
+ iput(inode);
}
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a90846fac75..8dc73a594a9 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -638,10 +638,12 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
if (err == 0) {
dout("open_root_inode success\n");
if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
- fsc->sb->s_root == NULL)
+ fsc->sb->s_root == NULL) {
root = d_alloc_root(req->r_target_inode);
- else
+ ceph_init_dentry(root);
+ } else {
root = d_obtain_alias(req->r_target_inode);
+ }
req->r_target_inode = NULL;
dout("open_root_inode success, root dentry is %p\n", root);
} else {
diff --git a/fs/dcache.c b/fs/dcache.c
index a901c6901bc..10ba92def3f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -36,6 +36,7 @@
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
+#include <linux/ratelimit.h>
#include "internal.h"
/*
@@ -2383,8 +2384,16 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
actual = __d_unalias(inode, dentry, alias);
}
write_sequnlock(&rename_lock);
- if (IS_ERR(actual))
+ if (IS_ERR(actual)) {
+ if (PTR_ERR(actual) == -ELOOP)
+ pr_warn_ratelimited(
+ "VFS: Lookup of '%s' in %s %s"
+ " would have caused loop\n",
+ dentry->d_name.name,
+ inode->i_sb->s_type->name,
+ inode->i_sb->s_id);
dput(alias);
+ }
goto out_nolock;
}
}
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 58609bde3b9..2a834255c75 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals(
/**
* ecryptfs_new_file_context
- * @ecryptfs_dentry: The eCryptfs dentry
+ * @ecryptfs_inode: The eCryptfs inode
*
* If the crypto context for the file has not yet been established,
* this is where we do that. Establishing a new crypto context
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals(
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry)
+int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
- ecryptfs_dentry->d_sb)->mount_crypt_stat;
+ ecryptfs_inode->i_sb)->mount_crypt_stat;
int cipher_name_len;
int rc = 0;
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
}
static int
-ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry,
+ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
char *virt, size_t virt_len)
{
int rc;
- rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
+ rc = ecryptfs_write_lower(ecryptfs_inode, virt,
0, virt_len);
if (rc < 0)
printk(KERN_ERR "%s: Error attempting to write header "
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
/**
* ecryptfs_write_metadata
- * @ecryptfs_dentry: The eCryptfs dentry
+ * @ecryptfs_dentry: The eCryptfs dentry, which should be negative
+ * @ecryptfs_inode: The newly created eCryptfs inode
*
* Write the file headers out. This will likely involve a userspace
* callout, in which the session key is encrypted with one or more
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
*
* Returns zero on success; non-zero on error
*/
-int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
+int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
unsigned int order;
char *virt;
size_t virt_len;
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
size);
else
- rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt,
+ rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
virt_len);
if (rc) {
printk(KERN_ERR "%s: Error writing metadata out to lower file; "
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
/* We could either offset on every reverse map or just pad some 0x00's
* at the front here */
-static const unsigned char filename_rev_map[] = {
+static const unsigned char filename_rev_map[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = {
0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
- 0x3D, 0x3E, 0x3F
+ 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
};
/**
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 54481a3b2c7..a9f29b12fbf 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
int ecryptfs_encrypt_page(struct page *page);
int ecryptfs_decrypt_page(struct page *page);
-int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
+int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode);
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
-int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
+int ecryptfs_new_file_context(struct inode *ecryptfs_inode);
void ecryptfs_write_crypt_stat_flags(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c6ac98cf9ba..d3f95f941c4 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -139,6 +139,27 @@ out:
return rc;
}
+static void ecryptfs_vma_close(struct vm_area_struct *vma)
+{
+ filemap_write_and_wait(vma->vm_file->f_mapping);
+}
+
+static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+ .close = ecryptfs_vma_close,
+ .fault = filemap_fault,
+};
+
+static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int rc;
+
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
+ vma->vm_ops = &ecryptfs_file_vm_ops;
+
+ return rc;
+}
+
struct kmem_cache *ecryptfs_file_info_cache;
/**
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ecryptfs_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a36d327f152..32f90a3ae63 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
* it. It will also update the eCryptfs directory inode to mimic the
* stat of the lower directory inode.
*
- * Returns zero on success; non-zero on error condition
+ * Returns the new eCryptfs inode on success; an ERR_PTR on error condition
*/
-static int
+static struct inode *
ecryptfs_do_create(struct inode *directory_inode,
struct dentry *ecryptfs_dentry, int mode)
{
int rc;
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
+ struct inode *inode;
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
if (IS_ERR(lower_dir_dentry)) {
ecryptfs_printk(KERN_ERR, "Error locking directory of "
"dentry\n");
- rc = PTR_ERR(lower_dir_dentry);
+ inode = ERR_CAST(lower_dir_dentry);
goto out;
}
rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode,
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
+ inode = ERR_PTR(rc);
goto out_lock;
}
- rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- directory_inode->i_sb);
- if (rc) {
- ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
+ inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+ directory_inode->i_sb);
+ if (IS_ERR(inode))
goto out_lock;
- }
fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
out_lock:
unlock_dir(lower_dir_dentry);
out:
- return rc;
+ return inode;
}
/**
@@ -219,26 +219,26 @@ out:
*
* Returns zero on success
*/
-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
+static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
- &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc = 0;
- if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+ if (S_ISDIR(ecryptfs_inode->i_mode)) {
ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
goto out;
}
ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
- rc = ecryptfs_new_file_context(ecryptfs_dentry);
+ rc = ecryptfs_new_file_context(ecryptfs_inode);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error creating new file "
"context; rc = [%d]\n", rc);
goto out;
}
- rc = ecryptfs_get_lower_file(ecryptfs_dentry,
- ecryptfs_dentry->d_inode);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
ecryptfs_dentry->d_name.name, rc);
goto out;
}
- rc = ecryptfs_write_metadata(ecryptfs_dentry);
+ rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
if (rc)
printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
- ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
+ ecryptfs_put_lower_file(ecryptfs_inode);
out:
return rc;
}
@@ -269,18 +269,28 @@ static int
ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
int mode, struct nameidata *nd)
{
+ struct inode *ecryptfs_inode;
int rc;
- /* ecryptfs_do_create() calls ecryptfs_interpose() */
- rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode);
- if (unlikely(rc)) {
+ ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
+ mode);
+ if (unlikely(IS_ERR(ecryptfs_inode))) {
ecryptfs_printk(KERN_WARNING, "Failed to create file in"
"lower filesystem\n");
+ rc = PTR_ERR(ecryptfs_inode);
goto out;
}
/* At this point, a file exists on "disk"; we need to make sure
* that this on disk file is prepared to be an ecryptfs file */
- rc = ecryptfs_initialize_file(ecryptfs_dentry);
+ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
+ if (rc) {
+ drop_nlink(ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
+ iput(ecryptfs_inode);
+ goto out;
+ }
+ d_instantiate(ecryptfs_dentry, ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
out:
return rc;
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index f6dba4505f1..12ccacda44e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -565,7 +565,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
brelse(bitmap_bh);
printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
", computed = %llu, %llu\n",
- EXT4_B2C(sbi, ext4_free_blocks_count(es)),
+ EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
desc_count, bitmap_count);
return bitmap_count;
#else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 240f6e2dc7e..848f436df29 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2270,6 +2270,7 @@ retry:
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
"%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
+ blk_finish_plug(&plug);
goto out_writepages;
}
@@ -2806,8 +2807,8 @@ out:
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
/* queue the work to convert unwritten extents to written */
- queue_work(wq, &io_end->work);
iocb->private = NULL;
+ queue_work(wq, &io_end->work);
/* XXX: probably should move into the real I/O completion handler */
inode_dio_done(inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9953d80145a..3858767ec67 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1683,7 +1683,9 @@ static int parse_options(char *options, struct super_block *sb,
data_opt = EXT4_MOUNT_WRITEBACK_DATA;
datacheck:
if (is_remount) {
- if (test_opt(sb, DATA_FLAGS) != data_opt) {
+ if (!sbi->s_journal)
+ ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
+ else if (test_opt(sb, DATA_FLAGS) != data_opt) {
ext4_msg(sb, KERN_ERR,
"Cannot change data mode on remount");
return 0;
@@ -3099,8 +3101,6 @@ static void ext4_destroy_lazyinit_thread(void)
}
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
- __releases(kernel_lock)
- __acquires(kernel_lock)
{
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 3f32bcb0d9b..ef175cb8cfd 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -16,38 +16,26 @@
#include <linux/bitops.h>
#include <linux/sched.h>
-static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
-
static DEFINE_SPINLOCK(bitmap_lock);
-static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
+/*
+ * bitmap consists of blocks filled with 16bit words
+ * bit set == busy, bit clear == free
+ * endianness is a mess, but for counting zero bits it really doesn't matter...
+ */
+static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
{
- unsigned i, j, sum = 0;
- struct buffer_head *bh;
-
- for (i=0; i<numblocks-1; i++) {
- if (!(bh=map[i]))
- return(0);
- for (j=0; j<bh->b_size; j++)
- sum += nibblemap[bh->b_data[j] & 0xf]
- + nibblemap[(bh->b_data[j]>>4) & 0xf];
- }
+ __u32 sum = 0;
+ unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);
- if (numblocks==0 || !(bh=map[numblocks-1]))
- return(0);
- i = ((numbits - (numblocks-1) * bh->b_size * 8) / 16) * 2;
- for (j=0; j<i; j++) {
- sum += nibblemap[bh->b_data[j] & 0xf]
- + nibblemap[(bh->b_data[j]>>4) & 0xf];
+ while (blocks--) {
+ unsigned words = blocksize / 2;
+ __u16 *p = (__u16 *)(*map++)->b_data;
+ while (words--)
+ sum += 16 - hweight16(*p++);
}
- i = numbits%16;
- if (i!=0) {
- i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1);
- sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf];
- sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf];
- }
- return(sum);
+ return sum;
}
void minix_free_block(struct inode *inode, unsigned long block)
@@ -105,10 +93,12 @@ int minix_new_block(struct inode * inode)
return 0;
}
-unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
+unsigned long minix_count_free_blocks(struct super_block *sb)
{
- return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
- sbi->s_nzones - sbi->s_firstdatazone + 1)
+ struct minix_sb_info *sbi = minix_sb(sb);
+ u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
+
+ return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
<< sbi->s_log_zone_size);
}
@@ -273,7 +263,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
return inode;
}
-unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
+unsigned long minix_count_free_inodes(struct super_block *sb)
{
- return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
+ struct minix_sb_info *sbi = minix_sb(sb);
+ u32 bits = sbi->s_ninodes + 1;
+
+ return count_free(sbi->s_imap, sb->s_blocksize, bits);
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 64cdcd662ff..1d9e33966db 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
else if (sbi->s_mount_state & MINIX_ERROR_FS)
printk("MINIX-fs: mounting file system with errors, "
"running fsck is recommended\n");
+
+ /* Apparently minix can create filesystems that allocate more blocks for
+ * the bitmaps than needed. We simply ignore that, but verify it didn't
+ * create one with not enough blocks and bail out if so.
+ */
+ block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
+ if (sbi->s_imap_blocks < block) {
+ printk("MINIX-fs: file system does not have enough "
+ "imap blocks allocated. Refusing to mount\n");
+ goto out_iput;
+ }
+
+ block = minix_blocks_needed(
+ (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
+ s->s_blocksize);
+ if (sbi->s_zmap_blocks < block) {
+ printk("MINIX-fs: file system does not have enough "
+ "zmap blocks allocated. Refusing to mount.\n");
+ goto out_iput;
+ }
+
return 0;
out_iput:
@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = sb->s_magic;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
- buf->f_bfree = minix_count_free_blocks(sbi);
+ buf->f_bfree = minix_count_free_blocks(sb);
buf->f_bavail = buf->f_bfree;
buf->f_files = sbi->s_ninodes;
- buf->f_ffree = minix_count_free_inodes(sbi);
+ buf->f_ffree = minix_count_free_inodes(sb);
buf->f_namelen = sbi->s_namelen;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 341e2122879..26bbd55e82e 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct inode * minix_new_inode(const struct inode *, int, int *);
extern void minix_free_inode(struct inode * inode);
-extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
+extern unsigned long minix_count_free_inodes(struct super_block *sb);
extern int minix_new_block(struct inode * inode);
extern void minix_free_block(struct inode *inode, unsigned long block);
-extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
+extern unsigned long minix_count_free_blocks(struct super_block *sb);
extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
return list_entry(inode, struct minix_inode_info, vfs_inode);
}
+static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
+{
+ return DIV_ROUND_UP(bits, blocksize * 8);
+}
+
#if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
@@ -125,7 +130,7 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
if (!size)
return 0;
- size = (size >> 4) + ((size & 15) > 0);
+ size >>= 4;
while (*p++ == 0xffff) {
if (--size == 0)
return (p - addr) << 4;
diff --git a/fs/namespace.c b/fs/namespace.c
index 50ee30345b4..6d3a1963879 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2493,6 +2493,7 @@ EXPORT_SYMBOL(create_mnt_ns);
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
struct mnt_namespace *ns;
+ struct super_block *s;
struct path path;
int err;
@@ -2509,10 +2510,11 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
return ERR_PTR(err);
/* trade a vfsmount reference for active sb one */
- atomic_inc(&path.mnt->mnt_sb->s_active);
+ s = path.mnt->mnt_sb;
+ atomic_inc(&s->s_active);
mntput(path.mnt);
/* lock the sucker */
- down_write(&path.mnt->mnt_sb->s_umount);
+ down_write(&s->s_umount);
/* ... and return the root of (sub)tree on it */
return path.dentry;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index b238d95ac48..ac289909814 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1468,12 +1468,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
res = NULL;
goto out;
/* This turned out not to be a regular file */
+ case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
- /* case -EISDIR: */
/* case -EINVAL: */
default:
res = ERR_CAST(inode);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 0a1f8312b4d..eca56d4b39c 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -40,48 +40,8 @@
#define NFSDBG_FACILITY NFSDBG_FILE
-static int nfs_file_open(struct inode *, struct file *);
-static int nfs_file_release(struct inode *, struct file *);
-static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
-static int nfs_file_mmap(struct file *, struct vm_area_struct *);
-static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t count, unsigned int flags);
-static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
-static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
- struct file *filp, loff_t *ppos,
- size_t count, unsigned int flags);
-static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
-static int nfs_file_flush(struct file *, fl_owner_t id);
-static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync);
-static int nfs_check_flags(int flags);
-static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
-static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
-
static const struct vm_operations_struct nfs_file_vm_ops;
-const struct file_operations nfs_file_operations = {
- .llseek = nfs_file_llseek,
- .read = do_sync_read,
- .write = do_sync_write,
- .aio_read = nfs_file_read,
- .aio_write = nfs_file_write,
- .mmap = nfs_file_mmap,
- .open = nfs_file_open,
- .flush = nfs_file_flush,
- .release = nfs_file_release,
- .fsync = nfs_file_fsync,
- .lock = nfs_lock,
- .flock = nfs_flock,
- .splice_read = nfs_file_splice_read,
- .splice_write = nfs_file_splice_write,
- .check_flags = nfs_check_flags,
- .setlease = nfs_setlease,
-};
-
const struct inode_operations nfs_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
@@ -886,3 +846,54 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
file->f_path.dentry->d_name.name, arg);
return -EINVAL;
}
+
+const struct file_operations nfs_file_operations = {
+ .llseek = nfs_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = nfs_file_read,
+ .aio_write = nfs_file_write,
+ .mmap = nfs_file_mmap,
+ .open = nfs_file_open,
+ .flush = nfs_file_flush,
+ .release = nfs_file_release,
+ .fsync = nfs_file_fsync,
+ .lock = nfs_lock,
+ .flock = nfs_flock,
+ .splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
+ .check_flags = nfs_check_flags,
+ .setlease = nfs_setlease,
+};
+
+#ifdef CONFIG_NFS_V4
+static int
+nfs4_file_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
+ * this point, then something is very wrong
+ */
+ dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
+ return -ENOTDIR;
+}
+
+const struct file_operations nfs4_file_operations = {
+ .llseek = nfs_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = nfs_file_read,
+ .aio_write = nfs_file_write,
+ .mmap = nfs_file_mmap,
+ .open = nfs4_file_open,
+ .flush = nfs_file_flush,
+ .release = nfs_file_release,
+ .fsync = nfs_file_fsync,
+ .lock = nfs_lock,
+ .flock = nfs_flock,
+ .splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
+ .check_flags = nfs_check_flags,
+ .setlease = nfs_setlease,
+};
+#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c07a55aec83..50a15fa8cf9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
*/
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
if (S_ISREG(inode->i_mode)) {
- inode->i_fop = &nfs_file_operations;
+ inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
inode->i_data.a_ops = &nfs_file_aops;
inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
} else if (S_ISDIR(inode->i_mode)) {
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index c1a1bd8ddf1..3f4d95751d5 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -299,6 +299,8 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
struct list_head *head);
+extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_readdata_release(struct nfs_read_data *rdata);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 85f1690ca08..d4bc9ed9174 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.dentry_ops = &nfs_dentry_operations,
.dir_inode_ops = &nfs3_dir_inode_operations,
.file_inode_ops = &nfs3_file_inode_operations,
+ .file_ops = &nfs_file_operations,
.getroot = nfs3_proc_get_root,
.getattr = nfs3_proc_getattr,
.setattr = nfs3_proc_setattr,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b60fddf606f..be2bbac1381 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2464,8 +2464,7 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst
case -NFS4ERR_BADNAME:
return -ENOENT;
case -NFS4ERR_MOVED:
- err = nfs4_get_referral(dir, name, fattr, fhandle);
- break;
+ return nfs4_get_referral(dir, name, fattr, fhandle);
case -NFS4ERR_WRONGSEC:
nfs_fixup_secinfo_attributes(fattr, fhandle);
}
@@ -6253,6 +6252,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.dentry_ops = &nfs4_dentry_operations,
.dir_inode_ops = &nfs4_dir_inode_operations,
.file_inode_ops = &nfs4_file_inode_operations,
+ .file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index baf73536bc0..8e672a2b2d6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1260,6 +1260,25 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+ struct nfs_pageio_descriptor pgio;
+
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
+
+ nfs_pageio_init_read_mds(&pgio, data->inode);
+
+ while (!list_empty(&data->pages)) {
+ struct nfs_page *req = nfs_list_entry(data->pages.next);
+
+ nfs_list_remove_request(req);
+ nfs_pageio_add_request(&pgio, req);
+ }
+ nfs_pageio_complete(&pgio);
+}
+
/*
* Called by non rpc-based layout drivers
*/
@@ -1268,11 +1287,8 @@ void pnfs_ld_read_done(struct nfs_read_data *data)
if (likely(!data->pnfs_error)) {
__nfs4_read_done_cb(data);
data->mds_ops->rpc_call_done(&data->task, data);
- } else {
- put_lseg(data->lseg);
- data->lseg = NULL;
- dprintk("pnfs write error = %d\n", data->pnfs_error);
- }
+ } else
+ pnfs_ld_handle_read_error(data);
data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index ac40b8535d7..f48125da198 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.dentry_ops = &nfs_dentry_operations,
.dir_inode_ops = &nfs_dir_inode_operations,
.file_inode_ops = &nfs_file_inode_operations,
+ .file_ops = &nfs_file_operations,
.getroot = nfs_proc_get_root,
.getattr = nfs_proc_getattr,
.setattr = nfs_proc_setattr,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8b48ec63f72..cfa175c223d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -109,7 +109,7 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
}
}
-static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
struct inode *inode)
{
nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
@@ -534,23 +534,13 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
static void nfs_readpage_release_full(void *calldata)
{
struct nfs_read_data *data = calldata;
- struct nfs_pageio_descriptor pgio;
- if (data->pnfs_error) {
- nfs_pageio_init_read_mds(&pgio, data->inode);
- pgio.pg_recoalesce = 1;
- }
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- if (!data->pnfs_error)
- nfs_readpage_release(req);
- else
- nfs_pageio_add_request(&pgio, req);
+ nfs_readpage_release(req);
}
- if (data->pnfs_error)
- nfs_pageio_complete(&pgio);
nfs_readdata_release(calldata);
}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ed553c60de8..3165aebb43c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
- goto out;
+ goto out_commit;
}
dquot_free_space_nodirty(inode,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c1efe939c77..78b68af3b0e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
}
if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+ /*
+ * Unlock the page and cycle ip_alloc_sem so that we don't
+ * busyloop waiting for ip_alloc_sem to unlock
+ */
ret = AOP_TRUNCATED_PAGE;
+ unlock_page(page);
+ unlock = 0;
+ down_read(&oi->ip_alloc_sem);
+ up_read(&oi->ip_alloc_sem);
goto out_inode_unlock;
}
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
int level;
+ wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
/* this io's submitter should not have unlocked this before we could */
BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
if (ocfs2_iocb_is_sem_locked(iocb))
ocfs2_iocb_clear_sem_locked(iocb);
+ if (ocfs2_iocb_is_unaligned_aio(iocb)) {
+ ocfs2_iocb_clear_unaligned_aio(iocb);
+
+ if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
+ waitqueue_active(wq)) {
+ wake_up_all(wq);
+ }
+ }
+
ocfs2_iocb_clear_rw_locked(iocb);
level = ocfs2_iocb_rw_locked_level(iocb);
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt {
struct page *w_target_page;
/*
+ * w_target_locked is used for page_mkwrite path indicating no unlocking
+ * against w_target_page in ocfs2_write_end_nolock.
+ */
+ unsigned int w_target_locked:1;
+
+ /*
* ocfs2_write_end() uses this to know what the real range to
* write in the target should be.
*/
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
+ int i;
+
+ /*
+ * w_target_locked is only set to true in the page_mkwrite() case.
+ * The intent is to allow us to lock the target page from write_begin()
+ * to write_end(). The caller must hold a ref on w_target_page.
+ */
+ if (wc->w_target_locked) {
+ BUG_ON(!wc->w_target_page);
+ for (i = 0; i < wc->w_num_pages; i++) {
+ if (wc->w_target_page == wc->w_pages[i]) {
+ wc->w_pages[i] = NULL;
+ break;
+ }
+ }
+ mark_page_accessed(wc->w_target_page);
+ page_cache_release(wc->w_target_page);
+ }
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
*/
lock_page(mmap_page);
+ /* Exit and let the caller retry */
if (mmap_page->mapping != mapping) {
+ WARN_ON(mmap_page->mapping);
unlock_page(mmap_page);
- /*
- * Sanity check - the locking in
- * ocfs2_pagemkwrite() should ensure
- * that this code doesn't trigger.
- */
- ret = -EINVAL;
- mlog_errno(ret);
+ ret = -EAGAIN;
goto out;
}
page_cache_get(mmap_page);
wc->w_pages[i] = mmap_page;
+ wc->w_target_locked = true;
} else {
wc->w_pages[i] = find_or_create_page(mapping, index,
GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
wc->w_target_page = wc->w_pages[i];
}
out:
+ if (ret)
+ wc->w_target_locked = false;
return ret;
}
@@ -1817,11 +1858,23 @@ try_again:
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
- if (ret) {
+ if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out_quota;
}
+ /*
+ * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
+ * the target page. In this case, we exit with no error and no target
+ * page. This will trigger the caller, page_mkwrite(), to re-try
+ * the operation.
+ */
+ if (ret == -EAGAIN) {
+ BUG_ON(wc->w_target_page);
+ ret = 0;
+ goto out_quota;
+ }
+
ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
len);
if (ret) {
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 75cf3ad987a..ffb2da370a9 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
OCFS2_IOCB_RW_LOCK = 0,
OCFS2_IOCB_RW_LOCK_LEVEL,
OCFS2_IOCB_SEM,
+ OCFS2_IOCB_UNALIGNED_IO,
OCFS2_IOCB_NUM_LOCKS
};
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
#define ocfs2_iocb_is_sem_locked(iocb) \
test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+
+#define ocfs2_iocb_set_unaligned_aio(iocb) \
+ set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_unaligned_aio(iocb) \
+ clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_unaligned_aio(iocb) \
+ test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+
+#define OCFS2_IOEND_WQ_HASH_SZ 37
+#define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\
+ OCFS2_IOEND_WQ_HASH_SZ])
+extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
#endif /* OCFS2_FILE_H */
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9a3e6bbff27..a4e855e3690 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -216,6 +216,7 @@ struct o2hb_region {
struct list_head hr_all_item;
unsigned hr_unclean_stop:1,
+ hr_aborted_start:1,
hr_item_pinned:1,
hr_item_dropped:1;
@@ -254,6 +255,10 @@ struct o2hb_region {
* a more complete api that doesn't lead to this sort of fragility. */
atomic_t hr_steady_iterations;
+ /* terminate o2hb thread if it does not reach steady state
+ * (hr_steady_iterations == 0) within hr_unsteady_iterations */
+ atomic_t hr_unsteady_iterations;
+
char hr_dev_name[BDEVNAME_SIZE];
unsigned int hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
+ /* Arm writeout only after thread reaches steady state */
+ if (atomic_read(&reg->hr_steady_iterations) != 0)
+ return;
+
mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
O2HB_MAX_WRITE_TIMEOUT_MS);
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
return read == computed;
}
-/* We want to make sure that nobody is heartbeating on top of us --
- * this will help detect an invalid configuration. */
-static void o2hb_check_last_timestamp(struct o2hb_region *reg)
+/*
+ * Compare the slot data with what we wrote in the last iteration.
+ * If the match fails, print an appropriate error message. This is to
+ * detect errors like... another node heartbeating on the same slot,
+ * flaky device that is losing writes, etc.
+ * Returns 1 if check succeeds, 0 otherwise.
+ */
+static int o2hb_check_own_slot(struct o2hb_region *reg)
{
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
slot = &reg->hr_slots[o2nm_this_node()];
/* Don't check on our 1st timestamp */
if (!slot->ds_last_time)
- return;
+ return 0;
hb_block = slot->ds_raw_block;
if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
hb_block->hb_node == slot->ds_node_num)
- return;
+ return 1;
#define ERRSTR1 "Another node is heartbeating on device"
#define ERRSTR2 "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
(unsigned long long)slot->ds_last_time, hb_block->hb_node,
(unsigned long long)le64_to_cpu(hb_block->hb_generation),
(unsigned long long)le64_to_cpu(hb_block->hb_seq));
+
+ return 0;
}
static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
o2nm_node_put(node);
}
-static void o2hb_set_quorum_device(struct o2hb_region *reg,
- struct o2hb_disk_slot *slot)
+static void o2hb_set_quorum_device(struct o2hb_region *reg)
{
- assert_spin_locked(&o2hb_live_lock);
-
if (!o2hb_global_heartbeat_active())
return;
- if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ /* Prevent race with o2hb_heartbeat_group_drop_item() */
+ if (kthread_should_stop())
+ return;
+
+ /* Tag region as quorum only after thread reaches steady state */
+ if (atomic_read(&reg->hr_steady_iterations) != 0)
return;
+ spin_lock(&o2hb_live_lock);
+
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ goto unlock;
+
/*
* A region can be added to the quorum only when it sees all
* live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
*/
if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
sizeof(o2hb_live_node_bitmap)))
- return;
-
- if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
- return;
+ goto unlock;
- printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n",
- config_item_name(&reg->hr_item));
+ printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
o2hb_region_unpin(NULL);
+unlock:
+ spin_unlock(&o2hb_live_lock);
}
static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
slot->ds_equal_samples = 0;
}
out:
- o2hb_set_quorum_device(reg, slot);
-
spin_unlock(&o2hb_live_lock);
o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
- int i, ret, highest_node, change = 0;
+ int i, ret, highest_node;
+ int membership_change = 0, own_slot_ok = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
sizeof(configured_nodes));
if (ret) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
/*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
if (highest_node >= O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
- return -EINVAL;
+ mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
+ ret = -EINVAL;
+ goto bail;
}
/* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
/* With an up to date view of the slots, we can check that no
* other node has been improperly configured to heartbeat in
* our slot. */
- o2hb_check_last_timestamp(reg);
+ own_slot_ok = o2hb_check_own_slot(reg);
/* fill in the proper info for our next heartbeat */
o2hb_prepare_block(reg, reg->hr_generation);
- /* And fire off the write. Note that we don't wait on this I/O
- * until later. */
ret = o2hb_issue_node_write(reg, &write_wc);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
i = -1;
while((i = find_next_bit(configured_nodes,
O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
- change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+ membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
}
/*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
* disk */
mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
write_wc.wc_error, reg->hr_dev_name);
- return write_wc.wc_error;
+ ret = write_wc.wc_error;
+ goto bail;
}
- o2hb_arm_write_timeout(reg);
+ /* Skip disarming the timeout if own slot has stale/bad data */
+ if (own_slot_ok) {
+ o2hb_set_quorum_device(reg);
+ o2hb_arm_write_timeout(reg);
+ }
+bail:
/* let the person who launched us know when things are steady */
- if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
- if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ if (!ret && own_slot_ok && !membership_change) {
+ if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ wake_up(&o2hb_steady_queue);
+ }
+ }
+
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
+ printk(KERN_NOTICE "o2hb: Unable to stabilize "
+ "heartbeart on region %s (%s)\n",
+ config_item_name(&reg->hr_item),
+ reg->hr_dev_name);
+ atomic_set(&reg->hr_steady_iterations, 0);
+ reg->hr_aborted_start = 1;
wake_up(&o2hb_steady_queue);
+ ret = -EIO;
+ }
}
- return 0;
+ return ret;
}
/* Subtract b from a, storing the result in a. a *must* have a larger
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
/* Pin node */
o2nm_depend_this_node();
- while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+ while (!kthread_should_stop() &&
+ !reg->hr_unclean_stop && !reg->hr_aborted_start) {
/* We track the time spent inside
* o2hb_do_disk_heartbeat so that we avoid more than
* hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
* likely to time itself out. */
do_gettimeofday(&before_hb);
- i = 0;
- do {
- ret = o2hb_do_disk_heartbeat(reg);
- } while (ret && ++i < 2);
+ ret = o2hb_do_disk_heartbeat(reg);
do_gettimeofday(&after_hb);
elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
elapsed_msec);
- if (elapsed_msec < reg->hr_timeout_ms) {
+ if (!kthread_should_stop() &&
+ elapsed_msec < reg->hr_timeout_ms) {
/* the kthread api has blocked signals for us so no
* need to record the return value. */
msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
* to timeout on this region when we could just as easily
* write a clear generation - thus indicating to them that
* this node has left this region.
- *
- * XXX: Should we skip this on unclean_stop? */
- o2hb_prepare_block(reg, 0);
- ret = o2hb_issue_node_write(reg, &write_wc);
- if (ret == 0) {
- o2hb_wait_on_io(reg, &write_wc);
- } else {
- mlog_errno(ret);
+ */
+ if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
+ o2hb_prepare_block(reg, 0);
+ ret = o2hb_issue_node_write(reg, &write_wc);
+ if (ret == 0)
+ o2hb_wait_on_io(reg, &write_wc);
+ else
+ mlog_errno(ret);
}
/* Unpin node */
o2nm_undepend_this_node();
- mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+ mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
return 0;
}
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
struct o2hb_debug_buf *db = inode->i_private;
struct o2hb_region *reg;
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long lts;
char *buf = NULL;
int i = -1;
int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
reg = (struct o2hb_region *)db->db_data;
- out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
- jiffies_to_msecs(jiffies -
- reg->hr_last_timeout_start));
+ lts = reg->hr_last_timeout_start;
+ /* If 0, it has never been set before */
+ if (lts)
+ lts = jiffies_to_msecs(jiffies - lts);
+ out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
goto done;
case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
struct page *page;
struct o2hb_region *reg = to_o2hb_region(item);
+ mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
+
if (reg->hr_tmp_block)
kfree(reg->hr_tmp_block);
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
live_threshold <<= 1;
spin_unlock(&o2hb_live_lock);
}
- atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
+ ++live_threshold;
+ atomic_set(&reg->hr_steady_iterations, live_threshold);
+ /* unsteady_iterations is double the steady_iterations */
+ atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
ret = wait_event_interruptible(o2hb_steady_queue,
atomic_read(&reg->hr_steady_iterations) == 0);
if (ret) {
- /* We got interrupted (hello ptrace!). Clean up */
- spin_lock(&o2hb_live_lock);
- hb_task = reg->hr_task;
- reg->hr_task = NULL;
- spin_unlock(&o2hb_live_lock);
+ atomic_set(&reg->hr_steady_iterations, 0);
+ reg->hr_aborted_start = 1;
+ }
- if (hb_task)
- kthread_stop(hb_task);
+ if (reg->hr_aborted_start) {
+ ret = -EIO;
goto out;
}
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
ret = -EIO;
if (hb_task && o2hb_global_heartbeat_active())
- printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
- config_item_name(&reg->hr_item));
+ printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
out:
if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
/* stop the thread when the user removes the region dir */
spin_lock(&o2hb_live_lock);
- if (o2hb_global_heartbeat_active()) {
- clear_bit(reg->hr_region_num, o2hb_region_bitmap);
- clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
- if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
- quorum_region = 1;
- clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
- }
hb_task = reg->hr_task;
reg->hr_task = NULL;
reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
if (hb_task)
kthread_stop(hb_task);
+ if (o2hb_global_heartbeat_active()) {
+ spin_lock(&o2hb_live_lock);
+ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+ clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ quorum_region = 1;
+ clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+ spin_unlock(&o2hb_live_lock);
+ printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
+ ((atomic_read(&reg->hr_steady_iterations) == 0) ?
+ "stopped" : "start aborted"), config_item_name(item),
+ reg->hr_dev_name);
+ }
+
/*
* If we're racing a dev_write(), we need to wake them. They will
* check reg->hr_task
*/
if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ reg->hr_aborted_start = 1;
atomic_set(&reg->hr_steady_iterations, 0);
wake_up(&o2hb_steady_queue);
}
- if (o2hb_global_heartbeat_active())
- printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
- config_item_name(&reg->hr_item));
-
config_item_put(item);
if (!o2hb_global_heartbeat_active() || !quorum_region)
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 3a5835904b3..dc45deb19e6 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -47,6 +47,7 @@
#define SC_DEBUG_NAME "sock_containers"
#define NST_DEBUG_NAME "send_tracking"
#define STATS_DEBUG_NAME "stats"
+#define NODES_DEBUG_NAME "connected_nodes"
#define SHOW_SOCK_CONTAINERS 0
#define SHOW_SOCK_STATS 1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
static struct dentry *sc_dentry;
static struct dentry *nst_dentry;
static struct dentry *stats_dentry;
+static struct dentry *nodes_dentry;
static DEFINE_SPINLOCK(o2net_debug_lock);
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
.release = sc_fop_release,
};
-int o2net_debugfs_init(void)
+static int o2net_fill_bitmap(char *buf, int len)
{
- o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
- if (!o2net_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int i = -1, out = 0;
- nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &nst_seq_fops);
- if (!nst_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ o2net_fill_node_map(map, sizeof(map));
- sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &sc_seq_fops);
- if (!sc_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
+ out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+ out += snprintf(buf + out, PAGE_SIZE - out, "\n");
- stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &stats_seq_fops);
- if (!stats_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ return out;
+}
+
+static int nodes_fop_open(struct inode *inode, struct file *file)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
+
+ file->private_data = buf;
return 0;
-bail:
- debugfs_remove(stats_dentry);
- debugfs_remove(sc_dentry);
- debugfs_remove(nst_dentry);
- debugfs_remove(o2net_dentry);
- return -ENOMEM;
}
+static int o2net_debug_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t o2net_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
+}
+
+static const struct file_operations nodes_fops = {
+ .open = nodes_fop_open,
+ .release = o2net_debug_release,
+ .read = o2net_debug_read,
+ .llseek = generic_file_llseek,
+};
+
void o2net_debugfs_exit(void)
{
+ debugfs_remove(nodes_dentry);
debugfs_remove(stats_dentry);
debugfs_remove(sc_dentry);
debugfs_remove(nst_dentry);
debugfs_remove(o2net_dentry);
}
+int o2net_debugfs_init(void)
+{
+ mode_t mode = S_IFREG|S_IRUSR;
+
+ o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
+ if (o2net_dentry)
+ nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &nst_seq_fops);
+ if (nst_dentry)
+ sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &sc_seq_fops);
+ if (sc_dentry)
+ stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &stats_seq_fops);
+ if (stats_dentry)
+ nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &nodes_fops);
+ if (nodes_dentry)
+ return 0;
+
+ o2net_debugfs_exit();
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+}
+
#endif /* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ad7d0c155de..044e7b58d31 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_NOTICE "o2net: no longer connected to "
+ printk(KERN_NOTICE "o2net: No longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
cancel_delayed_work(&nn->nn_connect_expired);
printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
- "connected to" : "accepted connection from",
+ "Connected to" : "Accepted connection from",
SC_NODEF_ARGS(sc));
}
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
- printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+ printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
" shutdown, state %d\n",
SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
return ret;
}
+/* Get a map of all nodes to which this node is currently connected */
+void o2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ struct o2net_sock_container *sc;
+ int node, ret;
+
+ BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memset(map, 0, bytes);
+ for (node = 0; node < O2NM_MAX_NODES; ++node) {
+ o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+ if (!ret) {
+ set_bit(node, map);
+ sc_put(sc);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(o2net_fill_node_map);
+
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
- mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
- "version %llu but %llu is required, disconnecting\n",
- SC_NODEF_ARGS(sc),
- (unsigned long long)be64_to_cpu(hand->protocol_version),
- O2NET_PROTOCOL_VERSION);
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
+ "protocol version %llu but %llu is required. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
+ O2NET_PROTOCOL_VERSION);
/* don't bother reconnecting if its the wrong version. */
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
*/
if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
o2net_idle_timeout()) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2net_idle_timeout_ms),
- o2net_idle_timeout());
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
+ "idle timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2net_idle_timeout_ms),
+ o2net_idle_timeout());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
o2net_keepalive_delay()) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2net_keepalive_delay_ms),
- o2net_keepalive_delay());
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
+ "delay of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2net_keepalive_delay_ms),
+ o2net_keepalive_delay());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
O2HB_MAX_WRITE_TIMEOUT_MS) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
- O2HB_MAX_WRITE_TIMEOUT_MS);
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
+ "timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
+ O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
{
struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-
#ifdef CONFIG_DEBUG_FS
- ktime_t now = ktime_get();
+ unsigned long msecs = ktime_to_ms(ktime_get()) -
+ ktime_to_ms(sc->sc_tv_timer);
+#else
+ unsigned long msecs = o2net_idle_timeout();
#endif
- printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
- "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
- o2net_idle_timeout() / 1000,
- o2net_idle_timeout() % 1000);
-
-#ifdef CONFIG_DEBUG_FS
- mlog(ML_NOTICE, "Here are some times that might help debug the "
- "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
- "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
- (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
- (long long)ktime_to_us(sc->sc_tv_data_ready),
- (long long)ktime_to_us(sc->sc_tv_advance_start),
- (long long)ktime_to_us(sc->sc_tv_advance_stop),
- sc->sc_msg_key, sc->sc_msg_type,
- (long long)ktime_to_us(sc->sc_tv_func_start),
- (long long)ktime_to_us(sc->sc_tv_func_stop));
-#endif
+ printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
+ "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
+ msecs / 1000, msecs % 1000);
/*
* Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
out:
if (ret) {
- mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
- "with errno %d\n", SC_NODEF_ARGS(sc), ret);
+ printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
+ " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
/* 0 err so that another will be queued and attempted
* from set_nn_state */
if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
- mlog(ML_ERROR, "no connection established with node %u after "
- "%u.%u seconds, giving up and returning errors.\n",
+ printk(KERN_NOTICE "o2net: No connection established with "
+ "node %u after %u.%u seconds, giving up.\n",
o2net_num_from_nn(nn),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
- mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n",
- &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
+ "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
if (o2nm_this_node() >= node->nd_num) {
local_node = o2nm_get_node_by_num(o2nm_this_node());
- mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
- "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
- local_node->nd_name, local_node->nd_num,
- &(local_node->nd_ipv4_address),
- ntohs(local_node->nd_ipv4_port),
- node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
+ "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+ "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port), node->nd_name,
+ node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
ret = 0;
spin_unlock(&nn->nn_lock);
if (ret) {
- mlog(ML_NOTICE, "attempt to connect from node '%s' at "
- "%pI4:%d but it already has an open connection\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
+ "at %pI4:%d but it already has an open connection\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
goto out;
}
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
- mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
+ printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
goto out;
}
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
sock->sk->sk_reuse = 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0) {
- mlog(ML_ERROR, "unable to bind socket at %pI4:%u, "
- "ret=%d\n", &addr, ntohs(port), ret);
+ printk(KERN_ERR "o2net: Error %d while binding socket at "
+ "%pI4:%u\n", ret, &addr, ntohs(port));
goto out;
}
ret = sock->ops->listen(sock, 64);
- if (ret < 0) {
- mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n",
- &addr, ntohs(port), ret);
- }
+ if (ret < 0)
+ printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
+ ret, &addr, ntohs(port));
out:
if (ret) {
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index fd6179eb26d..5bada2a69b5 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
struct list_head *unreg_list);
void o2net_unregister_handler_list(struct list_head *list);
+void o2net_fill_node_map(unsigned long *map, unsigned bytes);
+
struct o2nm_node;
int o2net_register_hb_callbacks(void);
void o2net_unregister_hb_callbacks(void);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e2878b5895f..8fe4e2892ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
if (pde)
le16_add_cpu(&pde->rec_len,
le16_to_cpu(de->rec_len));
- else
- de->inode = 0;
+ de->inode = 0;
dir->i_version++;
ocfs2_journal_dirty(handle, bh);
goto bail;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index d602abb51b6..a5952ceecba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
-void __dlm_unhash_lockres(struct dlm_lock_resource *res);
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
const char *name,
unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int namelen);
-#define dlm_lockres_set_refmap_bit(bit,res) \
- __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
-#define dlm_lockres_clear_refmap_bit(bit,res) \
- __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
-static inline void __dlm_lockres_set_refmap_bit(int bit,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
-{
- //printk("%s:%d:%.*s: setting bit %d\n", file, line,
- // res->lockname.len, res->lockname.name, bit);
- set_bit(bit, res->refmap);
-}
-
-static inline void __dlm_lockres_clear_refmap_bit(int bit,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
-{
- //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
- // res->lockname.len, res->lockname.name, bit);
- clear_bit(bit, res->refmap);
-}
-
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- const char *file,
- int line);
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- int new_lockres,
- const char *file,
- int line);
-#define dlm_lockres_drop_inflight_ref(d,r) \
- __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref(d,r) \
- __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref_new(d,r) \
- __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ed6b95dcf9..92f2ead0fab 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
-void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
- if (!hlist_unhashed(&lockres->hash_node)) {
- hlist_del_init(&lockres->hash_node);
- dlm_lockres_put(lockres);
- }
+ if (hlist_unhashed(&res->hash_node))
+ return;
+
+ mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ hlist_del_init(&res->hash_node);
+ dlm_lockres_put(res);
}
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res)
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct hlist_head *bucket;
struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
dlm_lockres_get(res);
hlist_add_head(&res->hash_node, bucket);
+
+ mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
}
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
static void __dlm_print_nodes(struct dlm_ctxt *dlm)
{
- int node = -1;
+ int node = -1, num = 0;
assert_spin_locked(&dlm->spinlock);
- printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
-
+ printk("( ");
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
printk("%d ", node);
+ ++num;
}
- printk("\n");
+ printk(") %u nodes\n", num);
}
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
node = exit_msg->node_idx;
- printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
-
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
clear_bit(node, dlm->exit_domain_map);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
__dlm_print_nodes(dlm);
/* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+ printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
clear_bit(assert->node_idx, dlm->exit_domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
+ printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
assert->node_idx, dlm->name);
__dlm_print_nodes(dlm);
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
bail:
spin_lock(&dlm->spinlock);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- if (!status)
+ if (!status) {
+ printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
__dlm_print_nodes(dlm);
+ }
spin_unlock(&dlm->spinlock);
if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
goto leave;
}
- if (!o2hb_check_local_node_heartbeating()) {
- mlog(ML_ERROR, "the local node has not been configured, or is "
- "not heartbeating\n");
- ret = -EPROTO;
- goto leave;
- }
-
mlog(0, "register called for domain \"%s\"\n", domain);
retry:
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f..975810b9849 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
kick_thread = 1;
}
}
- /* reduce the inflight count, this may result in the lockres
- * being purged below during calc_usage */
- if (lock->ml.node == dlm->node_num)
- dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
lock->ml.type, res->lockname.len,
res->lockname.name, flags);
+ /*
+ * Wait if resource is getting recovered, remastered, etc.
+ * If the resource was remastered and new owner is self, then exit.
+ */
spin_lock(&res->spinlock);
-
- /* will exit this call with spinlock held */
__dlm_wait_on_lockres(res);
+ if (res->owner == dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ return DLM_RECOVERING;
+ }
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
sizeof(create), res->owner, &status);
if (tmpret >= 0) {
- // successfully sent and received
- ret = status; // this is already a dlm_status
+ ret = status;
if (ret == DLM_REJECTED) {
- mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
- "no longer owned by %u. that node is coming back "
- "up currently.\n", dlm->name, create.namelen,
+ mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+ "owned by node %u. That node is coming back up "
+ "currently.\n", dlm->name, create.namelen,
create.name, res->owner);
dlm_print_one_lock_resource(res);
BUG();
}
} else {
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
- res->owner);
- if (dlm_is_host_down(tmpret)) {
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+ "node %u\n", dlm->name, create.namelen, create.name,
+ tmpret, res->owner);
+ if (dlm_is_host_down(tmpret))
ret = DLM_RECOVERING;
- mlog(0, "node %u died so returning DLM_RECOVERING "
- "from lock message!\n", res->owner);
- } else {
+ else
ret = dlm_err_to_dlm_status(tmpret);
- }
}
return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
/* zero memory only if kernel-allocated */
lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
if (!lksb) {
- kfree(lock);
+ kmem_cache_free(dlm_lock_cache, lock);
return NULL;
}
kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
- mlog(0, "retrying lock with migration/"
- "recovery/in progress\n");
msleep(100);
- /* no waiting for dlm_reco_thread */
if (recovery) {
if (status != DLM_RECOVERING)
goto retry_lock;
-
- mlog(0, "%s: got RECOVERING "
- "for $RECOVERY lock, master "
- "was %u\n", dlm->name,
- res->owner);
/* wait to see the node go down, then
* drop down and allow the lockres to
* get cleaned up. need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
}
}
+ /* Inflight taken in dlm_get_lock_resource() is dropped here */
+ spin_lock(&res->spinlock);
+ dlm_lockres_drop_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_kick_thread(dlm, res);
+
if (status != DLM_NORMAL) {
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
if (status != DLM_NOTQUEUED)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e..005261c333b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -631,39 +631,54 @@ error:
return NULL;
}
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- int new_lockres,
- const char *file,
- int line)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
{
- if (!new_lockres)
- assert_spin_locked(&res->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
+{
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ clear_bit(bit, res->refmap);
+}
+
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
- if (!test_bit(dlm->node_num, res->refmap)) {
- BUG_ON(res->inflight_locks != 0);
- dlm_lockres_set_refmap_bit(dlm->node_num, res);
- }
res->inflight_locks++;
- mlog(0, "%s:%.*s: inflight++: now %u\n",
- dlm->name, res->lockname.len, res->lockname.name,
- res->inflight_locks);
+
+ mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
}
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
BUG_ON(res->inflight_locks == 0);
+
res->inflight_locks--;
- mlog(0, "%s:%.*s: inflight--: now %u\n",
- dlm->name, res->lockname.len, res->lockname.name,
- res->inflight_locks);
- if (res->inflight_locks == 0)
- dlm_lockres_clear_refmap_bit(dlm->node_num, res);
+
+ mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
+
wake_up(&res->wq);
}
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
unsigned int hash;
int tries = 0;
int bit, wait_on_recovery = 0;
- int drop_inflight_if_nonlocal = 0;
BUG_ON(!lockid);
@@ -709,36 +723,33 @@ lookup:
spin_lock(&dlm->spinlock);
tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
if (tmpres) {
- int dropping_ref = 0;
-
spin_unlock(&dlm->spinlock);
-
spin_lock(&tmpres->spinlock);
- /* We wait for the other thread that is mastering the resource */
+ /* Wait on the thread that is mastering the resource */
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
__dlm_wait_on_lockres(tmpres);
BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
}
- if (tmpres->owner == dlm->node_num) {
- BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
- dlm_lockres_grab_inflight_ref(dlm, tmpres);
- } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
- dropping_ref = 1;
- spin_unlock(&tmpres->spinlock);
-
- /* wait until done messaging the master, drop our ref to allow
- * the lockres to be purged, start over. */
- if (dropping_ref) {
- spin_lock(&tmpres->spinlock);
- __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
+ /* Wait on the resource purge to complete before continuing */
+ if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+ BUG_ON(tmpres->owner == dlm->node_num);
+ __dlm_wait_on_lockres_flags(tmpres,
+ DLM_LOCK_RES_DROPPING_REF);
spin_unlock(&tmpres->spinlock);
dlm_lockres_put(tmpres);
tmpres = NULL;
goto lookup;
}
- mlog(0, "found in hash!\n");
+ /* Grab inflight ref to pin the resource */
+ dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+ spin_unlock(&tmpres->spinlock);
if (res)
dlm_lockres_put(res);
res = tmpres;
@@ -829,8 +840,8 @@ lookup:
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
- "recover before lock mastery can begin\n",
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
}
@@ -843,12 +854,11 @@ lookup:
/* finally add the lockres to its hash bucket */
__dlm_insert_lockres(dlm, res);
- /* since this lockres is new it doesn't not require the spinlock */
- dlm_lockres_grab_inflight_ref_new(dlm, res);
- /* if this node does not become the master make sure to drop
- * this inflight reference below */
- drop_inflight_if_nonlocal = 1;
+ /* Grab inflight ref to pin the resource */
+ spin_lock(&res->spinlock);
+ dlm_lockres_grab_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
/* get an extra ref on the mle in case this is a BLOCK
* if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
* dlm spinlock would be detectable be a change on the mle,
* so we only need to clear out the recovery map once. */
if (dlm_is_recovery_lock(lockid, namelen)) {
- mlog(ML_NOTICE, "%s: recovery map is not empty, but "
- "must master $RECOVERY lock now\n", dlm->name);
+ mlog(0, "%s: Recovery map is not empty, but must "
+ "master $RECOVERY lock now\n", dlm->name);
if (!dlm_pre_master_reco_lockres(dlm, res))
wait_on_recovery = 0;
else {
@@ -883,8 +893,8 @@ redo_request:
spin_lock(&dlm->spinlock);
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
- "recover before lock mastery can begin\n",
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
} else
@@ -913,8 +923,8 @@ redo_request:
* yet, keep going until it does. this is how the
* master will know that asserts are needed back to
* the lower nodes. */
- mlog(0, "%s:%.*s: requests only up to %u but master "
- "is %u, keep going\n", dlm->name, namelen,
+ mlog(0, "%s: res %.*s, Requests only up to %u but "
+ "master is %u, keep going\n", dlm->name, namelen,
lockid, nodenum, mle->master);
}
}
@@ -924,13 +934,12 @@ wait:
ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
if (ret < 0) {
wait_on_recovery = 1;
- mlog(0, "%s:%.*s: node map changed, redo the "
- "master request now, blocked=%d\n",
- dlm->name, res->lockname.len,
+ mlog(0, "%s: res %.*s, Node map changed, redo the master "
+ "request now, blocked=%d\n", dlm->name, res->lockname.len,
res->lockname.name, blocked);
if (++tries > 20) {
- mlog(ML_ERROR, "%s:%.*s: spinning on "
- "dlm_wait_for_lock_mastery, blocked=%d\n",
+ mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+ "dlm_wait_for_lock_mastery, blocked = %d\n",
dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
goto redo_request;
}
- mlog(0, "lockres mastered by %u\n", res->owner);
+ mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+ res->lockname.name, res->owner);
/* make sure we never continue without this */
BUG_ON(res->owner == O2NM_MAX_NODES);
@@ -952,8 +962,6 @@ wait:
wake_waiters:
spin_lock(&res->spinlock);
- if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
- dlm_lockres_drop_inflight_ref(dlm, res);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
}
if (res->owner == dlm->node_num) {
- mlog(0, "%s:%.*s: setting bit %u in refmap\n",
- dlm->name, namelen, name, request->node_idx);
- dlm_lockres_set_refmap_bit(request->node_idx, res);
+ dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
spin_unlock(&res->spinlock);
response = DLM_MASTER_RESP_YES;
if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
* go back and clean the mles on any
* other nodes */
dispatch_assert = 1;
- dlm_lockres_set_refmap_bit(request->node_idx, res);
- mlog(0, "%s:%.*s: setting bit %u in refmap\n",
- dlm->name, namelen, name,
- request->node_idx);
+ dlm_lockres_set_refmap_bit(dlm, res,
+ request->node_idx);
} else
response = DLM_MASTER_RESP_NO;
} else {
@@ -1702,7 +1706,7 @@ again:
"lockres, set the bit in the refmap\n",
namelen, lockname, to);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(to, res);
+ dlm_lockres_set_refmap_bit(dlm, res, to);
spin_unlock(&res->spinlock);
}
}
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
namelen = res->lockname.len;
BUG_ON(namelen > O2NM_MAX_NAME_LEN);
- mlog(0, "%s:%.*s: sending deref to %d\n",
- dlm->name, namelen, lockname, res->owner);
memset(&deref, 0, sizeof(deref));
deref.node_idx = dlm->node_num;
deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
&deref, sizeof(deref), res->owner, &r);
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
- res->owner);
+ mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+ dlm->name, namelen, lockname, ret, res->owner);
else if (r < 0) {
/* BAD. other node says I did not have a ref. */
- mlog(ML_ERROR,"while dropping ref on %s:%.*s "
- "(master=%u) got %d.\n", dlm->name, namelen,
- lockname, res->owner, r);
+ mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+ dlm->name, namelen, lockname, res->owner, r);
dlm_print_one_lock_resource(res);
BUG();
}
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
else {
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
- dlm_lockres_clear_refmap_bit(node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
}
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
- dlm_lockres_clear_refmap_bit(node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
BUG_ON(!list_empty(&lock->bast_list));
BUG_ON(lock->ast_pending);
BUG_ON(lock->bast_pending);
- dlm_lockres_clear_refmap_bit(lock->ml.node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res,
+ lock->ml.node);
list_del_init(&lock->list);
dlm_lock_put(lock);
/* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: node %u had a ref to this "
"migrating lockres, clearing\n", dlm->name,
res->lockname.len, res->lockname.name, bit);
- dlm_lockres_clear_refmap_bit(bit, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, bit);
}
bit++;
}
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
&migrate, sizeof(migrate), nodenum,
&status);
if (ret < 0) {
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
- dlm->key, nodenum);
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+ "MIGRATE_REQUEST to node %u\n", dlm->name,
+ migrate.namelen, migrate.name, ret, nodenum);
if (!dlm_is_host_down(ret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", ret);
BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
dlm->name, res->lockname.len, res->lockname.name,
nodenum);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(nodenum, res);
+ dlm_lockres_set_refmap_bit(dlm, res, nodenum);
spin_unlock(&res->spinlock);
}
}
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
* mastery reference here since old_master will briefly have
* a reference after the migration completes */
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(old_master, res);
+ dlm_lockres_set_refmap_bit(dlm, res, old_master);
spin_unlock(&res->spinlock);
mlog(0, "now time to do a migrate request to other nodes\n");
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 7efab6d28a2..01ebfd0bdad 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
}
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
- if (timeout) {
- mlog(ML_NOTICE, "%s: waiting %dms for notification of "
- "death of node %u\n", dlm->name, timeout, node);
+ if (dlm_is_node_dead(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_dead(dlm, node),
- msecs_to_jiffies(timeout));
- } else {
- mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
- "of death of node %u\n", dlm->name, node);
+ dlm_is_node_dead(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_dead(dlm, node));
- }
- /* for now, return 0 */
- return 0;
}
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
- if (timeout) {
- mlog(0, "%s: waiting %dms for notification of "
- "recovery of node %u\n", dlm->name, timeout, node);
+ if (dlm_is_node_recovered(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_recovered(dlm, node),
- msecs_to_jiffies(timeout));
- } else {
- mlog(0, "%s: waiting indefinitely for notification "
- "of recovery of node %u\n", dlm->name, node);
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_recovered(dlm, node));
- }
- /* for now, return 0 */
- return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
spin_lock(&dlm->spinlock);
BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+ dlm->name, dlm->reco.dead_node);
dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
}
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
+ printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
wake_up(&dlm->reco.event);
}
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+ printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+ "dead node %u in domain %s\n", dlm->reco.new_master,
+ (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+ dlm->reco.dead_node, dlm->name);
+}
+
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
}
mlog(0, "another node will master this recovery session.\n");
}
- mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
- dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
- dlm->node_num, dlm->reco.dead_node);
+
+ dlm_print_recovery_master(dlm);
/* it is safe to start everything back up here
* because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
return 0;
master_here:
- mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
- "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
- dlm->node_num, dlm->reco.dead_node, dlm->name);
+ dlm_print_recovery_master(dlm);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
/* we should never hit this anymore */
- mlog(ML_ERROR, "error %d remastering locks for node %u, "
- "retrying.\n", status, dlm->reco.dead_node);
+ mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+ "retrying.\n", dlm->name, status, dlm->reco.dead_node);
/* yield a bit to allow any final network messages
* to get handled on remaining nodes */
msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
- mlog(0, "requesting lock info from node %u\n",
+ mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
ndata->node_num);
if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
spin_unlock(&dlm_reco_state_lock);
}
- mlog(0, "done requesting all lock info\n");
+ mlog(0, "%s: Done requesting all lock info\n", dlm->name);
/* nodes should be sending reco data now
* just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
/* negative status is handled by caller */
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
- dlm->key, request_from);
-
+ mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+ "to recover dead node %u\n", dlm->name, ret,
+ request_from, dead_node);
// return from here, then
// sleep until all received or error
return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
if (ret < 0) {
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
- dlm->key, send_to);
+ mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+ "to recover dead node %u\n", dlm->name, ret, send_to,
+ dead_node);
if (!dlm_is_host_down(ret)) {
BUG();
}
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
- dlm->key, send_to);
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+ "node %u (%s)\n", dlm->name, mres->lockname_len,
+ mres->lockname, ret, send_to,
+ (orig_flags & DLM_MRES_MIGRATION ?
+ "migration" : "recovery"));
} else {
/* might get an -ENOMEM back here */
ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
dlm->name, mres->lockname_len, mres->lockname,
from);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(from, res);
+ dlm_lockres_set_refmap_bit(dlm, res, from);
spin_unlock(&res->spinlock);
added++;
break;
@@ -1965,7 +1972,7 @@ skip_lvb:
mlog(0, "%s:%.*s: added lock for node %u, "
"setting refmap bit\n", dlm->name,
res->lockname.len, res->lockname.name, ml->node);
- dlm_lockres_set_refmap_bit(ml->node, res);
+ dlm_lockres_set_refmap_bit(dlm, res, ml->node);
added++;
}
spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
if (res->owner == dead_node) {
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
list_del_init(&res->recovering);
spin_lock(&res->spinlock);
/* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
- if (res->state & DLM_LOCK_RES_RECOVERING) {
- if (res->owner == dead_node) {
- mlog(0, "(this=%u) res %.*s owner=%u "
- "was not on recovering list, but "
- "clearing state anyway\n",
- dlm->node_num, res->lockname.len,
- res->lockname.name, new_master);
- } else if (res->owner == dlm->node_num) {
- mlog(0, "(this=%u) res %.*s owner=%u "
- "was not on recovering list, "
- "owner is THIS node, clearing\n",
- dlm->node_num, res->lockname.len,
- res->lockname.name, new_master);
- } else
- continue;
+ if (!(res->state & DLM_LOCK_RES_RECOVERING))
+ continue;
- if (!list_empty(&res->recovering)) {
- mlog(0, "%s:%.*s: lockres was "
- "marked RECOVERING, owner=%u\n",
- dlm->name, res->lockname.len,
- res->lockname.name, res->owner);
- list_del_init(&res->recovering);
- dlm_lockres_put(res);
- }
- spin_lock(&res->spinlock);
- /* new_master has our reference from
- * the lock state sent during recovery */
- dlm_change_lockres_owner(dlm, res, new_master);
- res->state &= ~DLM_LOCK_RES_RECOVERING;
- if (__dlm_lockres_has_locks(res))
- __dlm_dirty_lockres(dlm, res);
- spin_unlock(&res->spinlock);
- wake_up(&res->wq);
+ if (res->owner != dead_node &&
+ res->owner != dlm->node_num)
+ continue;
+
+ if (!list_empty(&res->recovering)) {
+ list_del_init(&res->recovering);
+ dlm_lockres_put(res);
}
+
+ /* new_master has our reference from
+ * the lock state sent during recovery */
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ if (__dlm_lockres_has_locks(res))
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
}
}
}
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
res->lockname.len, res->lockname.name, freed, dead_node);
__dlm_print_one_lock_resource(res);
}
- dlm_lockres_clear_refmap_bit(dead_node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
"no locks and had not purged before dying\n", dlm->name,
res->lockname.len, res->lockname.name, dead_node);
- dlm_lockres_clear_refmap_bit(dead_node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
}
/* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
dlm_revalidate_lvb(dlm, res, dead_node);
if (res->owner == dead_node) {
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
- mlog(ML_NOTICE, "Ignore %.*s for "
+ mlog(ML_NOTICE, "%s: res %.*s, Skip "
"recovery as it is being freed\n",
- res->lockname.len,
+ dlm->name, res->lockname.len,
res->lockname.name);
} else
dlm_move_lockres_to_recovery_list(dlm,
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 1d6d1d22c47..e73c833fc2a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
int bit;
+ assert_spin_locked(&res->spinlock);
+
if (__dlm_lockres_has_locks(res))
return 0;
+ /* Locks are in the process of being created */
+ if (res->inflight_locks)
+ return 0;
+
if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
return 0;
if (res->state & DLM_LOCK_RES_RECOVERING)
return 0;
+ /* Another node has this resource with this node as the master */
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES)
return 0;
- /*
- * since the bit for dlm->node_num is not set, inflight_locks better
- * be zero
- */
- BUG_ON(res->inflight_locks != 0);
return 1;
}
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
/* clear our bit from the master's refmap, ignore errors */
ret = dlm_drop_lockres_ref(dlm, res);
if (ret < 0) {
- mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
- res->lockname.len, res->lockname.name, ret);
if (!dlm_is_host_down(ret))
BUG();
}
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
BUG();
}
- __dlm_unhash_lockres(res);
+ __dlm_unhash_lockres(dlm, res);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e1ed5e502ff..81a4cd22f80 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
- if (ocfs2_mount_local(osb))
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (write)
+ status = -EROFS;
+ goto out;
+ }
+
if (ocfs2_mount_local(osb))
goto out;
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
- goto bail;
+ goto getbh;
}
if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
mlog_errno(status);
goto bail;
}
-
+getbh:
if (ret_bh) {
status = ocfs2_assign_bh(inode, ret_bh, local_bh);
if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
BUG_ON(!dl);
- if (ocfs2_is_hard_readonly(osb))
- return -EROFS;
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ return -EROFS;
+ return 0;
+ }
if (ocfs2_mount_local(osb))
return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
- if (!ocfs2_mount_local(osb))
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 23457b491e8..2f5b92ef0e5 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -832,6 +832,102 @@ out:
return ret;
}
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+ unsigned int is_last = 0, is_data = 0;
+ u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ u32 cpos, cend, clen, hole_size;
+ u64 extoff, extlen;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_extent_rec rec;
+
+ BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
+
+ ret = ocfs2_inode_lock(inode, &di_bh, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ if (*offset >= inode->i_size) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (origin == SEEK_HOLE)
+ *offset = inode->i_size;
+ goto out_unlock;
+ }
+
+ clen = 0;
+ cpos = *offset >> cs_bits;
+ cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+
+ while (cpos < cend && !is_last) {
+ ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
+ &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ extoff = cpos;
+ extoff <<= cs_bits;
+
+ if (rec.e_blkno == 0ULL) {
+ clen = hole_size;
+ is_data = 0;
+ } else {
+ clen = le16_to_cpu(rec.e_leaf_clusters) -
+ (cpos - le32_to_cpu(rec.e_cpos));
+ is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
+ }
+
+ if ((!is_data && origin == SEEK_HOLE) ||
+ (is_data && origin == SEEK_DATA)) {
+ if (extoff > *offset)
+ *offset = extoff;
+ goto out_unlock;
+ }
+
+ if (!is_last)
+ cpos += clen;
+ }
+
+ if (origin == SEEK_HOLE) {
+ extoff = cpos;
+ extoff <<= cs_bits;
+ extlen = clen;
+ extlen <<= cs_bits;
+
+ if ((extoff + extlen) > inode->i_size)
+ extlen = inode->i_size - extoff;
+ extoff += extlen;
+ if (extoff > *offset)
+ *offset = extoff;
+ goto out_unlock;
+ }
+
+ ret = -ENXIO;
+
+out_unlock:
+
+ brelse(di_bh);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ocfs2_inode_unlock(inode, 0);
+out:
+ if (ret && ret != -ENXIO)
+ ret = -ENXIO;
+ return ret;
+}
+
int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index e79d41c2c90..67ea57d2fd5 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len);
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
+
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
struct ocfs2_extent_list *el,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index de4ea1af041..6e396683c3d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
if (ret < 0)
mlog_errno(ret);
+ if (file->f_flags & O_SYNC)
+ handle->h_sync = 1;
+
ocfs2_commit_trans(osb, handle);
out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
return ret;
}
+static void ocfs2_aiodio_wait(struct inode *inode)
+{
+ wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
+
+ wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
+}
+
+static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
+{
+ int blockmask = inode->i_sb->s_blocksize - 1;
+ loff_t final_size = pos + count;
+
+ if ((pos & blockmask) || (final_size & blockmask))
+ return 1;
+ return 0;
+}
+
static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
struct file *file,
loff_t pos, size_t count,
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
+ int unaligned_dio = 0;
trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
goto out;
}
+ if (direct_io && !is_sync_kiocb(iocb))
+ unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
+ *ppos);
+
/*
* We can't complete the direct I/O as requested, fall back to
* buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
goto relock;
}
+ if (unaligned_dio) {
+ /*
+ * Wait on previous unaligned aio to complete before
+ * proceeding.
+ */
+ ocfs2_aiodio_wait(inode);
+
+ /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
+ atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
+ ocfs2_iocb_set_unaligned_aio(iocb);
+ }
+
/*
* To later detect whether a journal commit for sync writes is
* necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
have_alloc_sem = 0;
+ unaligned_dio = 0;
}
+ if (unaligned_dio)
+ atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+
out:
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
return ret;
}
+/* Refer generic_file_llseek_unlocked() */
+static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret = 0;
+
+ mutex_lock(&inode->i_mutex);
+
+ switch (origin) {
+ case SEEK_SET:
+ break;
+ case SEEK_END:
+ offset += inode->i_size;
+ break;
+ case SEEK_CUR:
+ if (offset == 0) {
+ offset = file->f_pos;
+ goto out;
+ }
+ offset += file->f_pos;
+ break;
+ case SEEK_DATA:
+ case SEEK_HOLE:
+ ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
+ if (ret)
+ goto out;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+ ret = -EINVAL;
+ if (!ret && offset > inode->i_sb->s_maxbytes)
+ ret = -EINVAL;
+ if (ret)
+ goto out;
+
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+
+out:
+ mutex_unlock(&inode->i_mutex);
+ if (ret)
+ return ret;
+ return offset;
+}
+
const struct inode_operations ocfs2_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
* ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
*/
const struct file_operations ocfs2_fops = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.mmap = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
* the cluster.
*/
const struct file_operations ocfs2_fops_no_plocks = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.mmap = ocfs2_mmap,
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index a22d2c09889..17454a904d7 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
trace_ocfs2_cleanup_delete_inode(
(unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
if (sync_data)
- write_inode_now(inode, 1);
+ filemap_write_and_wait(inode->i_mapping);
truncate_inode_pages(&inode->i_data, 0);
}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 1c508b149b3..88924a3133f 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
/* protects extended attribute changes on this inode */
struct rw_semaphore ip_xattr_sem;
+ /* Number of outstanding AIO's which are not page aligned */
+ atomic_t ip_unaligned_aio;
+
/* These fields are protected by ip_lock */
spinlock_t ip_lock;
u32 ip_open_count;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bc91072b721..726ff265b29 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE))
- goto bail_unlock;
+ goto bail_commit;
}
ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
if (status < 0)
mlog_errno(status);
+bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock:
ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
if (!oifi) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_err;
}
if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
o2info_set_request_error(&oifi->ifi_req, req);
kfree(oifi);
-
+out_err:
return status;
}
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
if (!oiff) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_err;
}
if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
o2info_set_request_error(&oiff->iff_req, req);
kfree(oiff);
-
+out_err:
return status;
}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 295d56454e8..0a42ae96dca 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
/* we need to run complete recovery for offline orphan slots */
ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
- mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
- node_num, slot_num,
- MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+ printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
+ "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+ MINOR(osb->sb->s_dev));
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
jbd2_journal_destroy(journal);
+ printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
+ "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+ MINOR(osb->sb->s_dev));
done:
/* drop the lock on this nodes journal */
if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
* every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
* is done to catch any orphans that are left over in orphan directories.
*
+ * It scans all slots, even ones that are in use. It does so to handle the
+ * case described below:
+ *
+ * Node 1 has an inode it was using. The dentry went away due to memory
+ * pressure. Node 1 closes the inode, but it's on the free list. The node
+ * has the open lock.
+ * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
+ * but node 1 has no dentry and doesn't get the message. It trylocks the
+ * open lock, sees that another node has a PR, and does nothing.
+ * Later node 2 runs its orphan dir. It igets the inode, trylocks the
+ * open lock, sees the PR still, and does nothing.
+ * Basically, we have to trigger an orphan iput on node 1. The only way
+ * for this to happen is if node 1 runs node 2's orphan dir.
+ *
* ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
* seconds. It gets an EX lock on os_lockres and checks sequence number
* stored in LVB. If the sequence number has changed, it means some other
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 68cf2f6d3c6..a3385b63ff5 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
- * update on dir + index leaf + dx root update for free list */
+ * update on dir + index leaf + dx root update for free list +
+ * previous dirblock update in the free list */
static inline int ocfs2_link_credits(struct super_block *sb)
{
- return 2*OCFS2_INODE_UPDATE_CREDITS + 3 +
+ return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
ocfs2_quota_trans_credits(sb);
}
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3e9393ca39e..9cd41083e99 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
struct page *page)
{
- int ret;
+ int ret = VM_FAULT_NOPAGE;
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
void *fsdata;
loff_t size = i_size_read(inode);
- /*
- * Another node might have truncated while we were waiting on
- * cluster locks.
- * We don't check size == 0 before the shift. This is borrowed
- * from do_generic_file_read.
- */
last_index = (size - 1) >> PAGE_CACHE_SHIFT;
- if (unlikely(!size || page->index > last_index)) {
- ret = -EINVAL;
- goto out;
- }
/*
- * The i_size check above doesn't catch the case where nodes
- * truncated and then re-extended the file. We'll re-check the
- * page mapping after taking the page lock inside of
- * ocfs2_write_begin_nolock().
+	 * There are cases that lead to the page no longer belonging to the
+	 * mapping.
+	 * 1) pagecache truncates locally due to memory pressure.
+	 * 2) pagecache truncates when another node takes an EX lock against the
+	 * inode lock. See ocfs2_data_convert_worker.
+ *
+ * The i_size check doesn't catch the case where nodes truncated and
+ * then re-extended the file. We'll re-check the page mapping after
+ * taking the page lock inside of ocfs2_write_begin_nolock().
+ *
+	 * Let the VM retry in these cases.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
- /*
- * the page has been umapped in ocfs2_data_downconvert_worker.
- * So return 0 here and let VFS retry.
- */
- ret = 0;
+ if ((page->mapping != inode->i_mapping) ||
+ (!PageUptodate(page)) ||
+ (page_offset(page) >= size))
goto out;
- }
/*
* Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
+ if (ret == -ENOMEM)
+ ret = VM_FAULT_OOM;
+ else
+ ret = VM_FAULT_SIGBUS;
goto out;
}
- ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
- fsdata);
- if (ret < 0) {
- mlog_errno(ret);
+ if (!locked_page) {
+ ret = VM_FAULT_NOPAGE;
goto out;
}
+ ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
+ fsdata);
BUG_ON(ret != len);
- ret = 0;
+ ret = VM_FAULT_LOCKED;
out:
return ret;
}
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
out:
ocfs2_unblock_signals(&oldset);
- if (ret)
- ret = VM_FAULT_SIGBUS;
return ret;
}
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index d53cb706f14..184c76b8c29 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
*/
ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
new_phys_cpos);
- if (!new_phys_cpos) {
+ if (!*new_phys_cpos) {
ret = -ENOSPC;
goto out_commit;
}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 409285854f6..d355e6e36b3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
- __test_and_set_bit_le(bit, bitmap);
+ __set_bit_le(bit, bitmap);
}
#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
{
- __test_and_clear_bit_le(bit, bitmap);
+ __clear_bit_le(bit, bitmap);
}
#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
#define ocfs2_test_bit test_bit_le
#define ocfs2_find_next_zero_bit find_next_zero_bit_le
#define ocfs2_find_next_bit find_next_bit_le
+
+static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
+{
+#if BITS_PER_LONG == 64
+ *bit += ((unsigned long) addr & 7UL) << 3;
+ addr = (void *) ((unsigned long) addr & ~7UL);
+#elif BITS_PER_LONG == 32
+ *bit += ((unsigned long) addr & 3UL) << 3;
+ addr = (void *) ((unsigned long) addr & ~3UL);
+#else
+#error "how many bits you are?!"
+#endif
+ return addr;
+}
+
+static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ ocfs2_set_bit(bit, bitmap);
+}
+
+static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ ocfs2_clear_bit(bit, bitmap);
+}
+
+static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ return ocfs2_test_bit(bit, bitmap);
+}
+
+static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
+ int start)
+{
+ int fix = 0, ret, tmpmax;
+ bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
+ tmpmax = max + fix;
+ start += fix;
+
+ ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
+ if (ret > max)
+ return max;
+ return ret;
+}
+
#endif /* OCFS2_H */
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc8007fc924..f100bf70a90 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
int status = 0;
struct ocfs2_quota_recovery *rec;
- mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
rec = ocfs2_alloc_quota_recovery();
if (!rec)
return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
goto out_commit;
}
lock_buffer(qbh);
- WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap));
- ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
+ WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
+ ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(qbh);
ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
struct inode *lqinode;
unsigned int flags;
- mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
/* Someone else is holding the lock? Then he must be
* doing the recovery. Just skip the file... */
if (status == -EAGAIN) {
- mlog(ML_NOTICE, "skipping quota recovery for slot %d "
- "because quota file is locked.\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
+ "device (%s) for slot %d because quota file is "
+ "locked.\n", osb->dev_str, slot_num);
status = 0;
goto out_put;
} else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
* ol_quota_entries_per_block(sb);
}
- found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0);
+ found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
/* We failed? */
if (found == len) {
mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
struct ocfs2_local_disk_chunk *dchunk;
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
- ocfs2_set_bit(*offset, dchunk->dqc_bitmap);
+ ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, -1);
}
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
(od->dq_chunk->qc_headerbh->b_data);
/* Mark structure as freed */
lock_buffer(od->dq_chunk->qc_headerbh);
- ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
+ ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(od->dq_chunk->qc_headerbh);
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 26fc0014d50..1424c151ccc 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
goto bail;
}
} else
- mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
- slot);
+ printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+ "allocated to this node!\n", slot, osb->dev_str);
ocfs2_set_slot(si, slot, osb->node_num);
osb->slot_num = slot;
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 19965b00c43..94368017edb 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -28,6 +28,7 @@
#include "cluster/masklog.h"
#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
#include "stackglue.h"
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
}
/*
+ * Check if this node is heartbeating and is connected to all other
+ * heartbeating nodes.
+ */
+static int o2cb_cluster_check(void)
+{
+ u8 node_num;
+ int i;
+ unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+ node_num = o2nm_this_node();
+ if (node_num == O2NM_MAX_NODES) {
+ printk(KERN_ERR "o2cb: This node has not been configured.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * o2dlm expects o2net sockets to be created. If not, then
+ * dlm_join_domain() fails with a stack of errors which are both cryptic
+ * and incomplete. The idea here is to detect upfront whether we have
+ * managed to connect to all nodes or not. If not, then list the nodes
+ * to allow the user to check the configuration (incorrect IP, firewall,
+	 * etc.) Yes, this is racy. But it's not the end of the world.
+ */
+#define O2CB_MAP_STABILIZE_COUNT 60
+ for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
+ o2hb_fill_node_map(hbmap, sizeof(hbmap));
+ if (!test_bit(node_num, hbmap)) {
+ printk(KERN_ERR "o2cb: %s heartbeat has not been "
+ "started.\n", (o2hb_global_heartbeat_active() ?
+ "Global" : "Local"));
+ return -EINVAL;
+ }
+ o2net_fill_node_map(netmap, sizeof(netmap));
+ /* Force set the current node to allow easy compare */
+ set_bit(node_num, netmap);
+ if (!memcmp(hbmap, netmap, sizeof(hbmap)))
+ return 0;
+ if (i < O2CB_MAP_STABILIZE_COUNT)
+ msleep(1000);
+ }
+
+ printk(KERN_ERR "o2cb: This node could not connect to nodes:");
+ i = -1;
+ while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
+ i + 1)) < O2NM_MAX_NODES) {
+ if (!test_bit(i, netmap))
+ printk(" %u", i);
+ }
+ printk(".\n");
+
+ return -ENOTCONN;
+}
+
+/*
* Called from the dlm when it's about to evict a node. This is how the
* classic stack signals node death.
*/
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
{
struct ocfs2_cluster_connection *conn = data;
- mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n",
- node_num, conn->cc_namelen, conn->cc_name);
+ printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
+ node_num, conn->cc_namelen, conn->cc_name);
conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
}
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
BUG_ON(conn == NULL);
BUG_ON(conn->cc_proto == NULL);
- /* for now we only have one cluster/node, make sure we see it
- * in the heartbeat universe */
- if (!o2hb_check_local_node_heartbeating()) {
- if (o2hb_global_heartbeat_active())
- mlog(ML_ERROR, "Global heartbeat not started\n");
- rc = -EINVAL;
+ /* Ensure cluster stack is up and all nodes are connected */
+ rc = o2cb_cluster_check();
+ if (rc) {
+ printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
+ "before retrying.\n");
goto out;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 56f61027236..4994f8b0e60 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -54,6 +54,7 @@
#include "ocfs1_fs_compat.h"
#include "alloc.h"
+#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "export.h"
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
ocfs2_set_ro_flag(osb, 1);
- printk(KERN_NOTICE "Readonly device detected. No cluster "
- "services will be utilized for this mount. Recovery "
- "will be skipped.\n");
+ printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
+ "Cluster services will not be used for this mount. "
+ "Recovery will be skipped.\n", osb->dev_str);
}
if (!ocfs2_is_hard_readonly(osb)) {
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
return 0;
}
+wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
static int __init ocfs2_init(void)
{
- int status;
+ int status, i;
ocfs2_print_version();
+ for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
+ init_waitqueue_head(&ocfs2__ioend_wq[i]);
+
status = init_ocfs2_uptodate_cache();
if (status < 0) {
mlog_errno(status);
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data)
ocfs2_extent_map_init(&oi->vfs_inode);
INIT_LIST_HEAD(&oi->ip_io_markers);
oi->ip_dir_start_lookup = 0;
-
+ atomic_set(&oi->ip_unaligned_aio, 0);
init_rwsem(&oi->ip_alloc_sem);
init_rwsem(&oi->ip_xattr_sem);
mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
* If we failed before we got a uuid_str yet, we can't stop
* heartbeat. Otherwise, do it.
*/
- if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str)
+ if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
+ !ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
if (osb->cconn)
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog_errno(status);
goto bail;
}
- cleancache_init_shared_fs((char *)&uuid_net_key, sb);
+ cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
bail:
return status;
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
goto finally;
}
} else {
- mlog(ML_NOTICE, "File system was not unmounted cleanly, "
- "recovering volume.\n");
+ printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
+ "unmounted cleanly, recovering it.\n", osb->dev_str);
}
local = ocfs2_mount_local(osb);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 194fb22ef79..aa9e8777b09 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
}
ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- break;
- }
ocfs2_commit_trans(osb, ctxt.handle);
if (ctxt.meta_ac) {
ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt.meta_ac = NULL;
}
+
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
+
}
if (ctxt.meta_ac)
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 2bd620f0d79..57bbf9078ac 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
}
psinfo = psi;
+ mutex_init(&psinfo->read_mutex);
spin_unlock(&pstore_lock);
if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
void pstore_get_records(int quiet)
{
struct pstore_info *psi = psinfo;
+ char *buf = NULL;
ssize_t size;
u64 id;
enum pstore_type_id type;
struct timespec time;
int failed = 0, rc;
- unsigned long flags;
if (!psi)
return;
- spin_lock_irqsave(&psinfo->buf_lock, flags);
+ mutex_lock(&psi->read_mutex);
rc = psi->open(psi);
if (rc)
goto out;
- while ((size = psi->read(&id, &type, &time, psi)) > 0) {
- rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
+ while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
+ rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
time, psi);
+ kfree(buf);
+ buf = NULL;
if (rc && (rc != -EEXIST || !quiet))
failed++;
}
psi->close(psi);
out:
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ mutex_unlock(&psi->read_mutex);
if (failed)
printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index d30bedfeb7e..ddd46db65b5 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -235,6 +235,8 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
/*
* Mark a region of a framebuffer as dirty.
*
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 1d161cb3aca..12050434d57 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -32,17 +32,16 @@
/**
* User-desired buffer creation information structure.
*
- * @size: requested size for the object.
+ * @size: user-desired memory allocation size.
* - this size value would be page-aligned internally.
* @flags: user request for setting memory type or cache attributes.
- * @handle: returned handle for the object.
- * @pad: just padding to be 64-bit aligned.
+ * @handle: returned handle to the created gem object.
+ *	- this handle will be set by the gem module on the kernel side.
*/
struct drm_exynos_gem_create {
- unsigned int size;
+ uint64_t size;
unsigned int flags;
unsigned int handle;
- unsigned int pad;
};
/**
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index b65be6054a1..be94be6d6f1 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite {
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
+#define RADEON_CHUNK_ID_FLAGS 0x03
+
+/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
+#define RADEON_CS_KEEP_TILING_FLAGS 0x01
struct drm_radeon_cs_chunk {
uint32_t chunk_id;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f88eacb111d..7c05ac202d9 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -10,6 +10,12 @@
#include "osdmap.h"
#include "messenger.h"
+/*
+ * Maximum object name size
+ * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100)
+ */
+#define MAX_OBJ_NAME_SIZE 100
+
struct ceph_msg;
struct ceph_snap_context;
struct ceph_osd_request;
@@ -75,7 +81,7 @@ struct ceph_osd_request {
struct inode *r_inode; /* for use by callbacks */
void *r_priv; /* ditto */
- char r_oid[40]; /* object name */
+ char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */
int r_oid_len;
unsigned long r_stamp; /* send OR check time */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 139c4db55f1..c86c940d1de 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
+ * @maxadj:		maximum adjustment value to mult (~11%)
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
@@ -172,7 +173,7 @@ struct clocksource {
u32 mult;
u32 shift;
u64 max_idle_ns;
-
+ u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index 52b3a4111df..3136ede5a1e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -69,7 +69,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @resume: Called to bring a device on this bus out of sleep mode.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
- * @iommu_ops IOMMU specific operations for this bus, used to attach IOMMU
+ * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
* driver implementations to a bus and allow the driver to do
* bus-specific setup
* @p: The private data of the driver core, only the driver core can
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index a81bf6d23b3..07d103a06d6 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -432,9 +432,6 @@ void i2c_unlock_adapter(struct i2c_adapter *);
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
-/* The numbers to use to set I2C bus address */
-#define ANY_I2C_BUS 0xffff
-
/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
#define I2C_ADDRS(addr, addrs...) \
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 08ffab01e76..94b1e356c02 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -184,7 +184,6 @@ extern struct cred init_cred;
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 82b4c8801a4..8bf2cb9502d 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -243,7 +243,8 @@
/*Registers VDD1, VDD2 voltage values definitions */
-#define VDD1_2_NUM_VOLTS 73
+#define VDD1_2_NUM_VOLT_FINE 73
+#define VDD1_2_NUM_VOLT_COARSE 3
#define VDD1_2_MIN_VOLT 6000
#define VDD1_2_OFFSET 125
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 21440e31fda..eef257c76a4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2552,6 +2552,8 @@ extern void net_disable_timestamp(void);
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ab2c6343361..92ecf5585fa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations;
extern const struct inode_operations nfs3_file_inode_operations;
#endif /* CONFIG_NFS_V3 */
extern const struct file_operations nfs_file_operations;
+#ifdef CONFIG_NFS_V4
+extern const struct file_operations nfs4_file_operations;
+#endif /* CONFIG_NFS_V4 */
extern const struct address_space_operations nfs_file_aops;
extern const struct address_space_operations nfs_dir_aops;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index c74595ba709..2a7c533be5d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1192,6 +1192,7 @@ struct nfs_rpc_ops {
const struct dentry_operations *dentry_ops;
const struct inode_operations *dir_inode_ops;
const struct inode_operations *file_inode_ops;
+ const struct file_operations *file_ops;
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index e3d0b389024..7ef68724f0f 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -12,7 +12,7 @@ struct pci_ats {
unsigned int is_enabled:1; /* Enable bit is set */
};
-#ifdef CONFIG_PCI_IOV
+#ifdef CONFIG_PCI_ATS
extern int pci_enable_ats(struct pci_dev *dev, int ps);
extern void pci_disable_ats(struct pci_dev *dev);
@@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
return dev->ats && dev->ats->is_enabled;
}
-#else /* CONFIG_PCI_IOV */
+#else /* CONFIG_PCI_ATS */
static inline int pci_enable_ats(struct pci_dev *dev, int ps)
{
@@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
return 0;
}
-#endif /* CONFIG_PCI_IOV */
+#endif /* CONFIG_PCI_ATS */
#ifdef CONFIG_PCI_PRI
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 337df0d5d5f..7cda65b5f79 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -338,7 +338,7 @@ struct pci_dev {
struct list_head msi_list;
#endif
struct pci_vpd *vpd;
-#ifdef CONFIG_PCI_IOV
+#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 5c4c8b18c8b..3f3ed83a9aa 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -54,118 +54,145 @@ typedef struct pm_message {
/**
* struct dev_pm_ops - device PM callbacks
*
- * Several driver power state transitions are externally visible, affecting
+ * Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
- * internal transitions to various low power modes, which are transparent
+ * internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
- * The externally visible transitions are handled with the help of the following
- * callbacks included in this structure:
- *
- * @prepare: Prepare the device for the upcoming transition, but do NOT change
- * its hardware state. Prevent new children of the device from being
- * registered after @prepare() returns (the driver's subsystem and
- * generally the rest of the kernel is supposed to prevent new calls to the
- * probe method from being made too once @prepare() has succeeded). If
- * @prepare() detects a situation it cannot handle (e.g. registration of a
- * child already in progress), it may return -EAGAIN, so that the PM core
- * can execute it once again (e.g. after the new child has been registered)
- * to recover from the race condition. This method is executed for all
- * kinds of suspend transitions and is followed by one of the suspend
- * callbacks: @suspend(), @freeze(), or @poweroff().
- * The PM core executes @prepare() for all devices before starting to
- * execute suspend callbacks for any of them, so drivers may assume all of
- * the other devices to be present and functional while @prepare() is being
- * executed. In particular, it is safe to make GFP_KERNEL memory
- * allocations from within @prepare(). However, drivers may NOT assume
- * anything about the availability of the user space at that time and it
- * is not correct to request firmware from within @prepare() (it's too
- * late to do that). [To work around this limitation, drivers may
- * register suspend and hibernation notifiers that are executed before the
- * freezing of tasks.]
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved. First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types. They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that. If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ * the device from being registered after it has returned (the driver's
+ * subsystem and generally the rest of the kernel is supposed to prevent
+ * new calls to the probe method from being made too once @prepare() has
+ * succeeded). If @prepare() detects a situation it cannot handle (e.g.
+ * registration of a child already in progress), it may return -EAGAIN, so
+ * that the PM core can execute it once again (e.g. after a new child has
+ * been registered) to recover from the race condition.
+ * This method is executed for all kinds of suspend transitions and is
+ * followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ * @poweroff(). The PM core executes subsystem-level @prepare() for all
+ * devices before starting to invoke suspend callbacks for any of them, so
+ * generally devices may be assumed to be functional or to respond to
+ * runtime resume requests while @prepare() is being executed. However,
+ * device drivers may NOT assume anything about the availability of user
+ * space at that time and it is NOT valid to request firmware from within
+ * @prepare() (it's too late to do that). It also is NOT valid to allocate
+ * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ * [To work around these limitations, drivers may register suspend and
+ * hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
- * fails before the driver's suspend callback (@suspend(), @freeze(),
- * @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ * fails before the driver's suspend callback: @suspend(), @freeze() or
+ * @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
- * The PM core executes @complete() after it has executed the appropriate
- * resume callback for all devices.
+ * The PM core executes subsystem-level @complete() after it has executed
+ * the appropriate resume callbacks for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
- * contents of main memory are preserved. Quiesce the device, put it into
- * a low power state appropriate for the upcoming system state (such as
- * PCI_D3hot), and enable wakeup events as appropriate.
+ * contents of main memory are preserved. The exact action to perform
+ * depends on the device's subsystem (PM domain, device type, class or bus
+ * type), but generally the device must be quiescent after subsystem-level
+ * @suspend() has returned, so that it doesn't do any I/O or DMA.
+ * Subsystem-level @suspend() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @resume: Executed after waking the system up from a sleep state in which the
- * contents of main memory were preserved. Put the device into the
- * appropriate state, according to the information saved in memory by the
- * preceding @suspend(). The driver starts working again, responding to
- * hardware events and software requests. The hardware may have gone
- * through a power-off reset, or it may have maintained state from the
- * previous suspend() which the driver may rely on while resuming. On most
- * platforms, there are no restrictions on availability of resources like
- * clocks during @resume().
+ * contents of main memory were preserved. The exact action to perform
+ * depends on the device's subsystem, but generally the driver is expected
+ * to start working again, responding to hardware events and software
+ * requests (the device itself may be left in a low-power state, waiting
+ * for a runtime resume to occur). The state of the device at the time its
+ * driver's @resume() callback is run depends on the platform and subsystem
+ * the device belongs to. On most platforms, there are no restrictions on
+ * availability of resources like clocks during @resume().
+ * Subsystem-level @resume() is executed for all devices after invoking
+ * subsystem-level @resume_noirq() for all of them.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
- * Quiesce operations so that a consistent image can be created, but do NOT
- * otherwise put the device into a low power device state and do NOT emit
- * system wakeup events. Save in main memory the device settings to be
- * used by @restore() during the subsequent resume from hibernation or by
- * the subsequent @thaw(), if the creation of the image or the restoration
- * of main memory contents from it fails.
+ * Analogous to @suspend(), but it should not enable the device to signal
+ * wakeup events or change its power state. The majority of subsystems
+ * (with the notable exception of the PCI bus type) expect the driver-level
+ * @freeze() to save the device settings in memory to be used by @restore()
+ * during the subsequent resume from hibernation.
+ * Subsystem-level @freeze() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
- * if the creation of the image fails. Also executed after a failing
+ * if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
+ * Subsystem-level @thaw() is executed for all devices after invoking
+ * subsystem-level @thaw_noirq() for all of them. It also may be executed
+ * directly after @freeze() in case of a transition error.
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
- * Quiesce the device, put it into a low power state appropriate for the
- * upcoming system state (such as PCI_D3hot), and enable wakeup events as
- * appropriate.
+ * Analogous to @suspend(), but it need not save the device's settings in
+ * memory.
+ * Subsystem-level @poweroff() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
- * memory from a hibernation image. Driver starts working again,
- * responding to hardware events and software requests. Drivers may NOT
- * make ANY assumptions about the hardware state right prior to @restore().
- * On most platforms, there are no restrictions on availability of
- * resources like clocks during @restore().
- *
- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
- * actions required for suspending the device that need interrupts to be
- * disabled
- *
- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
- * actions required for resuming the device that need interrupts to be
- * disabled
- *
- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
- * actions required for freezing the device that need interrupts to be
- * disabled
- *
- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
- * actions required for thawing the device that need interrupts to be
- * disabled
- *
- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
- * actions required for handling the device that need interrupts to be
- * disabled
- *
- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
- * actions required for restoring the operations of the device that need
- * interrupts to be disabled
+ * memory from a hibernation image, analogous to @resume().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
+ * additional operations required for suspending the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @suspend_noirq() is being executed.
+ * It generally is expected that the device will be in a low-power state
+ * (appropriate for the target system sleep state) after subsystem-level
+ * @suspend_noirq() has returned successfully. If the device can generate
+ * system wakeup signals and is enabled to wake up the system, it should be
+ * configured to do so at that time. However, depending on the platform
+ * and device's subsystem, @suspend() may be allowed to put the device into
+ * the low-power state and configure it to generate wakeup signals, in
+ * which case it generally is not necessary to define @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ * operations required for resuming the device that might be racing with
+ * its driver's interrupt handler, which is guaranteed not to run while
+ * @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
+ * additional operations required for freezing the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @freeze_noirq() is being executed.
+ * The power state of the device should not be changed by either @freeze()
+ * or @freeze_noirq() and it should not be configured to signal system
+ * wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
+ * @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
- * returned. The error codes returned in that cases are only printed by the PM
+ * returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
- * executed. However, it is not allowed to unregister a device from within any
- * of its own callbacks.
+ * executed. However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
+ *
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
*
- * There also are the following callbacks related to run-time power management
- * of devices:
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
- * This need not mean that the device should be put into a low power state.
+ * This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
- * power and is capable of generating run-time wake-up events, remote
- * wake-up (i.e., a hardware mechanism allowing the device to request a
- * change of its power state via a wake-up event, such as PCI PME) should
- * be enabled for it.
+ * power and is capable of generating runtime wakeup events, remote wakeup
+ * (i.e., a hardware mechanism allowing the device to request a change of
+ * its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
- * wake-up event generated by hardware or at the request of software. If
- * necessary, put the device into the full power state and restore its
+ * wakeup event generated by hardware or at the request of software. If
+ * necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
- * @runtime_idle: Device appears to be inactive and it might be put into a low
- * power state if all of the necessary conditions are satisfied. Check
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ * low-power state if all of the necessary conditions are satisfied. Check
* these conditions and handle the device as appropriate, possibly queueing
* a suspend request for it. The return value is ignored by the PM core.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
*/
struct dev_pm_ops {
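
The kerneldoc above describes the two-level callback scheme in prose; as a minimal,
hedged sketch (not part of this patch), a platform driver might wire its system sleep
callbacks up roughly like this, with all foo_* names purely hypothetical:

  /* Sketch: a hypothetical driver providing dev_pm_ops callbacks. */
  #include <linux/pm.h>
  #include <linux/platform_device.h>

  static int foo_suspend(struct device *dev)
  {
          /* Quiesce the device; the subsystem-level callback invokes this. */
          return 0;
  }

  static int foo_resume(struct device *dev)
  {
          /* Undo foo_suspend(); the device may still be runtime-suspended. */
          return 0;
  }

  static const struct dev_pm_ops foo_pm_ops = {
          SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
  };

  static struct platform_driver foo_driver = {
          .driver = {
                  .name = "foo",
                  .pm   = &foo_pm_ops,
          },
  };
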
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ea567321ae3..2ca8cde5459 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -35,10 +35,12 @@ struct pstore_info {
spinlock_t buf_lock; /* serialize access to 'buf' */
char *buf;
size_t bufsize;
+ struct mutex read_mutex; /* serialize open/read/close */
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
int (*write)(enum pstore_type_id type, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
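
Since the read() prototype now hands the record buffer back through the new char **buf
argument, a backend would be expected to allocate and fill that buffer itself. A rough,
hedged sketch (the example_* name is hypothetical, not from this patch):

  /* Sketch: a pstore backend read op under the new prototype. */
  #include <linux/pstore.h>
  #include <linux/slab.h>
  #include <linux/time.h>

  static ssize_t example_pstore_read(u64 *id, enum pstore_type_id *type,
                                     struct timespec *time, char **buf,
                                     struct pstore_info *psi)
  {
          size_t len = 0;

          /* ... locate the next record, set *id, *type, *time and len ... */
          if (!len)
                  return 0;               /* no more records */

          *buf = kmalloc(len, GFP_KERNEL);
          if (!*buf)
                  return -ENOMEM;
          /* ... copy the record payload into *buf ... */
          return len;
  }
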
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68daf4f27e2..1c4f3e9b9bc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1521,7 +1521,6 @@ struct task_struct {
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
#endif
- struct prop_local_single dirties;
/*
* when (nr_dirtied >= nr_dirtied_pause), it's time to call
* balance_dirty_pages() for some dirty throttling pause
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 97ff8e27a6c..3d86517fe7d 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -207,13 +207,15 @@ struct serial_icounter_struct {
struct serial_rs485 {
__u32 flags; /* RS485 feature flags */
-#define SER_RS485_ENABLED (1 << 0)
-#define SER_RS485_RTS_ON_SEND (1 << 1)
-#define SER_RS485_RTS_AFTER_SEND (1 << 2)
-#define SER_RS485_RTS_BEFORE_SEND (1 << 3)
+#define SER_RS485_ENABLED (1 << 0) /* If enabled */
+#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for
+ RTS pin when
+ sending */
+#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for
+ RTS pin after sent*/
#define SER_RS485_RX_DURING_TX (1 << 4)
- __u32 delay_rts_before_send; /* Milliseconds */
- __u32 delay_rts_after_send; /* Milliseconds */
+ __u32 delay_rts_before_send; /* Delay before send (milliseconds) */
+ __u32 delay_rts_after_send; /* Delay after send (milliseconds) */
__u32 padding[5]; /* Memory is cheap, new structs
are a royal PITA .. */
};
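
The new comments spell out what the flag bits and delay fields mean; from user space
they are normally applied with the TIOCSRS485 ioctl. A small, hedged example (device
path and delay values are illustrative only):

  /* Sketch: enabling RS485 with the flags documented above. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/serial.h>

  int main(void)
  {
          struct serial_rs485 rs485 = { 0 };
          int fd = open("/dev/ttyS1", O_RDWR);    /* example port */

          if (fd < 0)
                  return 1;
          rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
          rs485.delay_rts_before_send = 1;        /* milliseconds */
          rs485.delay_rts_after_send = 1;
          if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                  perror("TIOCSRS485");
          return 0;
  }
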
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 63f98d0a8ef..5206d6541da 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -85,6 +85,8 @@
* @reset: reset the device
* vdev: the virtio device
* After this, status and feature negotiation must be done again
+ * Device must not be reset from its vq/config callbacks, or in
+ * parallel with being added/removed.
* @find_vqs: find virtqueues and instantiate them.
* vdev: the virtio_device
* nvqs: the number of virtqueues to find
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
index 27c7edefbc8..5c7b6f0daef 100644
--- a/include/linux/virtio_mmio.h
+++ b/include/linux/virtio_mmio.h
@@ -63,7 +63,7 @@
#define VIRTIO_MMIO_GUEST_FEATURES 0x020
/* Activated features set selector - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024
+#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
/* Guest's memory page size in bytes - Write Only */
#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 73a5c26c01e..06b795dd590 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,6 +35,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4283508b3e1..a88fb693938 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (nf_conntrack_event_cb == NULL)
+ if (net->ct.nf_conntrack_event_cb == NULL)
return;
e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
int report)
{
int ret = 0;
+ struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
u32 pid,
int report)
{
+ struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference(net->ct.nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;
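
With the notifier pointers now living in struct netns_ct (see the netns/conntrack.h
hunk below), registration takes the struct net it applies to. A hedged sketch of a
caller adapting to the new prototype (example_* names are hypothetical, error handling
trimmed):

  /* Sketch: per-netns conntrack event notifier registration. */
  #include <net/net_namespace.h>
  #include <net/netfilter/nf_conntrack_ecache.h>

  static int example_ct_event(unsigned int events, struct nf_ct_event *item)
  {
          /* react to the IPCT_* bits set in 'events' for item->ct */
          return 0;
  }

  static struct nf_ct_event_notifier example_notifier = {
          .fcn = example_ct_event,
  };

  static int __net_init example_net_init(struct net *net)
  {
          return nf_conntrack_register_notifier(net, &example_notifier);
  }

  static void __net_exit example_net_exit(struct net *net)
  {
          nf_conntrack_unregister_notifier(net, &example_notifier);
  }
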
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0249399e51a..7a911eca0f1 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,6 +18,8 @@ struct netns_ct {
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3be..b72a3b83393 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -116,7 +116,7 @@ struct red_parms {
u32 qR; /* Cached random number */
unsigned long qavg; /* Average queue length: A scaled */
- psched_time_t qidlestart; /* Start of current idle period */
+ ktime_t qidlestart; /* Start of current idle period */
};
static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
static inline int red_is_idling(struct red_parms *p)
{
- return p->qidlestart != PSCHED_PASTPERFECT;
+ return p->qidlestart.tv64 != 0;
}
static inline void red_start_of_idle_period(struct red_parms *p)
{
- p->qidlestart = psched_get_time();
+ p->qidlestart = ktime_get();
}
static inline void red_end_of_idle_period(struct red_parms *p)
{
- p->qidlestart = PSCHED_PASTPERFECT;
+ p->qidlestart.tv64 = 0;
}
static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
{
- psched_time_t now;
- long us_idle;
+ s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
int shift;
- now = psched_get_time();
- us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
/*
* The problem: ideally, average queue length recalculation should
* be done over constant clock intervals. This is too expensive, so
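
Dropping the psched clock here leaves plain ktime arithmetic: the queue is considered
idling whenever qidlestart is non-zero, and the idle time is a bounded microsecond
delta. A hedged standalone sketch of the same pattern (not the sch_red code itself):

  /* Sketch: tracking an idle period with ktime_t, as red_parms now does. */
  #include <linux/kernel.h>
  #include <linux/ktime.h>

  static ktime_t idle_start;

  static void start_idle(void)
  {
          idle_start = ktime_get();       /* non-zero tv64: idling */
  }

  static void end_idle(void)
  {
          idle_start.tv64 = 0;            /* zero tv64: not idling */
  }

  static s64 idle_us(s64 max_us)
  {
          s64 delta = ktime_us_delta(ktime_get(), idle_start);

          return min_t(s64, delta, max_us);
  }
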
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b66ebb2032c..378c7ed6760 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
};
-#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
/* Init with the board info */
extern int omap_display_init(struct omap_dss_board_info *board_data);
-#else
-static inline int omap_display_init(struct omap_dss_board_info *board_data)
-{
- return 0;
-}
-#endif
struct omap_display_platform_data {
struct omap_dss_board_info *board_data;
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 5e828a2ca8e..213c0351dad 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
kfree(cgroup_freezer(cgroup));
}
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+ return frozen(task) ||
+ (task_is_stopped_or_traced(task) && freezing(task));
+}
+
/*
* The call to cgroup_lock() in the freezer.state write method prevents
* a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
- if (frozen(task))
+ if (is_task_frozen_enough(task))
nfrozen++;
}
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
while ((task = cgroup_iter_next(cgroup, &it))) {
if (!freeze_task(task, true))
continue;
- if (frozen(task))
+ if (is_task_frozen_enough(task))
continue;
if (!freezing(task) && !freezer_should_skip(task))
num_cant_freeze_now++;
diff --git a/kernel/fork.c b/kernel/fork.c
index ba0d1726132..da4a6a10d08 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
void free_task(struct task_struct *tsk)
{
- prop_local_destroy_single(&tsk->dirties);
account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
tsk->stack = ti;
- err = prop_local_init_single(&tsk->dirties);
- if (err)
- goto out;
-
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf..ae34bf51682 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
+ struct timerqueue_node *next_timer;
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
- if (&timer->node == timerqueue_getnext(&base->active)) {
+ next_timer = timerqueue_getnext(&base->active);
+ timerqueue_del(&base->active, &timer->node);
+ if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
/* Reprogram the clock event device. if enabled */
if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
}
#endif
}
- timerqueue_del(&base->active, &timer->node);
if (!timerqueue_getnext(&base->active))
base->cpu_base->active_bases &= ~(1 << base->index);
out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52..0e2b179bc7b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1596,7 +1596,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
return -ENOMEM;
action->handler = handler;
- action->flags = IRQF_PERCPU;
+ action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
action->name = devname;
action->percpu_dev_id = dev_id;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b5f4742693c..dc813a948be 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
*/
action = desc->action;
if (!action || !(action->flags & IRQF_SHARED) ||
- (action->flags & __IRQF_TIMER) || !action->next)
+ (action->flags & __IRQF_TIMER) ||
+ (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+ !action->next)
goto out;
/* Already running on another processor */
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 196c01268eb..a6b0503574e 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -347,7 +347,7 @@ int hibernation_snapshot(int platform_mode)
error = freeze_kernel_threads();
if (error)
- goto Close;
+ goto Cleanup;
if (hibernation_test(TEST_FREEZER) ||
hibernation_testmode(HIBERNATION_TESTPROC)) {
@@ -357,12 +357,14 @@ int hibernation_snapshot(int platform_mode)
* successful freezer test.
*/
freezer_test_done = true;
- goto Close;
+ goto Cleanup;
}
error = dpm_prepare(PMSG_FREEZE);
- if (error)
- goto Complete_devices;
+ if (error) {
+ dpm_complete(msg);
+ goto Cleanup;
+ }
suspend_console();
pm_restrict_gfp_mask();
@@ -391,8 +393,6 @@ int hibernation_snapshot(int platform_mode)
pm_restore_gfp_mask();
resume_console();
-
- Complete_devices:
dpm_complete(msg);
Close:
@@ -402,6 +402,10 @@ int hibernation_snapshot(int platform_mode)
Recover_platform:
platform_recover(platform_mode);
goto Resume_devices;
+
+ Cleanup:
+ swsusp_free();
+ goto Close;
}
/**
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e09..cfc65e1eb9f 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
}
/**
+ * clocksource_max_adjustment- Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+ u64 ret;
+ /*
+ * We won't try to correct for more than 11% adjustments (110,000 ppm).
+ */
+ ret = (u64)cs->mult * 11;
+ do_div(ret,100);
+ return (u32)ret;
+}
+
+/**
* clocksource_max_deferment - Returns max time the clocksource can be deferred
* @cs: Pointer to clocksource
*
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
/*
* Calculate the maximum number of cycles that we can pass to the
* cyc2ns function without overflowing a 64-bit signed result. The
- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
- * is equivalent to the below.
- * max_cycles < (2^63)/cs->mult
- * max_cycles < 2^(log2((2^63)/cs->mult))
- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
- * max_cycles < 2^(63 - log2(cs->mult))
- * max_cycles < 1 << (63 - log2(cs->mult))
+ * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+ * which is equivalent to the below.
+ * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+ * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+ * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+ * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+ * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
* Please note that we add 1 to the result of the log2 to account for
* any rounding errors, ensure the above inequality is satisfied and
* no overflow will occur.
*/
- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+ max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
/*
* The actual maximum number of cycles we can defer the clocksource is
* determined by the minimum of max_cycles and cs->mask.
+ * Note: Here we subtract the maxadj to make sure we don't sleep for
+ * too long if there's a large negative adjustment.
*/
max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+ cs->shift);
/*
* To ensure that the clocksource does not wrap whilst we are idle,
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
u64 sec;
-
/*
* Calc the maximum number of seconds which we can run before
* wrapping around. For clocksources which have a mask > 32bit
@@ -661,6 +679,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
NSEC_PER_SEC / scale, sec * scale);
+
+ /*
+ * Cap mult (and correspondingly shift) for clocksources that have large
+ * mults, to avoid overflow. Since mult may be adjusted by ntp, add an
+ * extra safety margin.
+ *
+ */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ while ((cs->mult + cs->maxadj < cs->mult)
+ || (cs->mult - cs->maxadj > cs->mult)) {
+ cs->mult >>= 1;
+ cs->shift--;
+ cs->maxadj = clocksource_max_adjustment(cs);
+ }
+
cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
@@ -701,6 +733,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
*/
int clocksource_register(struct clocksource *cs)
{
+ /* calculate max adjustment for given mult/shift */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+ "Clocksource %s might overflow on 11%% adjustment\n",
+ cs->name);
+
/* calculate max idle time permitted for this clocksource */
cs->max_idle_ns = clocksource_max_deferment(cs);
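
A quick worked example of the new headroom (numbers purely illustrative): for
mult = 10,000,000, clocksource_max_adjustment() gives 10,000,000 * 11 / 100 = 1,100,000,
so clocksource_register() only warns if mult plus that margin would wrap, and
clocksource_max_deferment() now sizes max_cycles against mult + maxadj while converting
with mult - maxadj. A trivial sketch of the same arithmetic:

  /* Sketch: the 11% adjustment headroom computed above. */
  #include <asm/div64.h>
  #include <linux/types.h>

  static u32 example_maxadj(u32 mult)
  {
          u64 adj = (u64)mult * 11;

          do_div(adj, 100);
          return (u32)adj;                /* 10000000 -> 1100000 */
  }
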
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e850..237841378c0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
*ts = xtime;
tomono = wall_to_monotonic;
nsecs = timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
s64 error, interval = timekeeper.cycle_interval;
int adj;
+ /*
+ * The point of this is to check if the error is greater than half
+ * an interval.
+ *
+ * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+ *
+ * Note we subtract one in the shift, so that error is really error*2.
+ * This "saves" dividing (shifting) interval twice, but keeps the
+ * (error > interval) comparison as still measuring if error is
+ * larger than half an interval.
+ *
+ * Note: It does not "save" on aggravation when reading the code.
+ */
error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
if (error > interval) {
+ /*
+ * We now divide error by 4 (via shift), which checks if
+ * the error is greater than twice the interval.
+ * If it is greater, we need a bigadjust; if it's smaller,
+ * we can adjust by 1.
+ */
error >>= 2;
+ /*
+ * XXX - In update_wall_time, we round up to the next
+ * nanosecond, and store the amount rounded up into
+ * the error. This causes the likely below to be unlikely.
+ *
+ * The proper fix is to avoid rounding up by using
+ * the high precision timekeeper.xtime_nsec instead of
+ * xtime.tv_nsec everywhere. Fixing this will take some
+ * time.
+ */
if (likely(error <= interval))
adj = 1;
else
adj = timekeeping_bigadjust(error, &interval, &offset);
} else if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
error >>= 2;
if (likely(error >= -interval)) {
adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
offset = -offset;
} else
adj = timekeeping_bigadjust(error, &interval, &offset);
- } else
+ } else /* No adjustment needed */
return;
+ WARN_ONCE(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj > timekeeper.clock->mult +
+ timekeeper.clock->maxadj),
+ "Adjusting %s more then 11%% (%ld vs %ld)\n",
+ timekeeper.clock->name, (long)timekeeper.mult + adj,
+ (long)timekeeper.clock->mult +
+ timekeeper.clock->maxadj);
+ /*
+ * So the following can be confusing.
+ *
+ * To keep things simple, let's assume adj == 1 for now.
+ *
+ * When adj != 1, remember that the interval and offset values
+ * have been appropriately scaled so the math is the same.
+ *
+ * The basic idea here is that we're increasing the multiplier
+ * by one, this causes the xtime_interval to be incremented by
+ * one cycle_interval. This is because:
+ * xtime_interval = cycle_interval * mult
+ * So if mult is being incremented by one:
+ * xtime_interval = cycle_interval * (mult + 1)
+ * It's the same as:
+ * xtime_interval = (cycle_interval * mult) + cycle_interval
+ * Which can be shortened to:
+ * xtime_interval += cycle_interval
+ *
+ * So offset stores the non-accumulated cycles. Thus the current
+ * time (in shifted nanoseconds) is:
+ * now = (offset * adj) + xtime_nsec
+ * Now, even though we're adjusting the clock frequency, we have
+ * to keep time consistent. In other words, we can't jump back
+ * in time, and we also want to avoid jumping forward in time.
+ *
+ * So given the same offset value, we need the time to be the same
+ * both before and after the freq adjustment.
+ * now = (offset * adj_1) + xtime_nsec_1
+ * now = (offset * adj_2) + xtime_nsec_2
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_2) + xtime_nsec_2
+ * And we know:
+ * adj_2 = adj_1 + 1
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * (adj_1+1)) + xtime_nsec_2
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_1) + offset + xtime_nsec_2
+ * Canceling the sides:
+ * xtime_nsec_1 = offset + xtime_nsec_2
+ * Which gives us:
+ * xtime_nsec_2 = xtime_nsec_1 - offset
+ * Which simplifies to:
+ * xtime_nsec -= offset
+ *
+ * XXX - TODO: Doc ntp_error calculation.
+ */
timekeeper.mult += adj;
timekeeper.xtime_interval += interval;
timekeeper.xtime_nsec -= offset;
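
To make the algebra in that comment concrete with illustrative numbers: take
offset = 5 un-accumulated cycles, adj_1 = 10 and xtime_nsec_1 = 100, so
now = 5 * 10 + 100 = 150 before the adjustment. After the multiplier grows to
adj_2 = 11, keeping now = 150 requires xtime_nsec_2 = 150 - 5 * 11 = 95 =
xtime_nsec_1 - offset, which is exactly what the final
"timekeeper.xtime_nsec -= offset" statement does.
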
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3278f00523..71252486bc6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
*
*/
static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
/*
* couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
{
int shift = calc_period_shift();
prop_change_shift(&vm_completions, shift);
- prop_change_shift(&vm_dirties, shift);
writeback_set_ratelimit();
}
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
-void task_dirty_inc(struct task_struct *tsk)
-{
- prop_inc_single(&vm_dirties, &tsk->dirties);
-}
-
/*
* Obtain an accurate fraction of the BDI's portion.
*/
@@ -1133,17 +1126,17 @@ pause:
pages_dirtied,
pause,
start_time);
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_KILLABLE);
io_schedule_timeout(pause);
- dirty_thresh = hard_dirty_limit(dirty_thresh);
/*
- * max-pause area. If dirty exceeded but still within this
- * area, no need to sleep for more than 200ms: (a) 8 pages per
- * 200ms is typically more than enough to curb heavy dirtiers;
- * (b) the pause time limit makes the dirtiers more responsive.
+ * This is typically equal to (nr_dirty < dirty_thresh) and can
+ * also keep "1000+ dd on a slow USB stick" under control.
*/
- if (nr_dirty < dirty_thresh)
+ if (task_ratelimit)
+ break;
+
+ if (fatal_signal_pending(current))
break;
}
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
shift = calc_period_shift();
prop_descriptor_init(&vm_completions, shift);
- prop_descriptor_init(&vm_dirties, shift);
}
/**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
__inc_zone_page_state(page, NR_DIRTIED);
__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
- task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
}
}
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ea534960a04..12a48a88c0d 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
if (!pages || !bitmap) {
if (may_alloc && !pages)
- pages = pcpu_mem_alloc(pages_size);
+ pages = pcpu_mem_zalloc(pages_size);
if (may_alloc && !bitmap)
- bitmap = pcpu_mem_alloc(bitmap_size);
+ bitmap = pcpu_mem_zalloc(bitmap_size);
if (!pages || !bitmap)
return NULL;
}
- memset(pages, 0, pages_size);
bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
*bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_cache_vunmap(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_tlb_kernel_range(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_cache_vmap(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
/**
diff --git a/mm/percpu.c b/mm/percpu.c
index bf80e55dbed..3bb810a7200 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
(rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
/**
- * pcpu_mem_alloc - allocate memory
+ * pcpu_mem_zalloc - allocate memory
* @size: bytes to allocate
*
* Allocate @size bytes. If @size is smaller than PAGE_SIZE,
- * kzalloc() is used; otherwise, vmalloc() is used. The returned
+ * kzalloc() is used; otherwise, vzalloc() is used. The returned
* memory is always zeroed.
*
* CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
-static void *pcpu_mem_alloc(size_t size)
+static void *pcpu_mem_zalloc(size_t size)
{
if (WARN_ON_ONCE(!slab_is_available()))
return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
* @ptr: memory to free
* @size: size of the area
*
- * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
+ * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
*/
static void pcpu_mem_free(void *ptr, size_t size)
{
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
unsigned long flags;
- new = pcpu_mem_alloc(new_size);
+ new = pcpu_mem_zalloc(new_size);
if (!new)
return -ENOMEM;
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
struct pcpu_chunk *chunk;
- chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
+ chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
if (!chunk)
return NULL;
- chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+ chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
+ sizeof(chunk->map[0]));
if (!chunk->map) {
kfree(chunk);
return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
* address. The caller is responsible for ensuring @addr stays valid
* until this function finishes.
*
+ * The percpu allocator has special setup for the first chunk, which
+ * currently supports either embedding in the linear address space or a
+ * vmalloc mapping; from the second chunk on, the backing allocator
+ * (currently either vm or km) provides the translation.
+ *
+ * The addr can be translated simply without checking if it falls into the
+ * first chunk. But the current code reflects better how percpu allocator
+ * actually works, and the verification can discover both bugs in percpu
+ * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current
+ * code.
+ *
* RETURNS:
* The physical address for @addr.
*/
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
bool in_first_chunk = false;
- unsigned long first_start, first_end;
+ unsigned long first_low, first_high;
unsigned int cpu;
/*
- * The following test on first_start/end isn't strictly
+ * The following test on unit_low/high isn't strictly
* necessary but will speed up lookups of addresses which
* aren't in the first chunk.
*/
- first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
- first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
- pcpu_unit_pages);
- if ((unsigned long)addr >= first_start &&
- (unsigned long)addr < first_end) {
+ first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+ first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+ pcpu_unit_pages);
+ if ((unsigned long)addr >= first_low &&
+ (unsigned long)addr < first_high) {
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(base, cpu);
@@ -1233,7 +1245,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
- pcpu_first_unit_cpu = NR_CPUS;
+
+ pcpu_low_unit_cpu = NR_CPUS;
+ pcpu_high_unit_cpu = NR_CPUS;
for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1267,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
unit_map[cpu] = unit + i;
unit_off[cpu] = gi->base_offset + i * ai->unit_size;
- if (pcpu_first_unit_cpu == NR_CPUS)
- pcpu_first_unit_cpu = cpu;
- pcpu_last_unit_cpu = cpu;
+ /* determine low/high unit_cpu */
+ if (pcpu_low_unit_cpu == NR_CPUS ||
+ unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+ pcpu_low_unit_cpu = cpu;
+ if (pcpu_high_unit_cpu == NR_CPUS ||
+ unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+ pcpu_high_unit_cpu = cpu;
}
}
pcpu_nr_units = unit;
@@ -1889,7 +1907,7 @@ void __init percpu_init_late(void)
BUILD_BUG_ON(size > PAGE_SIZE);
- map = pcpu_mem_alloc(size);
+ map = pcpu_mem_zalloc(size);
BUG_ON(!map);
spin_lock_irqsave(&pcpu_lock, flags);
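
For reference, the kerneldoc above describes pcpu_mem_zalloc() as zeroed memory from
kzalloc() for small sizes and vzalloc() for large ones; a hedged sketch of that
behaviour in isolation (the real function also keeps the slab_is_available() guard
shown earlier, and the exact PAGE_SIZE boundary handling may differ):

  /* Sketch: roughly what pcpu_mem_zalloc() does after this rename. */
  #include <linux/slab.h>
  #include <linux/vmalloc.h>

  static void *example_mem_zalloc(size_t size)
  {
          if (size <= PAGE_SIZE)
                  return kzalloc(size, GFP_KERNEL);
          return vzalloc(size);
  }
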
diff --git a/mm/slub.c b/mm/slub.c
index 7d2a996c307..ed3334d9b6d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
{
struct kmem_cache_node *n = NULL;
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
- struct page *page;
+ struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
if (l == M_PARTIAL)
remove_partial(n, page);
else
- add_partial(n, page, 1);
+ add_partial(n, page,
+ DEACTIVATE_TO_TAIL);
l = m;
}
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
"unfreezing slab"));
if (m == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, page);
- stat(s, FREE_SLAB);
+ page->next = discard_page;
+ discard_page = page;
}
}
if (n)
spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+ discard_page = discard_page->next;
+
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, page);
+ stat(s, FREE_SLAB);
+ }
}
/*
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->pobjects = pobjects;
page->next = oldpage;
- } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+ } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
stat(s, CPU_PARTIAL_FREE);
return pobjects;
}
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ int node = ACCESS_ONCE(c->node);
struct page *page;
- if (!c || c->node < 0)
+ if (node < 0)
continue;
-
- if (c->page) {
- if (flags & SO_TOTAL)
- x = c->page->objects;
+ page = ACCESS_ONCE(c->page);
+ if (page) {
+ if (flags & SO_TOTAL)
+ x = page->objects;
else if (flags & SO_OBJECTS)
- x = c->page->inuse;
+ x = page->inuse;
else
x = 1;
total += x;
- nodes[c->node] += x;
+ nodes[node] += x;
}
page = c->partial;
if (page) {
x = page->pobjects;
- total += x;
- nodes[c->node] += x;
+ total += x;
+ nodes[node] += x;
}
- per_cpu[c->node]++;
+ per_cpu[node]++;
}
}
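
The unfreeze_partials() change above is an instance of a common pattern: chain the
empty slabs on a local list while n->list_lock is held and only discard them after the
lock is dropped, so the free path never runs under the spinlock. A generic, hedged
sketch of the idea (struct item and reap_items() are made up for illustration):

  /* Sketch: defer freeing until after the lock is released. */
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct item {
          struct item *next;
  };

  static void reap_items(struct item **list, spinlock_t *lock)
  {
          struct item *doomed = NULL, *it;

          spin_lock(lock);
          while ((it = *list)) {          /* detach everything under the lock */
                  *list = it->next;
                  it->next = doomed;
                  doomed = it;
          }
          spin_unlock(lock);

          while (doomed) {                /* free outside the critical section */
                  it = doomed;
                  doomed = doomed->next;
                  kfree(it);
          }
  }
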
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5f9ece3c9a..a1daf8227ed 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -18,6 +18,7 @@
#include <net/sock.h>
#include "br_private.h"
+#include "br_private_stp.h"
static inline size_t br_nlmsg_size(void)
{
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
p->state = new_state;
br_log_state(p);
+
+ spin_lock_bh(&p->br->lock);
+ br_port_state_selection(p->br);
+ spin_unlock_bh(&p->br->lock);
+
br_ifinfo_notify(RTM_NEWLINK, p);
return 0;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index ad0a3f7cf6c..dd147d78a58 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
struct net_bridge_port *p;
unsigned int liveports = 0;
- /* Don't change port states if userspace is handling STP */
- if (br->stp_enabled == BR_USER_STP)
- return;
-
list_for_each_entry(p, &br->port_list, list) {
if (p->state == BR_STATE_DISABLED)
continue;
- if (p->port_no == br->root_port) {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_forwarding(p);
- } else if (br_is_designated_port(p)) {
- del_timer(&p->message_age_timer);
- br_make_forwarding(p);
- } else {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_blocking(p);
+ /* Don't change port states if userspace is handling STP */
+ if (br->stp_enabled != BR_USER_STP) {
+ if (p->port_no == br->root_port) {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_forwarding(p);
+ } else if (br_is_designated_port(p)) {
+ del_timer(&p->message_age_timer);
+ br_make_forwarding(p);
+ } else {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_blocking(p);
+ }
}
if (p->state == BR_STATE_FORWARDING)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 733e46008b8..f4f3f58f523 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -244,7 +244,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
ceph_pagelist_init(req->r_trail);
}
/* create request message; allow space for oid */
- msg_size += 40;
+ msg_size += MAX_OBJ_NAME_SIZE;
if (snapc)
msg_size += sizeof(u64) * snapc->num_snaps;
if (use_mempool)
diff --git a/net/core/dev.c b/net/core/dev.c
index e0c3deec59b..f494675471a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1387,7 +1387,7 @@ rollback:
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev == last)
- break;
+ goto outroll;
if (dev->flags & IFF_UP) {
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1398,6 +1398,7 @@ rollback:
}
}
+outroll:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
@@ -4209,6 +4210,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
sizeof(struct dev_iter_state));
}
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops)
+{
+ return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 277faef9148..febba516db6 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
static int dev_mc_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &dev_mc_seq_ops,
- sizeof(struct seq_net_private));
+ return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
}
static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 67f691bd4ac..d9c150cc59a 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
void dn_start_slow_timer(struct sock *sk)
{
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- sk->sk_timer.function = dn_slow_timer;
- sk->sk_timer.data = (unsigned long)sk;
-
- add_timer(&sk->sk_timer);
+ setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
struct sock *sk = (struct sock *)arg;
struct dn_scp *scp = DN_SK(sk);
- sock_hold(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->sk_timer.expires = jiffies + HZ / 10;
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
goto out;
}
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
scp->keepalive_fxn(sk);
}
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
bh_unlock_sock(sk);
sock_put(sk);
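
This hunk switches DECnet to the stock sock timer helpers; the resulting shape is
roughly the following hedged sketch (example_* names are hypothetical), where
sk_reset_timer()/sk_stop_timer() manage the sock reference that keeps the callback from
racing with socket teardown:

  /* Sketch: the setup_timer()/sk_reset_timer() pattern adopted above. */
  #include <linux/jiffies.h>
  #include <linux/timer.h>
  #include <net/sock.h>

  static void example_timer(unsigned long arg)
  {
          struct sock *sk = (struct sock *)arg;

          bh_lock_sock(sk);
          if (sock_owned_by_user(sk)) {
                  /* socket busy: retry a little later */
                  sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                  goto out;
          }
          /* ... periodic protocol work ... */
          sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
  out:
          bh_unlock_sock(sk);
          sock_put(sk);                   /* drop the ref held for this run */
  }

  static void example_start(struct sock *sk)
  {
          setup_timer(&sk->sk_timer, example_timer, (unsigned long)sk);
          sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
  }
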
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6b5092f29a..65f01dc4756 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
+ int old_value = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ int new_value = *(int *)ctl->data;
if (write) {
struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
+ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+ if ((new_value == 0) && (old_value != 0))
+ rt_cache_flush(net, 0);
}
return ret;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 9899619ab9b..4f47e064e26 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
- pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+ pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+ 0, GFP_ATOMIC))
return -1;
return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 9a20663d596..7047069cf96 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -130,6 +130,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;
/*
* Interface to generic destination cache.
@@ -415,9 +416,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
else {
struct rtable *r = v;
struct neighbour *n;
- int len;
+ int len, HHUptod;
+ rcu_read_lock();
n = dst_get_neighbour(&r->dst);
+ HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+ rcu_read_unlock();
+
seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
"%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->dst.dev ? r->dst.dev->name : "*",
@@ -431,7 +436,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
dst_metric(&r->dst, RTAX_RTTVAR)),
r->rt_key_tos,
-1,
- (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+ HHUptod,
r->rt_spec_dst, &len);
seq_printf(seq, "%*s\n", 127 - len, "");
@@ -836,6 +841,7 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
}
/*
@@ -1385,8 +1391,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
+ if (peer->redirect_learned.a4 != new_gw ||
+ peer->redirect_genid != redirect_genid) {
peer->redirect_learned.a4 = new_gw;
+ peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
@@ -1679,12 +1687,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
}
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static struct rtable *ipv4_validate_peer(struct rtable *rt)
{
- struct rtable *rt = (struct rtable *) dst;
-
- if (rt_is_expired(rt))
- return NULL;
if (rt->rt_peer_genid != rt_peer_genid()) {
struct inet_peer *peer;
@@ -1693,17 +1697,29 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
peer = rt->peer;
if (peer) {
- check_peer_pmtu(dst, peer);
+ check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
- if (check_peer_redir(dst, peer))
+ if (check_peer_redir(&rt->dst, peer))
return NULL;
}
}
rt->rt_peer_genid = rt_peer_genid();
}
+ return rt;
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt_is_expired(rt))
+ return NULL;
+ dst = (struct dst_entry *) ipv4_validate_peer(rt);
return dst;
}
@@ -1851,6 +1867,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
@@ -2356,6 +2374,9 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_mark == skb->mark &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ rth = ipv4_validate_peer(rth);
+ if (!rth)
+ continue;
if (noref) {
dst_use_noref(&rth->dst, jiffies);
skb_dst_set_noref(skb, &rth->dst);
@@ -2731,6 +2752,9 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
(IPTOS_RT_MASK | RTO_ONLINK)) &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ rth = ipv4_validate_peer(rth);
+ if (!rth)
+ continue;
dst_use(&rth->dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b867ea23ece..ad481b32f1e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
@@ -1197,14 +1198,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 29993b7079a..18a2719003c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -503,7 +503,7 @@ done:
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
- np->mcast_hops = val;
+ np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
retv = 0;
break;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 84ec9db86ee..adfe26a7fc6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +363,10 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +377,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov,len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
@@ -431,7 +432,7 @@ try_again:
datagram_recv_ctl(sk, msg, skb);
}
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index cf0f308abf5..89ff8c67943 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+ skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
inet = inet_sk(sk);
fl = &inet->cork.fl;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 39d72ccaffb..556765749b9 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return -ENOENT;
}
+ /* if we're already stopping, ignore any new requests to stop */
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ spin_unlock_bh(&sta->lock);
+ return -EALREADY;
+ }
+
if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return 0;
}
+ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
del_timer_sync(&tid_tx->addba_resp_timer);
/*
@@ -188,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
+ /*
+ * There might be a few packets being processed right now (on
+ * another CPU) that have already gotten past the aggregation
+ * check when it was still OPERATIONAL and consequently have
+ * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+ * call into the driver at the same time or even before the
+ * TX paths call into it, which could confuse the driver.
+ *
+ * Wait for all currently running TX paths to finish before
+ * telling the driver. New packets will not go through since
+ * the aggregation session is no longer OPERATIONAL.
+ */
+ synchronize_net();
+
tid_tx->stop_initiator = initiator;
tid_tx->tx_stop = tx;
@@ -753,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
}
- del_timer(&tid_tx->addba_resp_timer);
+ del_timer_sync(&tid_tx->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif
+
+ /*
+ * addba_resp_timer may have fired before we got here, and
+ * caused WANT_STOP to be set. If the stop then was already
+ * processed further, STOPPING might be set.
+ */
+ if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+ test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG
+ "got addBA resp for tid %d but we already gave up\n",
+ tid);
+#endif
+ goto out;
+ }
+
/*
* IEEE 802.11-2007 7.3.1.14:
* In an ADDBA Response frame, when the Status Code field
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e8f37969229..d5597b759ba 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
- depends on NETFILTER_ADVANCED
select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 6ee10f5d59b..37d667e3f6f 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fb90e344e90..e69e2718fbe 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index deb3e3dfa5f..64199b4e93c 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6b368be937c..b62c4148b92 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -27,22 +27,17 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
unsigned long events;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -83,19 +78,20 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
int ret = 0;
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -105,32 +101,34 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
int ret = 0;
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_expect_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -140,15 +138,16 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index e58aa9b1fe8..ef21b221f03 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int ret;
+
+ ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot register notifier.\n");
+ goto err_out;
+ }
+
+ ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot expect register notifier.\n");
+ goto err_unreg_notifier;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+ return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+ struct net *net;
+
+ list_for_each_entry(net, net_exit_list, exit_list)
+ ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+ .init = ctnetlink_net_init,
+ .exit_batch = ctnetlink_net_exit_batch,
+};
+
static int __init ctnetlink_init(void)
{
int ret;
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- ret = nf_conntrack_register_notifier(&ctnl_notifier);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot register notifier.\n");
+ if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
- ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot expect register notifier.\n");
- goto err_unreg_notifier;
- }
-#endif
-
return 0;
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
- nf_conntrack_unregister_notifier(&ctnl_notifier);
err_unreg_exp_subsys:
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
err_unreg_subsys:
nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void)
pr_info("ctnetlink: unregistering from nfnetlink.\n");
nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
- nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+ unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
}
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 3735297c524..5952237c0c8 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
- const struct in_addr *addr4, *mask4;
- const struct in6_addr *addr6, *mask6;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
INIT_LIST_HEAD(&addrmap->list6);
switch (family) {
- case AF_INET:
- addr4 = addr;
- mask4 = mask;
+ case AF_INET: {
+ const struct in_addr *addr4 = addr;
+ const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
- case AF_INET6:
- addr6 = addr;
- mask6 = mask;
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6: {
+ const struct in6_addr *addr6 = addr;
+ const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -167,6 +167,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
+ }
+#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 6649463da1b..d617161f8dd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
ctl->Plog, ctl->Scell_log,
nla_data(tb[TCA_RED_STAB]));
- if (skb_queue_empty(&sch->q))
- red_end_of_idle_period(&q->parms);
+ if (!q->qdisc->q.qlen)
+ red_start_of_idle_period(&q->parms);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 283bfe3de59..ed1336e1592 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+ struct net_device *dev, struct netdev_queue *txq,
+ struct neighbour *mn)
{
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
- struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
- struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+ struct teql_sched_data *q = qdisc_priv(txq->qdisc);
struct neighbour *n = q->ncache;
if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
}
static inline int teql_resolve(struct sk_buff *skb,
- struct sk_buff *skb_res, struct net_device *dev)
+ struct sk_buff *skb_res,
+ struct net_device *dev,
+ struct netdev_queue *txq)
{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *mn;
+ int res;
+
if (txq->qdisc == &noop_qdisc)
return -ENODEV;
- if (dev->header_ops == NULL ||
- skb_dst(skb) == NULL ||
- dst_get_neighbour(skb_dst(skb)) == NULL)
+ if (!dev->header_ops || !dst)
return 0;
- return __teql_resolve(skb, skb_res, dev);
+
+ rcu_read_lock();
+ mn = dst_get_neighbour(dst);
+ res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+ rcu_read_unlock();
+
+ return res;
}
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
continue;
}
- switch (teql_resolve(skb, skb_res, slave)) {
+ switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
case 0:
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 865e68fef21..bf812048cf6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
struct sctp_auth_bytes *key;
/* Verify that we are not going to overflow INT_MAX */
- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;
/* Allocate the shared key */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d7f97ef2659..55472c48825 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- int ret = 0;
+ int ret = -EAGAIN;
dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
- ret = -EAGAIN;
/*
* Notify TCP that we're limited by the application
* window size
@@ -2530,8 +2529,10 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
int err;
err = xs_init_anyaddr(args->dstaddr->sa_family,
(struct sockaddr *)&new->srcaddr);
- if (err != 0)
+ if (err != 0) {
+ xprt_free(xprt);
return ERR_PTR(err);
+ }
}
return xprt;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 466fbcc5cf7..b595a3d8679 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if ((UNIXCB(skb).pid != siocb->scm->pid) ||
(UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} else {
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put the skb back if we didn't use it up.. */
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} while (size);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a1cabde7cb5..eee9ccc7ada 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
- [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
- [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+ [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index e083122ca55..dbf94b189e7 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
struct cs5535audio_dma_desc *desc =
&((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
desc->addr = cpu_to_le32(addr);
- desc->size = cpu_to_le32(period_bytes);
+ desc->size = cpu_to_le16(period_bytes);
desc->ctlreserved = cpu_to_le16(PRD_EOP);
desc_addr += sizeof(struct cs5535audio_dma_desc);
addr += period_bytes;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e44b107fdc7..4562e9de6a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
/* Search for codec ID */
for (q = tbl; q->subvendor; q++) {
- unsigned long vendorid = (q->subdevice) | (q->subvendor << 16);
-
- if (vendorid == codec->subsystem_id)
+ unsigned int mask = 0xffff0000 | q->subdevice_mask;
+ unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
+ if ((codec->subsystem_id & mask) == id)
break;
}
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 7ae7578bdcc..c1da422e085 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -347,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
for (i = 0; i < size; i++) {
unsigned int val = hdmi_get_eld_data(codec, nid, i);
+ /*
+ * Graphics driver might be writing to ELD buffer right now.
+ * Just abort. The caller will repoll after a while.
+ */
if (!(val & AC_ELDD_ELD_VALID)) {
- if (!i) {
- snd_printd(KERN_INFO
- "HDMI: invalid ELD data\n");
- ret = -EINVAL;
- goto error;
- }
snd_printd(KERN_INFO
"HDMI: invalid ELD data byte %d\n", i);
- val = 0;
- } else
- val &= AC_ELDD_ELD_DATA;
+ ret = -EINVAL;
+ goto error;
+ }
+ val &= AC_ELDD_ELD_DATA;
+ /*
+ * The first byte cannot be zero. This can happen on some DVI
+ * connections. Some Intel chips may also need some 250ms delay
+ * to return non-zero ELD data, even when the graphics driver
+ * correctly writes ELD content before setting ELD_valid bit.
+ */
+ if (!val && !i) {
+ snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
+ ret = -EINVAL;
+ goto error;
+ }
buf[i] = val;
}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 2fbab8e2957..70a7abda7e2 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,8 @@ struct cs_spec {
unsigned int gpio_mask;
unsigned int gpio_dir;
unsigned int gpio_data;
+ unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
+ unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
struct hda_pcm pcm_rec[2]; /* PCM information */
@@ -76,6 +78,7 @@ enum {
CS420X_MBP53,
CS420X_MBP55,
CS420X_IMAC27,
+ CS420X_APPLE,
CS420X_AUTO,
CS420X_MODELS
};
@@ -928,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
spdif_present ? 0 : PIN_OUT);
}
}
- if (spec->board_config == CS420X_MBP53 ||
- spec->board_config == CS420X_MBP55 ||
- spec->board_config == CS420X_IMAC27) {
- unsigned int gpio = hp_present ? 0x02 : 0x08;
+ if (spec->gpio_eapd_hp) {
+ unsigned int gpio = hp_present ?
+ spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, gpio);
}
@@ -1276,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
[CS420X_MBP53] = "mbp53",
[CS420X_MBP55] = "mbp55",
[CS420X_IMAC27] = "imac27",
+ [CS420X_APPLE] = "apple",
[CS420X_AUTO] = "auto",
};
@@ -1285,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
- SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
+ /* this conflicts with too many other models */
+ /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+ {} /* terminator */
+};
+
+static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
+ SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
{} /* terminator */
};
@@ -1367,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
spec->board_config =
snd_hda_check_board_config(codec, CS420X_MODELS,
cs420x_models, cs420x_cfg_tbl);
+ if (spec->board_config < 0)
+ spec->board_config =
+ snd_hda_check_board_codec_sid_config(codec,
+ CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
if (spec->board_config >= 0)
fix_pincfg(codec, spec->board_config, cs_pincfgs);
@@ -1374,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
case CS420X_IMAC27:
case CS420X_MBP53:
case CS420X_MBP55:
- /* GPIO1 = headphones */
- /* GPIO3 = speakers */
- spec->gpio_mask = 0x0a;
- spec->gpio_dir = 0x0a;
+ case CS420X_APPLE:
+ spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
+ spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
+ spec->gpio_mask = spec->gpio_dir =
+ spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
break;
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 9850c5b481e..c505fd5d338 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -69,6 +69,7 @@ struct hdmi_spec_per_pin {
struct hda_codec *codec;
struct hdmi_eld sink_eld;
struct delayed_work work;
+ int repoll_count;
};
struct hdmi_spec {
@@ -748,7 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
* Unsolicited events
*/
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry);
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
@@ -766,7 +767,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
if (pin_idx < 0)
return;
- hdmi_present_sense(&spec->pins[pin_idx], true);
+ hdmi_present_sense(&spec->pins[pin_idx], 1);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -960,7 +961,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
return 0;
}
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
{
struct hda_codec *codec = per_pin->codec;
struct hdmi_eld *eld = &per_pin->sink_eld;
@@ -989,7 +990,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
if (eld_valid) {
if (!snd_hdmi_get_eld(eld, codec, pin_nid))
snd_hdmi_show_eld(eld);
- else if (retry) {
+ else if (repoll) {
queue_delayed_work(codec->bus->workq,
&per_pin->work,
msecs_to_jiffies(300));
@@ -1004,7 +1005,10 @@ static void hdmi_repoll_eld(struct work_struct *work)
struct hdmi_spec_per_pin *per_pin =
container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
- hdmi_present_sense(per_pin, false);
+ if (per_pin->repoll_count++ > 6)
+ per_pin->repoll_count = 0;
+
+ hdmi_present_sense(per_pin, per_pin->repoll_count);
}
static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -1235,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
if (err < 0)
return err;
- hdmi_present_sense(per_pin, false);
+ hdmi_present_sense(per_pin, 0);
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 336d14eb72a..cbde019d3d5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
return false;
}
+static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
+{
+ return spec->capsrc_nids ?
+ spec->capsrc_nids[idx] : spec->adc_nids[idx];
+}
+
/* select the given imux item; either unmute exclusively or select the route */
static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
unsigned int idx, bool force)
@@ -303,8 +309,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
adc_idx = spec->dyn_adc_idx[idx];
}
- nid = spec->capsrc_nids ?
- spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+ nid = get_capsrc(spec, adc_idx);
/* no selection? */
num_conns = snd_hda_get_conn_list(codec, nid, NULL);
@@ -1054,8 +1059,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
spec->imux_pins[2] = spec->dock_mic_pin;
for (i = 0; i < 3; i++) {
strcpy(imux->items[i].label, texts[i]);
- if (spec->imux_pins[i])
+ if (spec->imux_pins[i]) {
+ hda_nid_t pin = spec->imux_pins[i];
+ int c;
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ hda_nid_t cap = get_capsrc(spec, c);
+ int idx = get_connection_index(codec, cap, pin);
+ if (idx >= 0) {
+ imux->items[i].index = idx;
+ break;
+ }
+ }
imux->num_items = i + 1;
+ }
}
spec->num_mux_defs = 1;
spec->input_mux = imux;
@@ -1957,10 +1973,8 @@ static int alc_build_controls(struct hda_codec *codec)
if (!kctl)
kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
for (i = 0; kctl && i < kctl->count; i++) {
- const hda_nid_t *nids = spec->capsrc_nids;
- if (!nids)
- nids = spec->adc_nids;
- err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+ err = snd_hda_add_nid(codec, kctl, i,
+ get_capsrc(spec, i));
if (err < 0)
return err;
}
@@ -2747,8 +2761,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
}
for (c = 0; c < num_adcs; c++) {
- hda_nid_t cap = spec->capsrc_nids ?
- spec->capsrc_nids[c] : spec->adc_nids[c];
+ hda_nid_t cap = get_capsrc(spec, c);
idx = get_connection_index(codec, cap, pin);
if (idx >= 0) {
spec->imux_pins[imux->num_items] = pin;
@@ -3694,8 +3707,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
if (!pin)
return 0;
for (i = 0; i < spec->num_adc_nids; i++) {
- hda_nid_t cap = spec->capsrc_nids ?
- spec->capsrc_nids[i] : spec->adc_nids[i];
+ hda_nid_t cap = get_capsrc(spec, i);
int idx;
idx = get_connection_index(codec, cap, pin);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 470f6f286e8..f3658658548 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1641,6 +1641,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
"Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ "Alienware M17x", STAC_ALIENWARE_M17X),
{} /* terminator */
};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 431c0d417ee..b5137629f8e 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -208,6 +208,7 @@ struct via_spec {
/* work to check hp jack state */
struct hda_codec *codec;
struct delayed_work vt1708_hp_work;
+ int hp_work_active;
int vt1708_jack_detect;
int vt1708_hp_present;
@@ -305,27 +306,35 @@ enum {
static void analog_low_current_mode(struct hda_codec *codec);
static bool is_aa_path_mute(struct hda_codec *codec);
-static void vt1708_start_hp_work(struct via_spec *spec)
+#define hp_detect_with_aa(codec) \
+ (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
+ !is_aa_path_mute(codec))
+
+static void vt1708_stop_hp_work(struct via_spec *spec)
{
if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
return;
- snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
- !spec->vt1708_jack_detect);
- if (!delayed_work_pending(&spec->vt1708_hp_work))
- schedule_delayed_work(&spec->vt1708_hp_work,
- msecs_to_jiffies(100));
+ if (spec->hp_work_active) {
+ snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
+ cancel_delayed_work_sync(&spec->vt1708_hp_work);
+ spec->hp_work_active = 0;
+ }
}
-static void vt1708_stop_hp_work(struct via_spec *spec)
+static void vt1708_update_hp_work(struct via_spec *spec)
{
if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
return;
- if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1
- && !is_aa_path_mute(spec->codec))
- return;
- snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
- !spec->vt1708_jack_detect);
- cancel_delayed_work_sync(&spec->vt1708_hp_work);
+ if (spec->vt1708_jack_detect &&
+ (spec->active_streams || hp_detect_with_aa(spec->codec))) {
+ if (!spec->hp_work_active) {
+ snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
+ schedule_delayed_work(&spec->vt1708_hp_work,
+ msecs_to_jiffies(100));
+ spec->hp_work_active = 1;
+ }
+ } else if (!hp_detect_with_aa(spec->codec))
+ vt1708_stop_hp_work(spec);
}
static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
set_widgets_power_state(codec);
analog_low_current_mode(snd_kcontrol_chip(kcontrol));
- if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) {
- if (is_aa_path_mute(codec))
- vt1708_start_hp_work(codec->spec);
- else
- vt1708_stop_hp_work(codec->spec);
- }
+ vt1708_update_hp_work(codec->spec);
return change;
}
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
spec->cur_dac_stream_tag = stream_tag;
spec->cur_dac_format = format;
mutex_unlock(&spec->config_mutex);
- vt1708_start_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
spec->cur_hp_stream_tag = stream_tag;
spec->cur_hp_format = format;
mutex_unlock(&spec->config_mutex);
- vt1708_start_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
spec->active_streams &= ~STREAM_MULTI_OUT;
mutex_unlock(&spec->config_mutex);
- vt1708_stop_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
spec->active_streams &= ~STREAM_INDEP_HP;
mutex_unlock(&spec->config_mutex);
- vt1708_stop_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
int nums;
struct via_spec *spec = codec->spec;
- if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0])
+ if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
+ (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
if (spec->codec_type != VT1708)
return 0;
- spec->vt1708_jack_detect =
- !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
return 0;
}
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct via_spec *spec = codec->spec;
- int change;
+ int val;
if (spec->codec_type != VT1708)
return 0;
- spec->vt1708_jack_detect = ucontrol->value.integer.value[0];
- change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8))
- == !spec->vt1708_jack_detect;
- if (spec->vt1708_jack_detect) {
+ val = !!ucontrol->value.integer.value[0];
+ if (spec->vt1708_jack_detect == val)
+ return 0;
+ spec->vt1708_jack_detect = val;
+ if (spec->vt1708_jack_detect &&
+ snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
mute_aa_path(codec, 1);
notify_aa_path_ctls(codec);
}
- return change;
+ via_hp_automute(codec);
+ vt1708_update_hp_work(spec);
+ return 1;
}
static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
via_auto_init_unsol_event(codec);
via_hp_automute(codec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
spec->vt1708_hp_present ^= 1;
via_hp_automute(spec->codec);
}
- vt1708_start_hp_work(spec);
+ if (spec->vt1708_jack_detect)
+ schedule_delayed_work(&spec->vt1708_hp_work,
+ msecs_to_jiffies(100));
}
static int get_mux_nids(struct hda_codec *codec)
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 5c8717e29ee..8c3e7fcefd9 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
return ioread32(address);
}
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
+static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
+ u32 len)
{
- void __iomem *address = lx_dsp_register(chip, port);
- memcpy_fromio(data, address, len*sizeof(u32));
+ u32 __iomem *address = lx_dsp_register(chip, port);
+ int i;
+
+ /* we cannot use memcpy_fromio */
+ for (i = 0; i != len; ++i)
+ data[i] = ioread32(address + i);
}
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
iowrite32(data, address);
}
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
- u32 len)
+static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
+ const u32 *data, u32 len)
{
- void __iomem *address = lx_dsp_register(chip, port);
- memcpy_toio(address, data, len*sizeof(u32));
+ u32 __iomem *address = lx_dsp_register(chip, port);
+ int i;
+
+ /* we cannot use memcpy_toio */
+ for (i = 0; i != len; ++i)
+ iowrite32(data[i], address + i);
}
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 1dd562980b6..4d7ff797a64 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -72,10 +72,7 @@ enum {
};
unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
- u32 len);
/* plx register access */
enum {
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e760adad952..19ee2203cbb 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
hdspm->io_type = AES32;
hdspm->card_name = "RME AES32";
hdspm->midiPorts = 2;
- } else if ((hdspm->firmware_rev == 0xd5) ||
+ } else if ((hdspm->firmware_rev == 0xd2) ||
((hdspm->firmware_rev >= 0xc8) &&
(hdspm->firmware_rev <= 0xcf))) {
hdspm->io_type = MADI;
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1ccf8dd4757..45c63028b40 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
};
static const unsigned int adau1373_bass_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(3),
0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 23d1bd5dadd..69fde1506fe 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
{
int ret;
/* Set power-down bit */
- ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN);
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
+ CS4271_MODE2_PDN);
if (ret < 0)
return ret;
return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
return ret;
}
- ret = snd_soc_update_bits(codec, CS4271_MODE2, 0,
- CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
+ ret = snd_soc_update_bits(codec, CS4271_MODE2,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
if (ret < 0)
return ret;
ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 27a078cbb6e..4646e808b90 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
static unsigned int mic_bst_tlv[] = {
- TLV_DB_RANGE_HEAD(6),
+ TLV_DB_RANGE_HEAD(7),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d15695d1c27..bbcf921166f 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
/* tlv for mic gain, 0db 20db 30db 40db */
static const unsigned int mic_gain_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
};
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index bb82408ab8e..d2f37152f94 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -76,6 +76,8 @@ struct sta32x_priv {
unsigned int mclk;
unsigned int format;
+
+ u32 coef_shadow[STA32X_COEF_COUNT];
};
static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
int numcoef = kcontrol->private_value >> 16;
int index = kcontrol->private_value & 0xffff;
unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
snd_soc_write(codec, STA32X_CFUD, cfud);
snd_soc_write(codec, STA32X_CFADDR2, index);
+ for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
+ sta32x->coef_shadow[index + i] =
+ (ucontrol->value.bytes.data[3 * i] << 16)
+ | (ucontrol->value.bytes.data[3 * i + 1] << 8)
+ | (ucontrol->value.bytes.data[3 * i + 2]);
for (i = 0; i < 3 * numcoef; i++)
snd_soc_write(codec, STA32X_B1CF1 + i,
ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
return 0;
}
+int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
+{
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+ unsigned int cfud;
+ int i;
+
+ /* preserve reserved bits in STA32X_CFUD */
+ cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
+
+ for (i = 0; i < STA32X_COEF_COUNT; i++) {
+ snd_soc_write(codec, STA32X_CFADDR2, i);
+ snd_soc_write(codec, STA32X_B1CF1,
+ (sta32x->coef_shadow[i] >> 16) & 0xff);
+ snd_soc_write(codec, STA32X_B1CF2,
+ (sta32x->coef_shadow[i] >> 8) & 0xff);
+ snd_soc_write(codec, STA32X_B1CF3,
+ (sta32x->coef_shadow[i]) & 0xff);
+ /* chip documentation does not say if the bits are
+ * self-clearing, so do it explicitly */
+ snd_soc_write(codec, STA32X_CFUD, cfud);
+ snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+ }
+ return 0;
+}
+
+int sta32x_cache_sync(struct snd_soc_codec *codec)
+{
+ unsigned int mute;
+ int rc;
+
+ if (!codec->cache_sync)
+ return 0;
+
+ /* mute during register sync */
+ mute = snd_soc_read(codec, STA32X_MMUTE);
+ snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
+ sta32x_sync_coef_shadow(codec);
+ rc = snd_soc_cache_sync(codec);
+ snd_soc_write(codec, STA32X_MMUTE, mute);
+ return rc;
+}
+
#define SINGLE_COEF(xname, index) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
- snd_soc_cache_sync(codec);
+ sta32x_cache_sync(codec);
}
/* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
STA32X_CxCFG_OM_MASK,
2 << STA32X_CxCFG_OM_SHIFT);
+ /* initialize coefficient shadow RAM with reset values */
+ for (i = 4; i <= 49; i += 5)
+ sta32x->coef_shadow[i] = 0x400000;
+ for (i = 50; i <= 54; i++)
+ sta32x->coef_shadow[i] = 0x7fffff;
+ sta32x->coef_shadow[55] = 0x5a9df7;
+ sta32x->coef_shadow[56] = 0x7fffff;
+ sta32x->coef_shadow[59] = 0x7fffff;
+ sta32x->coef_shadow[60] = 0x400000;
+ sta32x->coef_shadow[61] = 0x400000;
+
sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Bias level configuration will have done an extra enable */
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
index b97ee5a7566..d8e32a6262e 100644
--- a/sound/soc/codecs/sta32x.h
+++ b/sound/soc/codecs/sta32x.h
@@ -19,6 +19,7 @@
/* STA326 register addresses */
#define STA32X_REGISTER_COUNT 0x2d
+#define STA32X_COEF_COUNT 62
#define STA32X_CONFA 0x00
#define STA32X_CONFB 0x01
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7e5ec03f6f8..a7c9ae17fc7 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8731_PWR, 0xffff);
regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
wm8731->supplies);
+ codec->cache_sync = 1;
break;
}
codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a9504710bb6..3a629d0d690 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
u16 ioctl;
+ if (wm8753->dai_func == ucontrol->value.integer.value[0])
+ return 0;
+
if (codec->active)
return -EBUSY;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 91d3c6dbeba..53edd9a8c75 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
static const unsigned int mixinpga_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(5),
0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
static const unsigned int classd_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index eec8e143511..d1a142f48b0 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
static const unsigned int drc_max_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
};
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 3cd35a02c28..4a398c3bfe8 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
mdelay(100);
/* Normal bias enable & soft start off */
- reg |= WM9081_BIAS_ENA;
reg &= ~WM9081_VMID_RAMP;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
}
/* VMID 2*240k */
- reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
reg &= ~WM9081_VMID_SEL_MASK;
reg |= 0x04;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_OFF:
- /* Startup bias source */
+ /* Startup bias source and disable bias */
reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg |= WM9081_BIAS_SRC;
+ reg &= ~WM9081_BIAS_ENA;
snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
- /* Disable VMID and biases with soft ramping */
+ /* Disable VMID with soft ramping */
reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
- reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
+ reg &= ~WM9081_VMID_SEL_MASK;
reg |= WM9081_VMID_RAMP;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 2b5252c9e37..f94c06057c6 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
}
static const unsigned int in_tlv[] = {
- TLV_DB_RANGE_HEAD(6),
+ TLV_DB_RANGE_HEAD(3),
0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
};
static const unsigned int mix_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
};
static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
static const unsigned int spkboost_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 84f33d4ea2c..48e61e91240 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
static const unsigned int spkboost_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0268cf98973..83c4bd5b2dd 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
/* Initialize the device_attribute structure */
dev_attr = &ssi_private->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
dev_attr->attr.name = "statistics";
dev_attr->attr.mode = S_IRUGO;
dev_attr->show = fsl_sysfs_ssi_show;
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 9c0edad90d8..a4e3237956e 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
if (ret)
goto out3;
- mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/
+ /* enable ac97 multifunction pin */
+ mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
return 0;
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 30e2befd6f2..8b4c2535b26 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -747,6 +747,18 @@ sub __eval_option {
# Add space to evaluate the character before $
$option = " $option";
my $retval = "";
+ my $repeated = 0;
+ my $parent = 0;
+
+ foreach my $test (keys %repeat_tests) {
+ if ($i >= $test &&
+ $i < $test + $repeat_tests{$test}) {
+
+ $repeated = 1;
+ $parent = $test;
+ last;
+ }
+ }
while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
my $start = $1;
@@ -760,10 +772,14 @@ sub __eval_option {
# otherwise see if the default OPT (without [$i]) exists.
my $o = "$var\[$i\]";
+ my $parento = "$var\[$parent\]";
if (defined($opt{$o})) {
$o = $opt{$o};
$retval = "$retval$o";
+ } elsif ($repeated && defined($opt{$parento})) {
+ $o = $opt{$parento};
+ $retval = "$retval$o";
} elsif (defined($opt{$var})) {
$o = $opt{$var};
$retval = "$retval$o";